blob_id (stringlengths 40 to 40) | directory_id (stringlengths 40 to 40) | path (stringlengths 3 to 616) | content_id (stringlengths 40 to 40) | detected_licenses (sequencelengths 0 to 112) | license_type (stringclasses, 2 values) | repo_name (stringlengths 5 to 115) | snapshot_id (stringlengths 40 to 40) | revision_id (stringlengths 40 to 40) | branch_name (stringclasses, 777 values) | visit_date (timestamp[us]date 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us]date 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us]date 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, ⌀) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (stringclasses, 22 values) | gha_event_created_at (timestamp[us]date 2012-06-04 01:52:49 to 2023-09-14 21:59:50, ⌀) | gha_created_at (timestamp[us]date 2008-05-22 07:58:19 to 2023-08-21 12:35:19, ⌀) | gha_language (stringclasses, 149 values) | src_encoding (stringclasses, 26 values) | language (stringclasses, 1 value) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 to 10.2M) | extension (stringclasses, 188 values) | content (stringlengths 3 to 10.2M) | authors (sequencelengths 1 to 1) | author_id (stringlengths 1 to 132)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ee59ea8489cc423e5053c5e1f04f3b92b54998cf | 15e4e57f1e49ec38fe6c6e86f8309b0105093960 | /python/runFakeRateExtrapolation.py | 6220e321b8681c80102aeda3773d4360f19c1fe6 | [] | no_license | govoni/FlatNtStudy | eb621eb12d3a35bd58cd7f2de2a1c371d81b9eb3 | e58cbfed0c9a5e11822a254a554ecb62a7457ebc | refs/heads/master | 2020-12-24T16:59:17.649955 | 2015-05-13T10:31:05 | 2015-05-13T10:31:05 | 27,048,572 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 18,639 | py | #! /usr/bin/env python
## example python python/runLumiExtrapolation.py --datacardDIR output/DataCards_WW_SS_Inclusive/Card1D/lumiExtrapolation/ --inputVariable ptjj --outputDIR computeAsymptotic --makeAsymptotic --injectSignal 1 --nToys 100 --rMin 0.85 --rMax 1.15
import os
import glob
import math
from array import array
import sys
import time
import subprocess
import ROOT
from optparse import OptionParser
from subprocess import Popen
from collections import defaultdict
############################################
# Job steering #
############################################
parser = OptionParser()
parser.add_option('-b', action='store_true', dest='noX', default=False, help='no X11 windows')
##### other basic options for all the methods
parser.add_option('--datacardDIR', action="store", type="string", dest="datacardDIR", default="", help="directory where to find the datacards")
parser.add_option('--inputVariable', action="store", type="string", dest="inputVariable", default="", help="name of the input variable to be used")
parser.add_option('--fakeType', action="store", type="string", dest="fakeType", default="", help="name of fake process to be rescaled in the card")
parser.add_option('--outputDIR', action="store", type="string", dest="outputDIR", default="", help="output directory")
parser.add_option('--batchMode', action='store_true', dest='batchMode', default=False, help='to run jobs on condor fnal')
parser.add_option('--queque', action="store", type="string", dest="queque", default="")
parser.add_option('--noGenerateCards', action='store_true', dest='noGenerateCards', default=False, help='not generate again the cards')
parser.add_option('--runCombinationOnly', action='store_true', dest='runCombinationOnly', default=False, help='just run comb cards')
parser.add_option('--runWWWZCombination', action='store_true', dest='runWWWZCombination', default=False, help='WW/WZ cards combination')
parser.add_option('--makeAsymptotic', action="store_true", dest="makeAsymptotic", default=0)
parser.add_option('--makeProfileLikelihood', action="store_true", dest="makeProfileLikelihood", default=0)
parser.add_option('--makeMaxLikelihoodFit', action="store_true", dest="makeMaxLikelihoodFit", default=0)
parser.add_option('--injectSignal', action="store", type=float, dest="injectSignal", default=0., help='inject a signal in the toy generation')
parser.add_option('--nToys', action="store", type="int", dest="nToys", default=0, help="number of toys to generate")
parser.add_option('--bruteForce', action="store", type="int", dest="bruteForce", default=0, help="use brute force for profile likelihood")
parser.add_option('--rMin', action="store", type=float, dest="rMin", default=0)
parser.add_option('--rMax', action="store", type=float, dest="rMax", default=10)
fakeRateScaleFactor = [0.1,0.2,0.5,0.7,0.8,0.9,1.1,1.2,1.5,2,2.5,3.0,4.0,5.0,6.5,8.0,10.0];
(options, args) = parser.parse_args()
##########################################
###### Submit batch job for combine ######
##########################################
def submitBatchJobCombine(command, fn, fileNames):
currentDir = os.getcwd();
# create a dummy bash/csh
outScript = open(fn+".sh","w");
outScript.write('#!/bin/bash \n');
outScript.write('cd '+currentDir+'\n');
outScript.write('eval `scram runtime -sh`'+'\n');
outScript.write('cd - \n');
if fileNames.find("COMB") == -1 :
outScript.write('cp '+currentDir+"/"+fileNames+'* ./ \n');
else :
outScript.write('cp '+currentDir+"/"+fileNames+'* ./ \n');
nametemp = fileNames.replace("COMB","UUpp");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","UUmm");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","EEpp");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","EEmm");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","EUpp");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","EUmm");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","UEpp");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","UEmm");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","UUU");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","EEE");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","UUE");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
nametemp = fileNames.replace("COMB","EEU");
outScript.write('cp '+currentDir+"/"+nametemp+'* ./ \n');
outScript.write(command+'\n');
outScript.write("cp higgsCombine*"+fileNames+"* "+currentDir+"/"+options.outputDIR+'\n');
outScript.write("cp mlfit*"+fileNames+"* "+currentDir+"/"+options.outputDIR+'\n');
outScript.write("rm rootstats* "+'\n');
outScript.close();
os.system("chmod 777 "+currentDir+"/"+fn+".sh");
if options.queque!="" :
os.system("bsub -q "+options.queque+" -o "+currentDir+"/subJob"+fileNames+".log -e "+currentDir+"/subJob"+fileNames+".err "+fn+".sh");
else:
os.system("bsub -q 1nh -o "+currentDir+"/subJob"+fileNames+".log -e "+currentDir+"/subJob"+fileNames+".err "+fn+".sh");
##################################
########### Main Code ############
##################################
if __name__ == '__main__':
print "###### start extrapolate analysis ########";
if options.datacardDIR == "":
sys.exit("provide a datacard directory");
os.chdir(options.datacardDIR);
if not options.noGenerateCards :
for scalefactor in fakeRateScaleFactor :
os.system("rm *_%d.txt"%(scalefactor*10))
os.system("rm *_%d.root"%(scalefactor*10))
## make the card list
for var in options.inputVariable.split(",") :
os.system("ls | grep txt | grep -v COMB | grep _UUpp.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _UUmm.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _EEpp.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _EEmm.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _EUpp.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _EUmm.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _UEpp.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _UEmm.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _UUU.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _EEE.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _EEU.txt | grep "+options.inputVariable+" >> list.txt");
os.system("ls | grep txt | grep -v COMB | grep _UUE.txt | grep "+options.inputVariable+" >> list.txt");
datacardFile = open("list.txt","r");
datacardList = [];
for iline in datacardFile :
if iline.split(" ")[0]!="" and iline.split(" ")[0]!="#" and iline.split(" ")[0]!=" " and iline.split(" ")[0]!="\n" and iline.split(" ")[0]!="\t":
datacardList.append(iline.split(" ")[0].replace("\n",""));
## total number of datacards
os.system("rm list.txt");
createdCards = [];
fakeColumn = 0 ;
## fake rate on datacard list
if not options.noGenerateCards :
for datacard in datacardList :
observed = 0;
## loop on lumi values
for scalefactor in fakeRateScaleFactor :
inputfile = open('%s'%(datacard),'r');
## create a new root file
inputrootfile = ROOT.TFile(datacard.replace(".txt",".root"),"READ");
outname = datacard ;
outname = datacard.replace(".txt","_%d.txt"%(scalefactor*10));
print "create the new datacard ",outname;
fileNew = open('%s'%(outname), 'w');
createdCards.append(outname);
for ifile in inputfile :
if ifile.find(datacard.replace(".txt",".root"))!=-1 :
line = ifile.replace(datacard.replace(".txt",".root"),datacard.replace(".txt","_%d.root"%(scalefactor*10)));
fileNew.write(line);
continue ;
if ifile.split(" ")[0] != "rate" and ifile.split(" ")[0] != "process" :
fileNew.write(ifile);
continue;
if ifile.split(" ")[0] == "process":
fileNew.write(ifile);
icol = 0;
for columns in ifile.split() :
if columns != options.fakeType :
icol = icol+1;
continue;
else :
fakeColumn = icol;
if ifile.split(" ")[0] == "rate" :
lineToWrite = "rate ";
icol = 0;
for columns in ifile.split() :
if columns == "rate" :
icol = icol+1;
continue ;
elif icol != fakeColumn :
lineToWrite += " %f "%(float(columns));
icol = icol+1;
else :
lineToWrite += " %f "%(float(columns)*scalefactor);
fileNew.write(lineToWrite+"\n");
continue ;
fileNew.close();
## copy root file
outrootname = outname.replace(".txt",".root");
outrootfile = ROOT.TFile("%s"%(outrootname),"RECREATE");
for key in inputrootfile.GetListOfKeys() :
if key.GetClassName().find("TH1") == -1 and key.GetClassName().find("TH2") == -1 :
continue ;
outrootfile.cd();
histo = inputrootfile.Get(key.GetName()).Clone("temp");
if ROOT.TString(key.GetName()).Contains(options.fakeType):
histo.Scale(scalefactor);
histo.Write(key.GetName());
outrootfile.Write();
outrootfile.Close();
else:
for datacard in datacardList :
for scalefactor in fakeRateScaleFactor :
outname = datacard ;
outname = datacard.replace(".txt","_%d.txt"%(scalefactor*10));
createdCards.append(outname);
## merge the two datacard set
if options.outputDIR == "" :
sys.exit("cannot run combine --> outDir to be provided");
else :
os.system("mkdir -p "+options.outputDIR);
## combine the cards
combinedCards = [];
for scalefactor in fakeRateScaleFactor :
for datacard in createdCards :
if datacard.find("_%d"%(scalefactor*10)) != -1 :
if datacard.find("_UEpp") != -1 :
combinedCards.append(datacard.replace("_UEpp","_COMB"));
if datacard.find("_UEmm") != -1 :
combinedCards.append(datacard.replace("_UEmm","_COMB"));
if datacard.find("_EUpp") != -1 :
combinedCards.append(datacard.replace("_EUpp","_COMB"));
if datacard.find("_EUmm") != -1 :
combinedCards.append(datacard.replace("_EUmm","_COMB"));
if datacard.find("_EEpp") != -1 :
combinedCards.append(datacard.replace("_EEpp","_COMB"));
if datacard.find("_EEmm") != -1 :
combinedCards.append(datacard.replace("_EEmm","_COMB"));
if datacard.find("_UUpp") != -1 :
combinedCards.append(datacard.replace("_UUpp","_COMB"));
if datacard.find("_UUmm") != -1 :
combinedCards.append(datacard.replace("_UUmm","_COMB"));
if datacard.find("_UUU") != -1 :
combinedCards.append(datacard.replace("_UUU","_COMB"));
if datacard.find("_EEE") != -1 :
combinedCards.append(datacard.replace("_EEE","_COMB"));
if datacard.find("_UUE") != -1 :
combinedCards.append(datacard.replace("_UUE","_COMB"));
if datacard.find("_EEU") != -1 :
combinedCards.append(datacard.replace("_EEU","_COMB"));
break ;
if not options.noGenerateCards :
for card in combinedCards :
if options.runWWWZCombination :
print "combineCards.py "+card.replace("_COMB","_EEpp")+" "+card.replace("_COMB","_UUpp")+" "+card.replace("_COMB","_EUpp")+" "+card.replace("_COMB","_UEpp")+" "+card.replace("_COMB","_EEmm")+" "+card.replace("_COMB","_UUmm")+" "+card.replace("_COMB","_EUmm")+" "+card.replace("_COMB","_UEmm")+" "+card.replace("_COMB","_UUU")+" "+card.replace("_COMB","_EEE")+" "+card.replace("_COMB","_UUE")+" "+card.replace("_COMB","_EEU")+" > "+card;
os.system("combineCards.py "+card.replace("_COMB","_EEpp")+" "+card.replace("_COMB","_UUpp")+" "+card.replace("_COMB","_EUpp")+" "+card.replace("_COMB","_UEpp")+" "+card.replace("_COMB","_EEmm")+" "+card.replace("_COMB","_UUmm")+" "+card.replace("_COMB","_EUmm")+" "+card.replace("_COMB","_UEmm")+" "+card.replace("_COMB","_UUU")+" "+card.replace("_COMB","_EEE")+" "+card.replace("_COMB","_UUE")+" "+card.replace("_COMB","_EEU")+" > "+card);
else :
print "combineCards.py "+card.replace("_COMB","_EEpp")+" "+card.replace("_COMB","_UUpp")+" "+card.replace("_COMB","_EUpp")+" "+card.replace("_COMB","_UEpp")+" "+card.replace("_COMB","_EEmm")+" "+card.replace("_COMB","_UUmm")+" "+card.replace("_COMB","_EUmm")+" "+card.replace("_COMB","_UEmm")+" > "+card;
os.system("combineCards.py "+card.replace("_COMB","_EEpp")+" "+card.replace("_COMB","_UUpp")+" "+card.replace("_COMB","_EUpp")+" "+card.replace("_COMB","_UEpp")+" "+card.replace("_COMB","_EEmm")+" "+card.replace("_COMB","_UUmm")+" "+card.replace("_COMB","_EUmm")+" "+card.replace("_COMB","_UEmm")+" > "+card);
totalCards = [];
if not options.runCombinationOnly :
totalCards = createdCards + combinedCards
else :
totalCards = combinedCards
for card in totalCards :
outname = card.replace(".txt","");
if options.makeAsymptotic :
runCmmd = "combine -M Asymptotic --minimizerAlgo Minuit2 --minosAlgo stepping -n %s -m 100 -d %s -s -1 --expectSignal=%d -t %d --toysNoSystematics"%(outname,card,options.injectSignal,options.nToys);
print runCmmd ;
if options.batchMode:
fn = "combineScript_Asymptotic_%s"%(outname);
submitBatchJobCombine(runCmmd,fn,outname);
else :
os.system(runCmmd);
os.system("mv higgsCombine* "+options.outputDIR);
os.system("rm roostat*");
continue ;
if options.makeProfileLikelihood :
if options.bruteForce == 0 :
runCmmd = "combine -M ProfileLikelihood --signif -n %s -m 100 -d %s -t %d --expectSignal=%d -s -1 --toysNoSystematics"%(outname,card,options.nToys,options.injectSignal);
else:
runCmmd = "combine -M ProfileLikelihood --signif -n %s -m 100 -d %s -t %d --expectSignal=%d -s -1 --toysNoSystematics --bruteForce"%(outname,card,options.nToys,options.injectSignal);
print "runCmmd ",runCmmd;
if options.batchMode:
fn = "combineScript_ProfileLikelihood_exp_%s_iToy_%d"%(outname,options.nToys);
submitBatchJobCombine(runCmmd,fn,outname);
else:
os.system(runCmmd);
os.system("mv higgsCombine* "+options.outputDIR);
os.system("rm roostat* ");
continue ;
if options.makeMaxLikelihoodFit :
runCmmd = "combine -M MaxLikelihoodFit --minimizerAlgo Minuit2 --minimizerStrategy 1 --rMin %f --rMax %f --saveNormalizations --saveWithUncertainties -n %s -m 100 -d %s --do95=1 --robustFit=1 -s -1 -t %d --expectSignal %d --toysNoSystematics --skipBOnlyFit"%(options.rMin,options.rMax,outname,card,options.nToys,options.injectSignal);
print runCmmd ;
if options.batchMode:
fn = "combineScript_MaxLikelihoodFit_%s_nToys_%d"%(outname,options.nToys);
submitBatchJobCombine(runCmmd,fn,outname);
else:
os.system(runCmmd);
os.system("mv higgsCombine* "+options.outputDIR);
os.system("mv mlfit* "+options.outputDIR);
os.system("rm roostat* ");
continue ;
| [
"[email protected]"
] | |
6f227c5ce3a2d188622f589658f80e019ffc2ec6 | 7e4460c85790fae2d470182732289bcd1b8777b2 | /Process/process_scripts.py | c6028329d884b48f3b57d3958e624f02c6d43b3a | [] | no_license | khamukkamu/swconquest-msys | 5b23654c8dd2e8b2f25bc7914252eedc05a5cc1e | 71337a4ae9c507b9440e84cf49d31fc67a781978 | refs/heads/master | 2021-04-29T19:00:10.389224 | 2019-05-01T15:11:11 | 2019-05-01T15:11:11 | 121,704,753 | 1 | 1 | null | 2018-02-16T01:40:58 | 2018-02-16T01:40:58 | null | UTF-8 | Python | false | false | 1,812 | py | import string
from module_info import *
from module_scripts import *
from process_common import *
from process_operations import *
from module_info import wb_compile_switch as is_wb
def save_scripts(variable_list,variable_uses,scripts,tag_uses,quick_strings):
file = open(export_dir + "scripts.txt","w")
file.write("scriptsfile version 1\n")
file.write("%d\n"%len(scripts))
temp_list = []
list_type = type(temp_list)
for i_script in xrange(len(scripts)):
func = scripts[i_script]
if (type(func[1]) == list_type):
file.write("%s -1\n"%(convert_to_identifier(func[0])))
save_statement_block(file,convert_to_identifier(func[0]), 0,func[1], variable_list,variable_uses,tag_uses,quick_strings, convert_to_identifier(func[0]) )
else:
file.write("%s %s\n"%(convert_to_identifier(func[0]), swytrailzro(func[1])))
save_statement_block(file,convert_to_identifier(func[0]), 0,func[2], variable_list,variable_uses,tag_uses,quick_strings, convert_to_identifier(func[0]) )
file.write("\n")
file.close()
def save_python_header():
if (is_wb):
file = open("./IDs/ID_scripts_wb.py","w")
else:
file = open("./IDs/ID_scripts_mb.py","w")
for i_script in xrange(len(scripts)):
file.write("script_%s = %d\n"%(convert_to_identifier(scripts[i_script][0]),i_script))
file.write("\n\n")
file.close()
print "Exporting scripts..."
save_python_header()
variable_uses = []
variables = load_variables(export_dir, variable_uses)
tag_uses = load_tag_uses(export_dir)
quick_strings = load_quick_strings(export_dir)
save_scripts(variables,variable_uses,scripts,tag_uses,quick_strings)
save_variables(export_dir,variables,variable_uses)
save_tag_uses(export_dir, tag_uses)
save_quick_strings(export_dir,quick_strings)
| [
"[email protected]"
] | |
9c06c0a29a8845ed289678b35982f9e2dbc2a720 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03957/s736073526.py | 4d07ff9077817797927326fee3fdb9b2cb662fdf | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | s=input()
ans="No"
for i in range(len(s)):
if s[i]=="C":
for j in range(i+1,len(s)):
if s[j]=="F":
print("Yes")
exit()
else:
print(ans)
| [
"[email protected]"
] | |
a546526b405a6825d7312a49a2cd25bcb0d101ae | 0abd812a50ba3330734fcbb0088a74c5ad6735a2 | /python/asKeyword.py | 77dc239390af38863bc903a53b3a7baf0e65c86c | [] | no_license | scMarth/Learning | a914af6f6327454234e5f98dfc8cf95d6d4f8077 | ae696461c2c8edc9944879503cce01d525cf4ce0 | refs/heads/master | 2023-08-03T05:13:03.162533 | 2023-07-28T22:58:51 | 2023-07-28T22:58:51 | 120,689,926 | 2 | 0 | null | 2022-12-11T13:14:07 | 2018-02-08T00:33:42 | JavaScript | UTF-8 | Python | false | false | 56 | py | import re
import re as regex
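# Python caches imported modules in sys.modules, so `re` and `regex` are two names
# bound to the same module object; the identity check below prints True.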
print(id(re) == id(regex)) | [
"[email protected]"
] | |
7c02d2ad4e1e378078eab256ef590b3dbb318934 | 43d8b1639320fbafb9635f47c17e56fceed9faba | /edashi_1428/urls.py | f5d1d9d26e849b64a4b3c7e0ed6e30da099c7ba3 | [] | no_license | crowdbotics-apps/edashi-1428 | fd731df62fb45e554031f56fd85700149a2acb18 | 30615d9118d998e82c463df6bddcc1f858d9bb86 | refs/heads/master | 2022-12-10T04:09:32.671280 | 2019-03-17T09:38:05 | 2019-03-17T09:38:05 | 176,081,279 | 0 | 0 | null | 2022-12-08T04:55:54 | 2019-03-17T09:37:04 | Python | UTF-8 | Python | false | false | 1,042 | py | """edashi_1428 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url('', include('home.urls')),
url(r'^accounts/', include('allauth.urls')),
url(r'^api/v1/', include('home.api.v1.urls')),
url(r'^admin/', admin.site.urls),
]
admin.site.site_header = 'Edashi'
admin.site.site_title = 'Edashi Admin Portal'
admin.site.index_title = 'Edashi Admin'
| [
"[email protected]"
] | |
bafd53e16b68d5c5315f2de4dc3a24be45844475 | ae9bb7babce2a0349ae932985cf418a03057c670 | /test_ProjectPractice/test_requests/wework/__init__.py | 3c6ed16fe565082753b7192859a99a55e588806c | [] | no_license | Veraun/HogwartsSDET17-1 | d2592fcb4c9c63724c19bcf9edde349ebcd2c8af | 6648dbfb640b065ff2c76cb6889a8f9e4f124b91 | refs/heads/main | 2023-07-02T05:20:32.161248 | 2021-08-06T03:55:13 | 2021-08-06T03:55:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | '''
#!/usr/bin/python3
# -*- coding: utf-8 -*-
@author: wangwei
@project: HogwartsSDET17
@file: __init__.py.py
@time: 2021/3/28 19:30
@Email: Warron.Wang
''' | [
"[email protected]"
] | |
13fe7acffc167ef651043847166ade55dfbe7fad | 6219e6536774e8eeb4cadc4a84f6f2bea376c1b0 | /scraper/storage_spiders/thienthientanvn.py | 80cce4c40f006fcc7e32a7cdf2085a62934bb55f | [
"MIT"
] | permissive | nguyenminhthai/choinho | 109d354b410b92784a9737f020894d073bea1534 | d2a216fe7a5064d73cdee3e928a7beef7f511fd1 | refs/heads/master | 2023-05-07T16:51:46.667755 | 2019-10-22T07:53:41 | 2019-10-22T07:53:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | # Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//div[@class='mytextarea']/span",
'price' : "//span/div[@class='price']",
'category' : "//div[@id='accordion']/ul/li",
'description' : "//div[@class='table_center']/div[2]/table/tbody/tr/td|//div[@class='table_center']/div[3]/table/tbody/tr/td",
'images' : "//img[@id='ctl00_MainPlaceHolder_ctl00_imgLaptop']/@src|//ul/li/a[@class='highslide']/img/@src",
'canonical' : "",
'base_url' : "",
'brand' : ""
}
name = 'thienthientan.vn'
allowed_domains = ['thienthientan.vn']
start_urls = ['http://www.thienthientan.vn/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(), 'parse_item'),
Rule(LinkExtractor(), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
| [
"[email protected]"
] | |
5ae2c762306e5a94e26a671d98093c5b02e5db3d | f167dffa2f767a0419aa82bf434852069a8baeb8 | /lib/youtube_dl/extractor/arcpublishing.py | ca6a6c4d87f9f13a259f8402f5e7ef51ad097088 | [
"MIT"
] | permissive | firsttris/plugin.video.sendtokodi | d634490b55149adfdcb62c1af1eb77568b8da3f5 | 1095c58e2bc21de4ab6fcb67a70e4f0f04febbc3 | refs/heads/master | 2023-08-18T10:10:39.544848 | 2023-08-15T17:06:44 | 2023-08-15T17:06:44 | 84,665,460 | 111 | 31 | MIT | 2022-11-11T08:05:21 | 2017-03-11T16:53:06 | Python | UTF-8 | Python | false | false | 7,970 | py | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
extract_attributes,
int_or_none,
parse_iso8601,
try_get,
)
class ArcPublishingIE(InfoExtractor):
_UUID_REGEX = r'[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12}'
_VALID_URL = r'arcpublishing:(?P<org>[a-z]+):(?P<id>%s)' % _UUID_REGEX
_TESTS = [{
# https://www.adn.com/politics/2020/11/02/video-senate-candidates-campaign-in-anchorage-on-eve-of-election-day/
'url': 'arcpublishing:adn:8c99cb6e-b29c-4bc9-9173-7bf9979225ab',
'only_matching': True,
}, {
# https://www.bostonglobe.com/video/2020/12/30/metro/footage-released-showing-officer-talking-about-striking-protesters-with-car/
'url': 'arcpublishing:bostonglobe:232b7ae6-7d73-432d-bc0a-85dbf0119ab1',
'only_matching': True,
}, {
# https://www.actionnewsjax.com/video/live-stream/
'url': 'arcpublishing:cmg:cfb1cf1b-3ab5-4d1b-86c5-a5515d311f2a',
'only_matching': True,
}, {
# https://elcomercio.pe/videos/deportes/deporte-total-futbol-peruano-seleccion-peruana-la-valorizacion-de-los-peruanos-en-el-exterior-tras-un-2020-atipico-nnav-vr-video-noticia/
'url': 'arcpublishing:elcomercio:27a7e1f8-2ec7-4177-874f-a4feed2885b3',
'only_matching': True,
}, {
# https://www.clickondetroit.com/video/community/2020/05/15/events-surrounding-woodward-dream-cruise-being-canceled/
'url': 'arcpublishing:gmg:c8793fb2-8d44-4242-881e-2db31da2d9fe',
'only_matching': True,
}, {
# https://www.wabi.tv/video/2020/12/30/trenton-company-making-equipment-pfizer-covid-vaccine/
'url': 'arcpublishing:gray:0b0ba30e-032a-4598-8810-901d70e6033e',
'only_matching': True,
}, {
# https://www.lateja.cr/el-mundo/video-china-aprueba-con-condiciones-su-primera/dfcbfa57-527f-45ff-a69b-35fe71054143/video/
'url': 'arcpublishing:gruponacion:dfcbfa57-527f-45ff-a69b-35fe71054143',
'only_matching': True,
}, {
# https://www.fifthdomain.com/video/2018/03/09/is-america-vulnerable-to-a-cyber-attack/
'url': 'arcpublishing:mco:aa0ca6fe-1127-46d4-b32c-be0d6fdb8055',
'only_matching': True,
}, {
# https://www.vl.no/kultur/2020/12/09/en-melding-fra-en-lytter-endret-julelista-til-lewi-bergrud/
'url': 'arcpublishing:mentormedier:47a12084-650b-4011-bfd0-3699b6947b2d',
'only_matching': True,
}, {
# https://www.14news.com/2020/12/30/whiskey-theft-caught-camera-henderson-liquor-store/
'url': 'arcpublishing:raycom:b89f61f8-79fa-4c09-8255-e64237119bf7',
'only_matching': True,
}, {
# https://www.theglobeandmail.com/world/video-ethiopian-woman-who-became-symbol-of-integration-in-italy-killed-on/
'url': 'arcpublishing:tgam:411b34c1-8701-4036-9831-26964711664b',
'only_matching': True,
}, {
# https://www.pilotonline.com/460f2931-8130-4719-8ea1-ffcb2d7cb685-132.html
'url': 'arcpublishing:tronc:460f2931-8130-4719-8ea1-ffcb2d7cb685',
'only_matching': True,
}]
_POWA_DEFAULTS = [
(['cmg', 'prisa'], '%s-config-prod.api.cdn.arcpublishing.com/video'),
([
'adn', 'advancelocal', 'answers', 'bonnier', 'bostonglobe', 'demo',
'gmg', 'gruponacion', 'infobae', 'mco', 'nzme', 'pmn', 'raycom',
'spectator', 'tbt', 'tgam', 'tronc', 'wapo', 'wweek',
], 'video-api-cdn.%s.arcpublishing.com/api'),
]
@staticmethod
def _extract_urls(webpage):
entries = []
# https://arcpublishing.atlassian.net/wiki/spaces/POWA/overview
for powa_el in re.findall(r'(<div[^>]+class="[^"]*\bpowa\b[^"]*"[^>]+data-uuid="%s"[^>]*>)' % ArcPublishingIE._UUID_REGEX, webpage):
powa = extract_attributes(powa_el) or {}
org = powa.get('data-org')
uuid = powa.get('data-uuid')
if org and uuid:
entries.append('arcpublishing:%s:%s' % (org, uuid))
return entries
def _real_extract(self, url):
org, uuid = re.match(self._VALID_URL, url).groups()
for orgs, tmpl in self._POWA_DEFAULTS:
if org in orgs:
base_api_tmpl = tmpl
break
else:
base_api_tmpl = '%s-prod-cdn.video-api.arcpublishing.com/api'
if org == 'wapo':
org = 'washpost'
video = self._download_json(
'https://%s/v1/ansvideos/findByUuid' % (base_api_tmpl % org),
uuid, query={'uuid': uuid})[0]
title = video['headlines']['basic']
is_live = video.get('status') == 'live'
urls = []
formats = []
for s in video.get('streams', []):
s_url = s.get('url')
if not s_url or s_url in urls:
continue
urls.append(s_url)
stream_type = s.get('stream_type')
if stream_type == 'smil':
smil_formats = self._extract_smil_formats(
s_url, uuid, fatal=False)
for f in smil_formats:
if f['url'].endswith('/cfx/st'):
f['app'] = 'cfx/st'
if not f['play_path'].startswith('mp4:'):
f['play_path'] = 'mp4:' + f['play_path']
if isinstance(f['tbr'], float):
f['vbr'] = f['tbr'] * 1000
del f['tbr']
f['format_id'] = 'rtmp-%d' % f['vbr']
formats.extend(smil_formats)
elif stream_type in ('ts', 'hls'):
m3u8_formats = self._extract_m3u8_formats(
s_url, uuid, 'mp4', 'm3u8' if is_live else 'm3u8_native',
m3u8_id='hls', fatal=False)
if all([f.get('acodec') == 'none' for f in m3u8_formats]):
continue
for f in m3u8_formats:
if f.get('acodec') == 'none':
f['preference'] = -40
elif f.get('vcodec') == 'none':
f['preference'] = -50
height = f.get('height')
if not height:
continue
vbr = self._search_regex(
r'[_x]%d[_-](\d+)' % height, f['url'], 'vbr', default=None)
if vbr:
f['vbr'] = int(vbr)
formats.extend(m3u8_formats)
else:
vbr = int_or_none(s.get('bitrate'))
formats.append({
'format_id': '%s-%d' % (stream_type, vbr) if vbr else stream_type,
'vbr': vbr,
'width': int_or_none(s.get('width')),
'height': int_or_none(s.get('height')),
'filesize': int_or_none(s.get('filesize')),
'url': s_url,
'preference': -1,
})
self._sort_formats(
formats, ('preference', 'width', 'height', 'vbr', 'filesize', 'tbr', 'ext', 'format_id'))
subtitles = {}
for subtitle in (try_get(video, lambda x: x['subtitles']['urls'], list) or []):
subtitle_url = subtitle.get('url')
if subtitle_url:
subtitles.setdefault('en', []).append({'url': subtitle_url})
return {
'id': uuid,
'title': self._live_title(title) if is_live else title,
'thumbnail': try_get(video, lambda x: x['promo_image']['url']),
'description': try_get(video, lambda x: x['subheadlines']['basic']),
'formats': formats,
'duration': int_or_none(video.get('duration'), 100),
'timestamp': parse_iso8601(video.get('created_date')),
'subtitles': subtitles,
'is_live': is_live,
}
| [
"[email protected]"
] | |
483cc434f7750ca41c1475f6670f1c174d708d87 | 8ef8e6818c977c26d937d09b46be0d748022ea09 | /nlp/dialogue_generation/cpm/pytorch/iluvatar/cpm/config/layers/fast_self_multihead_attn_func.py | 01c7eedcf77eaaaf34487a80bdb34ae4fd3a42be | [
"Apache-2.0"
] | permissive | Deep-Spark/DeepSparkHub | eb5996607e63ccd2c706789f64b3cc0070e7f8ef | 9d643e88946fc4a24f2d4d073c08b05ea693f4c5 | refs/heads/master | 2023-09-01T11:26:49.648759 | 2023-08-25T01:50:18 | 2023-08-25T01:50:18 | 534,133,249 | 7 | 6 | Apache-2.0 | 2023-03-28T02:54:59 | 2022-09-08T09:07:01 | Python | UTF-8 | Python | false | false | 13,572 | py | import torch
# import fast_self_multihead_attn
try:
import ext_ops as fast_self_multihead_attn_bias
except:
pass
# import fast_self_multihead_attn_bias_additive_mask
class FastSelfAttnFunc(torch.autograd.Function) :
@staticmethod
def forward(ctx, use_time_mask, is_training, heads, inputs, input_weights, output_weights, input_biases, output_biases, pad_mask, mask_additive, dropout_prob):
use_biases_t = torch.tensor([input_biases is not None])
heads_t = torch.tensor([heads])
dropout_prob_t = torch.tensor([dropout_prob])
null_tensor = torch.tensor([])
use_mask = (pad_mask is not None)
mask_additive_t= torch.tensor([mask_additive])
if use_biases_t[0]:
if not mask_additive:
input_lin_results, \
softmax_results, \
dropout_results, \
dropout_mask, \
matmul2_results, \
outputs = \
fast_self_multihead_attn_bias.forward( \
use_mask, \
use_time_mask, \
is_training, \
heads, \
inputs, \
input_weights, \
output_weights, \
input_biases, \
output_biases, \
pad_mask if use_mask else null_tensor, \
dropout_prob)
ctx.save_for_backward(use_biases_t, \
heads_t, \
matmul2_results, \
dropout_results, \
softmax_results, \
null_tensor, \
null_tensor, \
mask_additive_t, \
input_lin_results, \
inputs, \
input_weights, \
output_weights, \
dropout_mask, \
dropout_prob_t)
else:
input_lin_results, \
bmm1_results, \
dropout_results, \
dropout_mask, \
matmul2_results, \
outputs = \
fast_self_multihead_attn_bias_additive_mask.forward( \
use_mask, \
use_time_mask, \
is_training, \
heads, \
inputs, \
input_weights, \
output_weights, \
input_biases, \
output_biases, \
pad_mask if use_mask else null_tensor, \
dropout_prob)
ctx.save_for_backward(use_biases_t, \
heads_t, \
matmul2_results, \
dropout_results, \
null_tensor, \
bmm1_results, \
pad_mask, \
mask_additive_t, \
input_lin_results, \
inputs, \
input_weights, \
output_weights, \
dropout_mask, \
dropout_prob_t)
else:
input_lin_results, \
softmax_results, \
dropout_results, \
dropout_mask, \
matmul2_results, \
outputs = \
fast_self_multihead_attn.forward( \
use_mask, \
use_time_mask, \
is_training, \
heads, \
inputs, \
input_weights, \
output_weights, \
pad_mask if use_mask else null_tensor, \
dropout_prob)
ctx.save_for_backward(use_biases_t, \
heads_t, \
matmul2_results, \
dropout_results, \
softmax_results, \
null_tensor, \
null_tensor, \
mask_additive_t, \
input_lin_results, \
inputs, \
input_weights, \
output_weights, \
dropout_mask, \
dropout_prob_t)
return outputs.detach()
@staticmethod
def backward(ctx, output_grads):
use_biases_t, \
heads_t, \
matmul2_results, \
dropout_results, \
softmax_results, \
bmm1_results, \
pad_mask, \
mask_additive_t, \
input_lin_results, \
inputs, \
input_weights, \
output_weights, \
dropout_mask, \
dropout_prob_t = ctx.saved_tensors
output_grads = output_grads.contiguous()
if use_biases_t[0]:
if not mask_additive_t[0]:
input_grads, \
input_weight_grads, \
output_weight_grads, \
input_bias_grads, \
output_bias_grads = \
fast_self_multihead_attn_bias.backward( \
heads_t[0], \
output_grads, \
matmul2_results, \
dropout_results, \
softmax_results, \
input_lin_results, \
inputs, \
input_weights, \
output_weights, \
dropout_mask, \
dropout_prob_t[0])
else:
input_grads, \
input_weight_grads, \
output_weight_grads, \
input_bias_grads, \
output_bias_grads = \
fast_self_multihead_attn_bias_additive_mask.backward( \
heads_t[0], \
output_grads, \
matmul2_results, \
dropout_results, \
bmm1_results, \
pad_mask, \
input_lin_results, \
inputs, \
input_weights, \
output_weights, \
dropout_mask, \
dropout_prob_t[0])
else:
input_bias_grads = None
output_bias_grads = None
input_grads, \
input_weight_grads, \
output_weight_grads = \
fast_self_multihead_attn.backward( \
heads_t[0], \
output_grads, \
matmul2_results, \
dropout_results, \
softmax_results, \
input_lin_results, \
inputs, \
input_weights, \
output_weights, \
dropout_mask, \
dropout_prob_t[0])
return None, None, None, input_grads, input_weight_grads, output_weight_grads,input_bias_grads, output_bias_grads, None, None, None
fast_self_attn_func = FastSelfAttnFunc.apply
| [
"[email protected]"
] | |
b1effa43176d77ba4cd5d71fe491629591f33413 | 978a0ff297cfe68baa8b62a30aaacefa3efdd48d | /flaskfiles/flaskpractice.py | 4fe3f0d5817107d5f5eb70af42986722a0a65e5e | [] | no_license | pavi535/pythonpratice | d55f263cf4170ace3fa8ba7f4a26d67f950af7ce | 9f66be3e609f2b4fbc1a035e67d6fcf08992818a | refs/heads/main | 2023-08-27T06:38:30.446752 | 2021-11-10T03:03:24 | 2021-11-10T03:03:24 | 426,094,134 | 0 | 0 | null | 2021-11-10T03:03:25 | 2021-11-09T04:46:11 | Python | UTF-8 | Python | false | false | 2,593 | py | from datetime import datetime
from flask import Flask, render_template, url_for, flash, redirect
from flask_sqlalchemy import SQLAlchemy
from forms import RegistrationForm, LoginForm
app = Flask(__name__)
app.config['SECRET_KEY'] = '5791628bb0b13ce0c676dfde280ba245'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
db=SQLAlchemy(app)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(20), unique=True, nullable=False)
email = db.Column(db.String(120), unique=True, nullable=False)
image_file = db.Column(db.String(20), nullable=False, default='default.jpg')
password = db.Column(db.String(60), nullable=False)
posts = db.relationship('Post', backref='author', lazy=True)
def __repr__(self):
return f"User('{self.username}', '{self.email}', '{self.image_file}')"
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(100), nullable=False)
date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
content = db.Column(db.Text, nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
def __repr__(self):
return f"Post('{self.title}', '{self.date_posted}')"
posts=[{'Product_name': 'car',
'car_make':'Toyota',
'car_year': 2019,
'car_description':'It is a black car with 52000 miles on it' },
{'Product_name': 'jeep',
'car_make':'Wrangler',
'car_year': 2020,
'car_description':'It is a black car with 12000 miles on it' }
]
@app.route('/')
def home():
return render_template('home.html', posts=posts)
@app.route('/help')
def help():
return render_template('help.html', title='help')
@app.route("/register", methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
flash(f'Account created for {form.username.data}!', 'success')
return redirect(url_for('home'))
return render_template('register.html', title='Register', form=form)
@app.route("/login", methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
if form.email.data == '[email protected]' and form.password.data == 'password':
flash('You have been logged in!', 'success')
return redirect(url_for('home'))
else:
flash('Login Unsuccessful. Please check username and password', 'danger')
return render_template('login.html', title='Login', form=form)
if __name__=='__main__':
app.run(debug=True) | [
"[email protected]"
] | |
8a995bd441b1bf0410c40c17856e88cacb7fdc00 | 840ca6face6cb369104eec228fe7b51630bd10f1 | /剑指offer/52-两个链表的第一个公共节点.py | ca8ed7ed73921386ff21cac0c21a687a57434913 | [] | no_license | Leofighting/Practice-on-LeetCode | 56e6245eb03f76ca254e54dc0a0cdd2c71ec3dd0 | 6d7dad991922abe862f19009b261b5146e059955 | refs/heads/master | 2021-08-16T04:21:04.699124 | 2020-06-29T22:48:38 | 2020-06-29T22:48:38 | 197,718,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | # -*- coding:utf-8 -*-
__author__ = "leo"
# Given two linked lists, find their first common (intersection) node.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def get_intersection_node(self, headA, headB):
node1, node2 = headA, headB
while node1 != node2:
node1 = node1.next if node1 else headB
node2 = node2.next if node2 else headA
return node1
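
# A minimal usage sketch (hypothetical driver with illustrative node values): each
# pointer traverses its own list and then switches to the other, so both cover
# a + b nodes and meet at the first shared node, or at None when there is none.
if __name__ == "__main__":
    common = ListNode(8)              # shared tail: 8 -> 4
    common.next = ListNode(4)
    head_a = ListNode(1)              # list A: 1 -> 2 -> 8 -> 4
    head_a.next = ListNode(2)
    head_a.next.next = common
    head_b = ListNode(3)              # list B: 3 -> 8 -> 4
    head_b.next = common
    print(Solution().get_intersection_node(head_a, head_b).val)  # prints 8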
| [
"[email protected]"
] | |
b06e3ace791dfcd120050816b47cf3cea36e3caf | 056adbbdfb968486ecc330f913f0de6f51deee33 | /609-find-duplicate-file-in-system/find-duplicate-file-in-system.py | 369212a29b568e52d671b267faa76bb344d532b9 | [] | no_license | privateHmmmm/leetcode | b84453a1a951cdece2dd629c127da59a4715e078 | cb303e610949e953b689fbed499f5bb0b79c4aea | refs/heads/master | 2021-05-12T06:21:07.727332 | 2018-01-12T08:54:52 | 2018-01-12T08:54:52 | 117,215,642 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,759 | py | # -*- coding:utf-8 -*-
# Given a list of directory info including directory path, and all the files with contents in this directory, you need to find out all the groups of duplicate files in the file system in terms of their paths.
#
# A group of duplicate files consists of at least two files that have exactly the same content.
#
# A single directory info string in the input list has the following format:
# "root/d1/d2/.../dm f1.txt(f1_content) f2.txt(f2_content) ... fn.txt(fn_content)"
# It means there are n files (f1.txt, f2.txt ... fn.txt with content f1_content, f2_content ... fn_content, respectively) in directory root/d1/d2/.../dm. Note that n >= 1 and m >= 0. If m = 0, it means the directory is just the root directory.
#
# The output is a list of group of duplicate file paths. For each group, it contains all the file paths of the files that have the same content. A file path is a string that has the following format:
# "directory_path/file_name.txt"
#
#
# Example 1:
#
# Input:
# ["root/a 1.txt(abcd) 2.txt(efgh)", "root/c 3.txt(abcd)", "root/c/d 4.txt(efgh)", "root 4.txt(efgh)"]
# Output:
# [["root/a/2.txt","root/c/d/4.txt","root/4.txt"],["root/a/1.txt","root/c/3.txt"]]
#
#
#
#
# Note:
#
# No order is required for the final output.
# You may assume the directory name, file name and file content only has letters and digits, and the length of file content is in the range of [1,50].
# The number of files given is in the range of [1,20000].
# You may assume no files or directories share the same name in the same directory.
# You may assume each given directory info represents a unique directory. Directory path and file info are separated by a single blank space.
#
#
#
#
# Follow-up beyond contest:
#
# Imagine you are given a real file system, how will you search files? DFS or BFS?
# If the file content is very large (GB level), how will you modify your solution?
# If you can only read the file by 1kb each time, how will you modify your solution?
# What is the time complexity of your modified solution? What is the most time-consuming part and memory consuming part of it? How to optimize?
# How to make sure the duplicated files you find are not false positive?
#
class Solution(object):
def findDuplicate(self, paths):
"""
:type paths: List[str]
:rtype: List[List[str]]
"""
dicts = collections.defaultdict(list)
for path in paths:
files = path.split(" ")
dir = files[0]
for f in files[1:]:
filename,_,content = f.partition('(')
dicts[content[:-1]].append(dir+'/'+filename)
        return [g for g in dicts.values() if len(g)>1]
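
# A quick local check (hypothetical driver, not part of the judge submission), using
# the sample input from the problem statement above; the snippet relies on
# `collections` already being available in the judge environment, so it is imported
# here explicitly for a standalone run.
if __name__ == "__main__":
    import collections
    sample = ["root/a 1.txt(abcd) 2.txt(efgh)", "root/c 3.txt(abcd)",
              "root/c/d 4.txt(efgh)", "root 4.txt(efgh)"]
    print(Solution().findDuplicate(sample))
    # expected groups (order may vary):
    # [['root/a/2.txt', 'root/c/d/4.txt', 'root/4.txt'], ['root/a/1.txt', 'root/c/3.txt']]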
| [
"[email protected]"
] | |
7863e3a9c1b084f2424bfe6b8e926d7afd714b98 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p04030/s580737724.py | e5017d598b9bc2f7e817eadffadd9076c0229da3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 308 | py | S = input()
Z = []
X = len(S)
i = 0
while i < X :
if S[i] == "0":
Z.append(0)
elif S[i] == "1":
Z.append(1)
elif S[i] == "B":
if len(Z)== 0:
pass
else:
Z.pop()
i += 1
i = 0
X = len(Z)
while i < X:
print(Z[i] , end ="")
i +=1 | [
"[email protected]"
] | |
98e8cfe4279ac7e1dfe3f4566e407a589595201e | 82f993631da2871933edf83f7648deb6c59fd7e4 | /w2/L6/11.py | 2cf14d2b9a804b5ec5eaec7caa0a54bb13eddce8 | [] | no_license | bobur554396/PPII2021Summer | 298f26ea0e74c199af7b57a5d40f65e20049ecdd | 7ef38fb4ad4f606940d2ba3daaa47cbd9ca8bcd2 | refs/heads/master | 2023-06-26T05:42:08.523345 | 2021-07-24T12:40:05 | 2021-07-24T12:40:05 | 380,511,125 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | a = ['a', 'b', 'c']
b = ['hello', 'hi', 'hola']
c = [12, 30, 20]
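# zip() pairs elements by position, so the loop below prints three tuples:
# ('a', 'hello', 12), ('b', 'hi', 30), ('c', 'hola', 20)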
for i in zip(a, b, c):
print(i) | [
"[email protected]"
] | |
3267827c6172fd22712c30402e7fc68868d81061 | 42b84b02e64d21234372501a20bf820e0bcbf281 | /site/threath/apps/user_profiles/views.py | 2054832130d5f203d6bf0ea498dde605276bad9c | [] | no_license | gage/proto | 861d1e1190770b0cc74f51a6fe140157cc0ac12e | e13ac7d0ee5c6acce2557dcf71a00a941543c006 | refs/heads/master | 2020-04-06T06:44:01.712532 | 2013-06-28T06:30:59 | 2013-06-28T06:30:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,876 | py | import time
from django.contrib.auth.decorators import login_required
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.template.loader import render_to_string
from django.conf import settings
from django.contrib.auth.models import User
from globals.utils import bigpipe_pagelet
def verify_email(request):
'''
    When the user clicks the activation link, the email will be verified.
'''
activation_code = request.GET.get('activation_code')
email = request.GET.get('email')
uid = request.GET.get('id')
# print "activation_code: %s" % activation_code
# print "email: %s" % email
# print "id: %s" % uid
user = User.objects.get(id=uid)
# print user
profile = user.get_profile()
if profile.verify_email(email, activation_code):
return HttpResponse("Email has been verified successfully.")
else:
return HttpResponse("This activation code is expired.")
@login_required
def user_main(request, user_id=None):
def stream_response_generator():
context = {
'BIG_PIPE': True
}
base_view = render_to_string("main.html", context, context_instance=RequestContext(request))
yield base_view.ljust(4096)
yield bp_testpagelet(request).ljust(4096)
yield render_to_string("bp_page_end.html", {}, context_instance=RequestContext(request))
return HttpResponse(stream_response_generator(), mimetype='text/html', stream_content=True)
@bigpipe_pagelet
def bp_testpagelet(request):
innerHTML = render_to_string("bp_testpagelet.html", {'BIG_PIPE': True}, context_instance=RequestContext(request))
return ['testpagelet',
innerHTML,
'chatRoom/chatRoom',
['base.css','test.css']
]
| [
"[email protected]"
] | |
0039e5a8aec878cb771be2ecdc89116f71a7cd5f | 48ff6b01dabc631c8924f3c51996010d9e0d2086 | /psypl/experiments/variable_count.py | 8f9bdaeef8f1030d231ba50b2ed17beb7e2c70bb | [] | no_license | willcrichton/psypl-experiments | b4522908f17ba9fbc023fa627a260e645a511bc4 | 7b0a134cc17919e62707d005fc03f2e22938eb13 | refs/heads/master | 2022-12-18T01:41:20.964024 | 2021-02-13T00:46:55 | 2021-02-13T00:46:55 | 219,410,440 | 3 | 0 | null | 2022-12-07T14:42:13 | 2019-11-04T03:33:04 | Jupyter Notebook | UTF-8 | Python | false | false | 3,326 | py | from enum import Enum
from ..utils import random_tree, ConstNode, OpNode, all_names, shuffle, try_int
from random import sample
from itertools import combinations, product
from ..base import Experiment
from pprint import pprint
import pandas as pd
import numpy as np
import experiment_widgets
class VariableCountExperiment(Experiment):
Widget = experiment_widgets.FunctionBasicExperiment
all_n_op = [6, 9]
all_n_var = [0, 2, 4]
class Condition(Enum):
#Random = 1
Even = 1
Frontloaded = 2
def generate_experiment(self, N_trials=24):
conditions = list(product(self.all_n_var, self.all_n_op, list(self.Condition)))
return {
"trials": shuffle(
[
self.generate_trial(*conds)
for conds in conditions
for _ in range(N_trials // len(conditions))
]
),
"between_trials_time": 4000,
}
def node_size(self, t, idxs):
if isinstance(t, OpNode):
lmap, lsize = self.node_size(t.left, idxs)
rmap, rsize = self.node_size(t.right, idxs)
size = lsize + rsize + 1
if t.index in idxs:
return {t.index: size, **lmap, **rmap}, 0
else:
return {**lmap, **rmap}, size
else:
return {}, 0
def generate_trial(self, N_var, N_op, cond):
tree = random_tree(N_op)
if N_var > 0:
coverings = pd.DataFrame([{
'sizes': self.node_size(tree, idxs)[0],
'remaining': self.node_size(tree, idxs)[1],
'idxs': idxs
} for idxs in combinations(list(range(N_op-1)), N_var)])
coverings['size_seq'] = coverings.apply(
lambda row: [t[1] for t in sorted(row.sizes.items(), key=lambda t: t[0])] + [row.remaining],
axis=1)
if cond == self.Condition.Even:
                coverings['score'] = coverings.size_seq.map(lambda seq: np.std(seq))
elif cond == self.Condition.Frontloaded:
def compute_score(seq):
return np.sum([(i+1) * seq[i] for i in range(len(seq))])
coverings['score'] = coverings.size_seq.map(compute_score)
best_rows = coverings[coverings.score == coverings.score.min()]
row = best_rows.sample().iloc[0]
indices = row.idxs
size_seq = row.size_seq
names = sample(all_names, k=N_var)
defs, call = tree.to_mixed_str({i: n for i, n in zip(indices, names)})
else:
defs = []
call = tree.to_paren_str()
size_seq = [N_op]
program = '\n'.join(defs + [call])
globls = {}
exec(program, globls, globls)
answer = eval(call, globls, globls)
return {
'program': program,
'call': call if N_var > 0 else None,
'cond': str(cond),
'N_var': N_var,
'N_op': N_op,
'size_seq': size_seq,
'answer': str(answer)
}
def eval_trial(self, trial, result):
return {
"correct": 1 if int(trial["answer"]) == try_int(result["response"]) else 0,
"cond": trial["cond"]
}
| [
"[email protected]"
] | |
ab5a8efe6ee474ebb3d0874bd150540fd5990e8f | b05ae08859d3b593b6c815a10e0705e13c1ae1eb | /RinoNakasone/RinoNakasone/spiders/piaohua.py | 46b6392f62d63419047047495d160ab00d756622 | [] | no_license | jacksonyoudi/Rino_nakasone_backend | 32425bcd9087384fa25db1fe51e854b7a4f1fa12 | e838668a6f67a6a4eca52d7658ad84b61b4123db | refs/heads/master | 2021-04-15T18:21:17.678794 | 2019-03-02T15:16:30 | 2019-03-02T15:16:30 | 126,698,903 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,131 | py | # -*- coding: utf-8 -*-
import scrapy
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from RinoNakasone.settings import PIAOHUA
class PiaohuaSpider(scrapy.Spider):
name = 'piaohua'
allowed_domains = ['www.piaohua.com']
start_urls = ['http://www.piaohua.com/']
def parse(self, response):
html_doc = response.body
soup = BeautifulSoup(html_doc, 'html.parser')
for i in soup.find_all('a', class_="img"):
if i.attrs.get('href'):
url = i.attrs.get('href')
full_url = urljoin(PIAOHUA, url)
yield scrapy.Request(full_url, callback=self.parse_detail)
next_url = urljoin(response.url.split('list_')[0],
soup.find('div', class_='page tk').find_all('a')[-2].attrs.get('href'))
yield scrapy.Request(next_url, callback=self.parse)
def parse_detail(self, response):
item = IreadweekItem()
html_doc = response.body
soup = BeautifulSoup(html_doc, 'html.parser')
img_url = urljoin(CDN, soup.find('img').attrs.get('src').replace('//', '/'))
download_url = soup.find('a', class_='downloads').attrs.get('href')
title = soup.find_all('div', class_='hanghang-za-title')
name = title[0].text
content = soup.find_all('div', class_='hanghang-za-content')
author_info = content[0].text
directory = '\n'.join([i.text.replace("\u3000", '') for i in content[1].find_all('p')])
info = soup.find('div', class_='hanghang-shu-content-font').find_all('p')
author = info[0].text.split('作者:')[1]
category = info[1].text.split('分类:')[1]
score = info[2].text.split('豆瓣评分:')[1]
introduction = info[4].text
item['name'] = name
item['img_url'] = img_url
item['download_url'] = download_url
item['author'] = author
item['author_info'] = author_info
item['category'] = category
item['score'] = score
item['introduction'] = introduction
item['directory'] = directory
return item
| [
"[email protected]"
] | |
187166ea2567f6b8c7bba448205dd30a929a7111 | f156beb6c5d911e86c28ea71f70f7422391a2c12 | /ipynb/biplot.py | 8dce7fa3067b700bbaf52f0c0f3842ed253917e4 | [] | no_license | knightlab-analyses/office-study | 31382dc259b1b21c3288709e5a49070186c5e66b | 506e1d037c982e23538aec4742305ccd2508d844 | refs/heads/master | 2021-01-12T14:36:42.088868 | 2017-08-20T23:46:57 | 2017-08-20T23:46:57 | 72,040,888 | 0 | 2 | null | 2017-08-20T23:46:58 | 2016-10-26T20:17:35 | Jupyter Notebook | UTF-8 | Python | false | false | 17,786 | py | from __future__ import division
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.colors as colors
import matplotlib.cm as cmx
import numpy as np
import pandas as pd
from collections import OrderedDict
def make_biplot(samples,
features=None,
sample_metadata=None,
feature_metadata=None,
sample_color_category=None,
feature_color_category=None,
sample_color_dict=None,
feature_color_dict=None,
sample_zorder=None,
feature_zorder=None,
**kwargs):
figure_size = (15, 15)
samples_x = 'PCA1'
samples_y = 'PCA2'
samp_col = 'RdGy'
samp_alpha = 1
samp_marker = 'o'
samp_ms = 8
samp_leg_loc = 2
features_x = 'PCA1'
features_y = 'PCA2'
feat_col = 'Set1'
feat_alpha = 1
arrow_width = 0.02
arrow_head = 0.05
feat_leg_loc = 1
feature_order = 0
sample_drop_list = []
show_color_drop = False
sample_drop_col = ['#FFFFFF']
eigenvalues = []
x_pad = 0.3
y_pad = 0.3
for key, value in kwargs.items():
if key == 'figure_size':
figure_size = value
if key == 'samples_x':
samples_x = value
if key == 'samples_y':
samples_y = value
if key == 'samp_col':
samp_col = value
if key == 'samp_alpha':
samp_alpha = value
if key == 'samp_marker':
samp_marker = value
if key == 'samp_ms':
samp_ms = value
if key == 'samp_leg_loc':
samp_leg_loc = value
if key == 'features_x':
            features_x = value
if key == 'features_y':
            features_y = value
if key == 'feat_col':
feat_col = value
if key == 'feat_alpha':
feat_alpha = value
if key == 'arrow_width':
arrow_width = value
if key == 'arrow_head':
arrow_head = value
if key == 'feat_leg_loc':
feat_leg_loc = value
if key == 'feature_order':
if value == 0:
feature_order = 0
if value == 1:
feature_order = 1
if key == 'sample_drop_list':
sample_drop_list = value
if key == 'show_color_drop':
show_color_drop = value
if key == 'sample_drop_col':
sample_drop_col = value
if key == 'eigenvalues':
eigenvalues = value
if key == 'x_pad':
x_pad = value
if key == 'y_pad':
y_pad = value
if not isinstance(samples, pd.core.frame.DataFrame):
raise ValueError('`samples` must be a `pd.DataFrame`, '
'not %r.' % type(samples).__name__)
if features is not None:
if not isinstance(features, pd.core.frame.DataFrame):
raise ValueError('`features` must be a `pd.DataFrame`, '
'not %r.' % type(features).__name__)
if sample_metadata is not None:
if not isinstance(sample_metadata, pd.core.frame.DataFrame):
raise ValueError('`sample_metadata` must be a `pd.DataFrame`, '
'not %r.' % type(sample_metadata).__name__)
if feature_metadata is not None:
if not isinstance(feature_metadata, pd.core.frame.DataFrame):
raise ValueError('`feature_metadata` must be a `pd.DataFrame`, '
'not %r.' % type(feature_metadata).__name__)
if sample_color_dict is not None:
if not isinstance(sample_color_dict, dict):
raise ValueError('`sample_color_dict` must be a `dictionary`, '
'not %r.' % type(sample_color_dict).__name__)
if feature_color_dict is not None:
if not isinstance(feature_color_dict, dict):
raise ValueError('`feature_color_dict` must be a `dictionary`, '
'not %r.' % type(feature_color_dict).__name__)
if sample_metadata is not None and sample_color_dict is None:
if sample_color_category is None:
raise ValueError('sample_color_category must be a specified')
if sample_metadata is not None and sample_color_dict is not None:
if sample_color_category is None:
raise ValueError('sample_color_category must be a specified')
if feature_metadata is not None and feature_color_dict is not None:
if feature_color_category is None:
raise ValueError('feature_color_category must be a specified')
if sample_drop_list is not None:
if not isinstance(sample_drop_list, list):
raise ValueError('`sample_drop_list` must be a `list`, '
'not %r.' % type(sample_drop_list).__name__)
if sample_drop_col is not None:
if not isinstance(sample_drop_col, list):
raise ValueError('`sample_drop_col` must be a `list`, '
'not %r.' % type(sample_drop_col).__name__)
if sample_metadata is not None:
if (samples.index != sample_metadata.index).any():
samples = samples.sort_index(axis=0)
sample_metadata = sample_metadata.sort_index(axis=0)
fig = plt.figure(figsize=figure_size)
ax = fig.add_subplot(111)
sample_colors = plt.get_cmap(samp_col)
feature_colors = plt.get_cmap(feat_col)
sample_group_append = []
colorVal = []
if sample_metadata is None:
ax.plot(np.ravel(samples[samples_x]),
np.ravel(samples[samples_y]),
marker=samp_marker, linestyle='',
ms=samp_ms, alpha=samp_alpha)
if (sample_metadata is not None and sample_color_dict is None):
sample_groups = samples.groupby(sample_metadata[sample_color_category])
if len(sample_drop_list) > 0:
def dropf(x):
return x not in sample_drop_list
index_drop = sample_metadata[sample_color_category].apply(dropf)
samp_r = samples.loc[sample_metadata.index]
samp_met_r = sample_metadata.loc[index_drop][sample_color_category]
for name, group in samp_r.groupby(samp_met_r):
sample_group_append.append(name)
sample_group_append = sorted(list(set(sample_group_append)))
cNorm = colors.Normalize(vmin=0,
vmax=(len(sample_group_append)-1))
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=sample_colors)
for index, row in enumerate(sample_group_append):
colorVal.append(scalarMap.to_rgba(index))
if not show_color_drop:
sample_color_dict = dict(zip(sample_group_append, colorVal))
sample_color_dict = OrderedDict(
sorted(sample_color_dict.items(),
key=lambda x: x[0],
reverse=True))
for name, group in samp_r.groupby(samp_met_r):
ax.plot(np.ravel(group[samples_x]),
np.ravel(group[samples_y]),
marker=samp_marker, linestyle='', ms=samp_ms,
color=sample_color_dict[name],
label=name, alpha=samp_alpha)
else:
color_drop_append = []
if len(sample_drop_col) == 1:
for index in range(len(sample_drop_list)):
color_drop_append.append(sample_drop_col[0])
colorVal = colorVal + color_drop_append
if len(sample_drop_col) == len(sample_drop_list):
for index in range(len(sample_drop_list)):
color_drop_append.append(sample_drop_col[index])
colorVal = colorVal + color_drop_append
sample_group_append = list(sample_group_append)
sample_group_append += list(sample_drop_list)
sample_color_dict = dict(zip(sample_group_append, colorVal))
sample_color_dict = OrderedDict(
sorted(sample_color_dict.items(),
key=lambda x: x[0],
reverse=True))
for name, group in sample_groups:
if name not in sample_drop_list:
ax.plot(np.ravel(group[samples_x]),
np.ravel(group[samples_y]),
marker=samp_marker, linestyle='', ms=samp_ms,
color=sample_color_dict[name],
label=name, alpha=samp_alpha)
for name, group in sample_groups:
if name in sample_drop_list:
ax.plot(np.ravel(group[samples_x]),
np.ravel(group[samples_y]),
marker=samp_marker, linestyle='', ms=samp_ms,
color=sample_color_dict[name],
label=name, alpha=samp_alpha)
else:
sample_group_append = []
for name, group in sample_groups:
sample_group_append.append(name)
sample_group_append = sorted(list(set(sample_group_append)))
cNorm = colors.Normalize(vmin=0,
vmax=(len(sample_group_append)-1))
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=sample_colors)
for index, row in enumerate(sample_group_append):
colorVal.append(scalarMap.to_rgba(index))
sample_color_dict = dict(zip(sample_group_append, colorVal))
sample_color_dict = OrderedDict(
sorted(sample_color_dict.items(),
key=lambda x: x[0],
reverse=True))
for name, group in sample_groups:
ax.plot(np.ravel(group[samples_x]),
np.ravel(group[samples_y]),
marker=samp_marker, linestyle='', ms=samp_ms,
color=sample_color_dict[name],
label=name, alpha=samp_alpha)
sample_color_dict = None
if (sample_metadata is not None and sample_color_dict is not None):
sample_groups = samples.groupby(sample_metadata[sample_color_category])
if len(sample_drop_list) > 0:
def dropf(x):
return x not in sample_drop_list
index_drop = sample_metadata[sample_color_category].apply(dropf)
samp_r = samples.loc[sample_metadata.index]
samp_met_r = sample_metadata.loc[index_drop]
sample_color_dict = OrderedDict(
sorted(sample_color_dict.items(),
key=lambda x: x[0],
reverse=True))
sample_groups = samp_r.groupby(samp_met_r[sample_color_category])
for name, group in sample_groups:
ax.plot(np.ravel(group[samples_x]),
np.ravel(group[samples_y]),
marker=samp_marker, linestyle='', ms=samp_ms,
color=sample_color_dict[name],
label=name, alpha=samp_alpha)
if not sample_drop_list:
sample_color_dict = OrderedDict(
sorted(sample_color_dict.items(),
key=lambda x: x[0],
reverse=True))
for name, group in sample_groups:
ax.plot(np.ravel(group[samples_x]),
np.ravel(group[samples_y]),
marker=samp_marker, linestyle='', ms=samp_ms,
color=sample_color_dict[name],
label=name, alpha=samp_alpha)
sample_color_dict = None
ax2 = ax.twinx()
if sample_color_category is not None:
ax.legend(title=sample_color_category, loc=samp_leg_loc, numpoints=1)
else:
ax.legend(loc=samp_leg_loc, numpoints=1)
ax2.set_ylim(ax.get_ylim())
recs = []
feature = []
otu_feature_append = []
colorVal = []
if (features is not None and feature_metadata is None):
for index, row in features.iterrows():
ax2.arrow(0, 0, row[features_x], row[features_y],
width=arrow_width, head_width=arrow_head,
alpha=feat_alpha, color='b')
if (features is not None and
feature_metadata is not None and
feature_color_category is None):
otu_feature_append = []
feature_groups = features.groupby(feature_metadata.columns[0])
for name, group in feature_groups:
otu_feature_append.append(name)
otu_feature_append = sorted(list(set(otu_feature_append)))
cNorm = colors.Normalize(vmin=0, vmax=(len(otu_feature_append)-1))
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=feature_colors)
for index, row in enumerate(otu_feature_append):
colorVal.append(scalarMap.to_rgba(index))
feature_color_dict = dict(zip(otu_feature_append, colorVal))
feature_color_dict = OrderedDict(
sorted(feature_color_dict.items(),
key=lambda x: x[0]))
for name, group in feature_groups:
for i in range(group[features_x].shape[0]):
_id = group.index[i]
ax2.arrow(0, 0,
group.loc[_id, features_x],
group.loc[_id, features_y],
width=arrow_width, head_width=arrow_head,
alpha=feat_alpha,
color=feature_color_dict[name])
for key, value in feature_color_dict.items():
recs.append(mpatches.Rectangle((0, 0), 1, 1,
fc=feature_color_dict[key],
alpha=feat_alpha))
feature.append(key)
ax2.legend(recs, feature, loc=feat_leg_loc,
title=feature_color_category)
feature_color_dict = None
if (features is not None and
feature_metadata is not None and
feature_color_category is not None):
feature_groups = features.groupby(
feature_metadata[feature_color_category])
if feature_color_dict is None:
otu_feature_append = []
feature_groups = features.groupby(
feature_metadata[feature_color_category])
for name, group in feature_groups:
otu_feature_append.append(name)
otu_feature_append = sorted(list(set(otu_feature_append)))
cNorm = colors.Normalize(vmin=0,
vmax=(len(otu_feature_append)-1))
scalarMap = cmx.ScalarMappable(norm=cNorm, cmap=feature_colors)
for index, row in enumerate(otu_feature_append):
colorVal.append(scalarMap.to_rgba(index))
feature_color_dict = dict(zip(otu_feature_append, colorVal))
feature_color_dict = OrderedDict(
sorted(feature_color_dict.items(),
key=lambda x: x[0]))
for name, group in feature_groups:
for i in range(group[features_x].shape[0]):
_id = group.index[i]
kwds = {}
if feature_zorder is not None:
kwds['zorder'] = feature_zorder[name]
ax2.arrow(0, 0,
group.loc[_id, features_x],
group.loc[_id, features_y],
width=arrow_width, head_width=arrow_head,
alpha=feat_alpha,
color=feature_color_dict[name],
**kwds)
for key, value in feature_color_dict.items():
recs.append(mpatches.Rectangle((0, 0), 1, 1,
fc=feature_color_dict[key],
alpha=feat_alpha))
feature.append(key)
ax2.legend(recs, feature, loc=feat_leg_loc,
title=feature_color_category)
if features is not None:
xmin = min([min(samples.ix[:, 0]), min(features.ix[:, 0])])
xmax = max([max(samples.ix[:, 0]), max(features.ix[:, 0])])
ymin = min([min(samples.ix[:, 1]), min(features.ix[:, 1])])
ymax = max([max(samples.ix[:, 1]), max(features.ix[:, 1])])
xpad = (xmax - xmin) * x_pad
ypad = (ymax - ymin) * y_pad
ax.set_zorder(ax2.get_zorder()+(1-feature_order))
ax.patch.set_visible(False)
ax.set_xlim(xmin - xpad, xmax + xpad)
ax.set_ylim(ymin - ypad, ymax + ypad)
ax2.set_xlim(xmin - xpad, xmax + xpad)
ax2.set_ylim(ymin - ypad, ymax + ypad)
ax2.set_yticks([])
else:
xmin = min([min(samples.ix[:, 0])])
xmax = max([max(samples.ix[:, 0])])
ymin = min([min(samples.ix[:, 1])])
ymax = max([max(samples.ix[:, 1])])
xpad = (xmax - xmin) * x_pad
ypad = (ymax - ymin) * y_pad
ax.set_xlim(xmin - xpad, xmax + xpad)
ax.set_ylim(ymin - ypad, ymax + ypad)
ax2.set_yticks([])
if len(eigenvalues) > 2:
e_0 = eigenvalues[0]
e_1 = eigenvalues[1]
ax.set_xlabel('PC 1 ({:.2%})'.format(e_0**2/sum(eigenvalues**2)))
ax.set_ylabel('PC 2 ({:.2%})'.format(e_1**2/sum(eigenvalues**2)))
return fig, [ax, ax2]
| [
"[email protected]"
] | |
9565c6008d359c9ef4776815146440ba81e91136 | a4f2d74559b00191454d7d3492f8d35d118332b5 | /src/atra/plot/network_air.py | 15d33b561b56963e9c5b77d2ee76eb5a2084872d | [
"MIT"
] | permissive | nfontan/argentina-transport | c4b6f06a33034ce1c3ce905f901ff5086013b38b | f1583b077844e6b20b2c81144dec0872c88bdb80 | refs/heads/master | 2023-03-18T10:23:44.580084 | 2019-08-11T22:01:34 | 2019-08-11T22:01:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,910 | py | """Plot air network
"""
import os
import cartopy.crs as ccrs
import geopandas
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from atra.utils import load_config, get_axes, plot_basemap, scale_bar, plot_basemap_labels, save_fig
def main(config):
"""Read shapes, plot map
"""
data_path = config['paths']['data']
# data
output_file = os.path.join(config['paths']['figures'], 'network-air-map.png')
air_edge_file = os.path.join(data_path, 'network', 'air_edges.shp')
air_node_file = os.path.join(data_path, 'network', 'air_nodes.shp')
# air_usage_file = os.path.join(data_path, 'usage', 'air_passenger.csv')
# basemap
proj_lat_lon = ccrs.PlateCarree()
ax = get_axes()
plot_basemap(ax, data_path)
scale_bar(ax, location=(0.8, 0.05))
plot_basemap_labels(ax, data_path, include_regions=False)
colors = {
'Air route': '#252525',
'Airport': '#d95f0e'
}
# edges
edges = geopandas.read_file(air_edge_file)
ax.add_geometries(
list(edges.geometry),
crs=proj_lat_lon,
linewidth=1.5,
edgecolor=colors['Air route'],
facecolor='none',
zorder=4
)
# edges merged with usage
# usage = pandas.read_csv(air_usage_file)
# edges_with_usage = edges.merge(usage[['id', 'passengers_2016']], on='id')
# nodes
nodes = geopandas.read_file(air_node_file)
ax.scatter(
list(nodes.geometry.x),
list(nodes.geometry.y),
transform=proj_lat_lon,
facecolor=colors['Airport'],
s=12,
zorder=5
)
# legend
legend_handles = [
mpatches.Patch(color=color, label=label)
for label, color in colors.items()
]
plt.legend(handles=legend_handles, loc='lower left')
# save
save_fig(output_file)
if __name__ == '__main__':
CONFIG = load_config()
main(CONFIG)
| [
"[email protected]"
] | |
2264e15313f69e818f1bbdd697aae79e592592ad | 4273f162abb12ef1939271c2aabee9547ac6afee | /crowd/utils/config.py | 054ef5faf1c8d84308890a15230a3b194adf10e5 | [] | no_license | xiyuhao/subins_tutorials | 2717c47aac0adde099432e5dfd231606bf45a266 | acbe4fe16483397e9b0f8e240ca23bdca652b92d | refs/heads/master | 2023-07-28T13:42:41.445399 | 2021-09-12T11:02:37 | 2021-09-12T11:02:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | '''
config.py 0.0.1
Date: January 15, 2019
Last modified: June 14, 2019
Author: Subin. Gopi([email protected])
# Copyright(c) 2019, Subin Gopi
# All rights reserved.
# WARNING! All changes made in this file will be lost!
Description
None.
'''
def get_conig():
return 'Linux', 'maya', '2016', '2.7.5'
def get_tool_kit():
tools = {
'create': ['create', 'Create', '0.0.1'],
'publish': ['publish', 'Publish', '0.0.1']
}
return tools
| [
"[email protected]"
] | |
6ef4885b55b2959e9db0e836280c30f7bf832629 | a2860dd0acbb7b85d30fad1be52512fa7bc4c611 | /cerebralcortex/core/file_manager/read_handler.py | f1d9eb4c750b94afc422959e5232bc2448e3825c | [
"BSD-2-Clause"
] | permissive | hippietilley/CerebralCortex-Kernel | b1783c8156744f7809c9a3810b990c45945da936 | c7dac033d9561f14bdb72430577db6ae4e3c7911 | refs/heads/master | 2020-04-18T15:15:47.199601 | 2019-01-18T16:05:14 | 2019-01-18T16:05:14 | 167,607,878 | 0 | 0 | BSD-2-Clause | 2019-01-25T20:16:54 | 2019-01-25T20:16:54 | null | UTF-8 | Python | false | false | 6,069 | py | # Copyright (c) 2018, MD2K Center of Excellence
# - Nasir Ali <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
import gzip
import json
import traceback
from typing import List
from pympler import asizeof
from cerebralcortex.core.datatypes.datastream import DataStream, DataPoint
from cerebralcortex.core.datatypes.stream_types import StreamTypes
class ReadHandler():
def read_file(self, filepath: str) -> str:
"""
Read a file and return contents
:param filepath:
:return: file contents
:rtype: str
"""
if not filepath:
raise ValueError("File path is required field.")
with open(filepath, "r") as file:
data = file.read()
file.close()
return data
def file_processor(self, msg: dict, zip_filepath: str) -> DataStream:
"""
Process a Kafka or MySQL msg. Parse compressed files. Convert json metadata and data in DataStream object.
:param msg:
:param zip_filepath:
:return: DataStream object with metadata and data
:rtype: DataStream
"""
if not isinstance(msg["metadata"], dict):
metadata_header = json.loads(msg["metadata"])
else:
metadata_header = msg["metadata"]
identifier = metadata_header["identifier"]
owner = metadata_header["owner"]
name = metadata_header["name"]
data_descriptor = metadata_header["data_descriptor"]
execution_context = metadata_header["execution_context"]
if "annotations" in metadata_header:
annotations = metadata_header["annotations"]
else:
annotations = {}
if "stream_type" in metadata_header:
stream_type = metadata_header["stream_type"]
else:
stream_type = StreamTypes.DATASTREAM
try:
gzip_file_content = self.get_gzip_file_contents(zip_filepath + msg["filename"])
datapoints = list(map(lambda x: self.row_to_datapoint(x), gzip_file_content.splitlines()))
# self.rename_file(zip_filepath + msg["filename"])
start_time = datapoints[0].start_time
end_time = datapoints[len(datapoints) - 1].end_time
ds = DataStream(identifier,
owner,
name,
data_descriptor,
execution_context,
annotations,
stream_type,
start_time,
end_time,
datapoints)
return ds
except Exception as e:
self.logging.log(error_message="In Kafka preprocessor - Error in processing file: " + str(
msg["filename"]) + " Owner-ID: " + owner + "Stream Name: " + name + " - " + str(traceback.format_exc()),
error_type=self.logtypes.CRITICAL)
return DataStream
def row_to_datapoint(self, row: str) -> DataPoint:
"""
Format data based on mCerebrum's current GZ-CSV format into what Cerebral
Cortex expects
:param row:
:return: single DataPoint
:rtype: DataPoint
"""
ts, offset, values = row.split(',', 2)
ts = int(ts) / 1000.0
offset = int(offset)
timezone = datetime.timezone(datetime.timedelta(milliseconds=offset))
ts = datetime.datetime.fromtimestamp(ts, timezone)
return DataPoint(start_time=ts, sample=values)
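    # Worked example (hypothetical row, added for illustration): the raw line
    # "1518609600000,-21600000,0.97" splits into
    #   ts     = 1518609600000 ms -> 1518609600.0 s
    #   offset = -21600000 ms, i.e. a UTC-06:00 timezone
    #   values = "0.97" (kept as the raw string sample)
    # and is returned as DataPoint(start_time=2018-02-14 06:00:00-06:00, sample="0.97").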
def get_gzip_file_contents(self, filepath: str) -> str:
"""
Read and return gzip compressed file contents
:param filepath:
:return: gzip_file_content
:rtype: str
"""
fp = gzip.open(filepath)
gzip_file_content = fp.read()
fp.close()
gzip_file_content = gzip_file_content.decode('utf-8')
return gzip_file_content
def get_chunk_size(self, data: List[DataPoint]) -> int:
"""
get chunk size of DataPoint objects in 0.75 MB blocks. This method is computationally heavy and not scalable.
:param data:
:return: size of a list
:rtype: int
"""
if len(data) > 0:
chunk_size = 750000 / (asizeof.asizeof(data) / len(data)) # 0.75MB chunk size without metadata
return round(chunk_size)
else:
return 0
def chunks(data: str, max_len: int) -> str:
"""
Yields max_len sized chunks with the remainder in the last
:param data:
:param max_len:
"""
# TODO: default yield value needs to be set
for i in range(0, len(data), max_len):
yield data[i:i + max_len]
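# Worked example (added for illustration): chunks("abcdefg", 3) yields the
# pieces "abc", "def" and "g" -- every chunk has max_len characters except,
# possibly, the trailing remainder.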
| [
"[email protected]"
] | |
996925f5530dd5af83fba2da9dd72a0012fcef11 | a0f27e45f598a5c4145efa44ae05edf431b7e06f | /seqmod/modules/ff.py | 4d6a8e6043e089cd4a0ed05da4145de8fd6bf3c5 | [] | no_license | cmry/seqmod | af4d2e6227247f5d3630a53818328cea493672f4 | ddc57cd36c6b6204263db770f4c98923ffb4ba0b | refs/heads/master | 2021-09-11T23:50:01.261133 | 2018-01-09T15:51:23 | 2018-01-09T15:51:23 | 113,448,571 | 0 | 0 | null | 2017-12-07T12:31:43 | 2017-12-07T12:31:43 | null | UTF-8 | Python | false | false | 5,677 | py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
class MLP(nn.Module):
"""
Standard MLP
"""
def __init__(self, inp_size, hid_size, nb_classes,
nb_layers=1, dropout=0.0, act='relu'):
self.inp_size, self.hid_size = inp_size, hid_size
self.nb_layers, self.nb_classes = nb_layers, nb_classes
self.dropout, self.act = dropout, act
super(MLP, self).__init__()
layers = []
for i in range(nb_layers):
layers.append(nn.Linear(inp_size, hid_size))
inp_size = hid_size
self.layers = nn.ModuleList(layers)
self.output = nn.Linear(hid_size, nb_classes)
def forward(self, inp):
"""
:param inp: torch.FloatTensor (batch_size x inp_size)
:return: torch.FloatTensor (batch_size x nb_classes)
"""
# hidden layers
for layer in self.layers:
out = layer(inp)
if self.act is not None:
out = getattr(F, self.act)(out)
if self.dropout > 0:
out = F.dropout(out, p=self.dropout, training=self.training)
inp = out
# output projection
out = self.output(out)
return out
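# Usage sketch (added for illustration, not part of the original module): a
# 2-layer MLP mapping 100-d inputs to 5 classes; shapes follow the docstring
# above. The sizes and hyper-parameters below are arbitrary example values.
def _mlp_usage_example():
    model = MLP(inp_size=100, hid_size=64, nb_classes=5, nb_layers=2, dropout=0.2)
    inp = torch.randn(8, 100)      # (batch_size x inp_size)
    logits = model(inp)            # (batch_size x nb_classes)
    return logits.size()           # torch.Size([8, 5])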
class MaxOut(nn.Module):
def __init__(self, in_dim, out_dim, k):
"""
Implementation of MaxOut:
h_i^{maxout} = max_{j \in [1, ..., k]} x^T W_{..., i, j} + b_{i, j}
where W is in R^{D x M x K}, D is the input size, M is the output size
and K is the number of pieces to max-pool from. (i.e. i ranges over M,
j ranges over K and ... corresponds to the input dimension)
Parameters:
-----------
in_dim: int, Input dimension
out_dim: int, Output dimension
k: int, number of "pools" to max over
Returns:
--------
        out: torch.Tensor (batch x out_dim)
"""
self.in_dim, self.out_dim, self.k = in_dim, out_dim, k
super(MaxOut, self).__init__()
self.projection = nn.Linear(in_dim, k * out_dim)
def forward(self, inp):
"""
Because of the linear projection we are bound to 1-d input
(excluding batch-dim), therefore there is no need to generalize
the implementation to n-dimensional input.
"""
batch, in_dim = inp.size()
# (batch x self.k * self.out_dim) -> (batch x self.out_dim x self.k)
out = self.projection(inp).view(batch, self.out_dim, self.k)
out, _ = out.max(2)
return out
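# Shape sketch (added for illustration, not part of the original module): with
# in_dim=20, out_dim=10, k=4 the layer projects (batch x 20) to
# (batch x 10 x 4) internally and max-pools over the last axis.
def _maxout_usage_example():
    layer = MaxOut(in_dim=20, out_dim=10, k=4)
    out = layer(torch.randn(8, 20))
    return out.size()              # torch.Size([8, 10])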
class Highway(torch.nn.Module):
"""
Reference:
https://github.com/allenai/allennlp/blob/master/allennlp/modules/highway.py
A `Highway layer <https://arxiv.org/abs/1505.00387>`_ does a gated
combination of a linear transformation and a non-linear transformation
of its input. y = g * x + (1 - g) * f(A(x)), where A
is a linear transformation, `f` is an element-wise non-linearity,
and `g` is an element-wise gate, computed as sigmoid(B(x)).
Parameters
----------
input_dim: int, The dimensionality of `x`.
num_layers: int, optional, The number of highway layers.
activation: str or class, if string it should be an activation function
from torch.nn, otherwise it should be a class that will be instantiated
with kwargs for each layer.
dropout: float, dropout rate before the nonlinearity
"""
def __init__(self, input_dim, num_layers=1, activation='ReLU', dropout=0.0,
**kwargs):
self.input_dim = input_dim
self.dropout = dropout
super(Highway, self).__init__()
layers = []
for layer in range(num_layers):
if isinstance(activation, type): # custom activation class
nonlinear = activation(**kwargs)
else: # assume string
nonlinear = getattr(nn, activation)()
linear = nn.Linear(input_dim, input_dim * 2)
# We should bias the highway layer to just carry its input forward.
# We do that by setting the bias on B(x) to be positive, because
            # that means `g` will be biased to be high, so we will carry the
# input forward. The bias on `B(x)` is the second half of the bias
# vector in each Linear layer.
linear.bias[input_dim:].data.fill_(1)
linear.bias.custom = True
layers.append(linear)
layers.append(nonlinear)
self.layers = torch.nn.ModuleList(layers)
def forward(self, inputs):
current_input = inputs
for i in range(0, len(self.layers), 2):
layer, activation = self.layers[i], self.layers[i+1]
proj, linear = layer(current_input), current_input
proj = F.dropout(proj, p=self.dropout, training=self.training)
nonlinear = activation(proj[:, 0:self.input_dim])
gate = F.sigmoid(proj[:, self.input_dim:(2 * self.input_dim)])
# apply gate
current_input = gate * linear + (1 - gate) * nonlinear
return current_input
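# Usage sketch (added for illustration, not part of the original module): a
# 2-layer highway block over 50-d inputs; the output keeps the input
# dimensionality because each layer gates between the identity path and the
# transformed path.
def _highway_usage_example():
    block = Highway(input_dim=50, num_layers=2, activation='ReLU', dropout=0.1)
    out = block(torch.randn(8, 50))
    return out.size()              # torch.Size([8, 50])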
# gracefully taken from:
# https://discuss.pytorch.org/t/solved-reverse-gradients-in-backward-pass/3589/4
class GradReverse(Function):
"Implementation of GRL from DANN (Domain Adaptation Neural Network) paper"
@staticmethod
def forward(ctx, x):
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.neg()
def grad_reverse(x):
"""
GRL must be placed between the feature extractor and the domain classifier
"""
return GradReverse.apply(x)
| [
"[email protected]"
] | |
fd75ee444727d1fd69c72d0457b9ea145dcba2b1 | 353def93fa77384ee3a5e3de98cfed318c480634 | /.history/week01/homework02/maoyanspiders/maoyanspiders/pipelines_20200627225401.py | 2181904c869c47b47b88084f2f73feb9b48ff6f0 | [] | no_license | ydbB/Python001-class01 | d680abc3ea1ccaeb610751e3488421417d381156 | ad80037ccfc68d39125fa94d2747ab7394ac1be8 | refs/heads/master | 2022-11-25T11:27:45.077139 | 2020-07-19T12:35:12 | 2020-07-19T12:35:12 | 272,783,233 | 0 | 0 | null | 2020-06-16T18:28:15 | 2020-06-16T18:28:15 | null | UTF-8 | Python | false | false | 523 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
class MaoyanspidersPipeline(object):
def process_item(self, item, spider):
films_name = item['films_name']
films_type = item['films_type']
release_time = item['release_time']
output = f'|{films_name}|\t|{films_type}|\t|{release_time}|\n\n'
with open('./week01/homework02/top10.scv')
| [
"[email protected]"
] | |
8536e0cce05e6fee39144b2b1c6e1b5c482b510f | 8064bbf3dadc70c3aceeecd885bc69cfddf06549 | /ZeeAnalyzer/test/runElectronPlots_Skim_v1.py | 9dc1a84ee437d62c8d259a0380e2392dbbaec102 | [] | no_license | taroni/ZeeAnalyzer | 6faf7e4d9785ab9b15559d096a2b98d5e7483be7 | 44046f7095a22a9b5486a5ab0aee2dee52b430ae | refs/heads/master | 2022-01-17T20:12:06.695267 | 2017-11-24T13:51:25 | 2017-11-24T13:51:25 | 110,087,363 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,560 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("TestElectrons")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff")
process.GlobalTag = GlobalTag(process.GlobalTag, '92X_upgrade2017_realistic_Candidate_forECALStudies', '')
# input
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10) )
inputFilesData = cms.untracked.vstring(
'/store/data/Run2017D/DoubleEG/MINIAOD/PromptReco-v1/000/302/030/00000/E69F63AA-EE8E-E711-8121-02163E019BAF.root',
'/store/data/Run2017D/DoubleEG/MINIAOD/PromptReco-v1/000/302/031/00000/008329E5-368F-E711-A1CD-02163E01A21D.root',
'/store/data/Run2017D/DoubleEG/MINIAOD/PromptReco-v1/000/302/031/00000/04BB9D82-398F-E711-B74B-02163E019BDF.root',
'/store/data/Run2017D/DoubleEG/MINIAOD/PromptReco-v1/000/302/031/00000/407638D4-4B8F-E711-AC24-02163E01437E.root',
'/store/data/Run2017D/DoubleEG/MINIAOD/PromptReco-v1/000/302/031/00000/44B91A0E-488F-E711-A372-02163E019CA5.root',
'/store/data/Run2017D/DoubleEG/MINIAOD/PromptReco-v1/000/302/031/00000/5479D9DF-3C8F-E711-BCF4-02163E01A5EB.root',
'/store/data/Run2017D/DoubleEG/MINIAOD/PromptReco-v1/000/302/031/00000/6496C386-518F-E711-B09E-02163E01341D.root'
)
inputFilesMC = cms.untracked.vstring(
'/store/mc/RunIISummer17DRPremix/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/AODSIM/92X_upgrade2017_realistic_v10_ext1-v2/10000/00CDB4C7-5C93-E711-AF33-02163E0142CA.root',
'/store/mc/RunIISummer17DRPremix/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/AODSIM/92X_upgrade2017_realistic_v10_ext1-v2/10000/027E1441-3994-E711-BFBD-02163E01A6D8.root',
'/store/mc/RunIISummer17DRPremix/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/AODSIM/92X_upgrade2017_realistic_v10_ext1-v2/10000/02FD6F07-5D93-E711-85AC-02163E01A334.root',
'/store/mc/RunIISummer17DRPremix/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/AODSIM/92X_upgrade2017_realistic_v10_ext1-v2/10000/061B6C49-5793-E711-AF23-02163E011B7C.root',
'/store/mc/RunIISummer17DRPremix/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/AODSIM/92X_upgrade2017_realistic_v10_ext1-v2/10000/0A66322F-5793-E711-9184-02163E01A2BD.root',
'/store/mc/RunIISummer17DRPremix/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/AODSIM/92X_upgrade2017_realistic_v10_ext1-v2/10000/0EFBF8C4-5C93-E711-94C9-02163E012207.root',
'/store/mc/RunIISummer17DRPremix/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/AODSIM/92X_upgrade2017_realistic_v10_ext1-v2/10000/14FDD26B-7493-E711-8B21-001E67792532.root'
)
inputFiles = inputFilesMC
outputFile = "electron_ntuple.root"
process.source = cms.Source ("PoolSource", fileNames = inputFiles )
process.ntupler = cms.EDAnalyzer(
'ElectronPlots',
beamSpot = cms.InputTag('offlineBeamSpot'),
genEventInfoProduct = cms.InputTag('generator'),
electrons = cms.InputTag("gedGsfElectrons"),
genParticles = cms.InputTag("genParticles"),
vertices = cms.InputTag("offlinePrimaryVertices"),
conversions = cms.InputTag('allConversions'),
isMC = cms.bool(True)
)
process.TFileService = cms.Service("TFileService",
fileName = cms.string( outputFile )
)
process.load("DPGAnalysis/Skims/ZElectronSkim_cff")
process.p = cms.Path(process.zdiElectronSequence*process.ntupler)
| [
"[email protected]"
] | |
e4ede140050fb8c241173693253719a2d0235799 | 799c9d7e1436232a02b213178ed0bda9d5c673e8 | /Chapter15/example2.py | b3c8ae550f592b84515a5257e78fd403bf0171f4 | [
"MIT"
] | permissive | KrisNguyen135/Advanced-Python-Programming-Second-Edition | a32578116805285983df8eac2dba584e0e77ea0d | e5d473e3efc5f6590028cb3f318e1f4aeb0aadd1 | refs/heads/main | 2023-08-14T18:14:09.087485 | 2021-09-19T17:57:03 | 2021-09-19T17:57:03 | 373,899,665 | 0 | 0 | MIT | 2021-06-04T16:23:55 | 2021-06-04T16:23:55 | null | UTF-8 | Python | false | false | 686 | py | import time
import threading
COUNT = 50000000
def countdown(n):
while n > 0:
n -= 1
###########################################################################
start = time.time()
countdown(COUNT)
print('Sequential program finished.')
print(f'Took {time.time() - start : .2f} seconds.')
###########################################################################
thread1 = threading.Thread(target=countdown, args=(COUNT // 2,))
thread2 = threading.Thread(target=countdown, args=(COUNT // 2,))
start = time.time()
thread1.start()
thread2.start()
thread1.join()
thread2.join()
print('Concurrent program finished.')
print(f'Took {time.time() - start : .2f} seconds.')
| [
"[email protected]"
] | |
a38e00bd15b7f69cd0501f9e2a9343c1615f935c | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/83f8ec00a3cf40a78f2fd2fa2dedcd3a.py | 76cb33cce300805428c65c319f8169dd9e0ef049 | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 270 | py | import re
def hey(input):
clean_input = input.strip()
if clean_input == '':
return 'Fine. Be that way!'
if clean_input.isupper():
return 'Whoa, chill out!'
if clean_input.endswith('?'):
return 'Sure.'
return 'Whatever.'
| [
"[email protected]"
] | |
bcee99a9a701fa5486e9c1baba62c7e8182cc60d | 45de7d905486934629730945619f49281ad19359 | /xlsxwriter/test/worksheet/test_date_time_01.py | 50180b311c94a156c9af3597a1e11e5fb953c101 | [
"BSD-2-Clause"
] | permissive | jmcnamara/XlsxWriter | 599e1d225d698120ef931a776a9d93a6f60186ed | ab13807a1be68652ffc512ae6f5791d113b94ee1 | refs/heads/main | 2023-09-04T04:21:04.559742 | 2023-08-31T19:30:52 | 2023-08-31T19:30:52 | 7,433,211 | 3,251 | 712 | BSD-2-Clause | 2023-08-28T18:52:14 | 2013-01-04T01:07:06 | Python | UTF-8 | Python | false | false | 6,888 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2023, John McNamara, [email protected]
#
import unittest
from datetime import datetime
from ...worksheet import Worksheet
class TestConvertDateTime(unittest.TestCase):
"""
Test the Worksheet _convert_date_time() method against dates extracted
from Excel.
"""
def setUp(self):
self.worksheet = Worksheet()
def test_convert_date_time(self):
"""Test the _convert_date_time() method."""
# Dates and corresponding numbers from an Excel file.
excel_dates = [
("1899-12-31T00:00:00.000", 0),
("1982-08-25T00:15:20.213", 30188.010650613425),
("2065-04-19T00:16:48.290", 60376.011670023145),
("2147-12-15T00:55:25.446", 90565.038488958337),
("2230-08-10T01:02:46.891", 120753.04359827546),
("2313-04-06T01:04:15.597", 150942.04462496529),
("2395-11-30T01:09:40.889", 181130.04838991899),
("2478-07-25T01:11:32.560", 211318.04968240741),
("2561-03-21T01:30:19.169", 241507.06272186342),
("2643-11-15T01:48:25.580", 271695.07529606484),
("2726-07-12T02:03:31.919", 301884.08578609955),
("2809-03-06T02:11:11.986", 332072.09111094906),
("2891-10-31T02:24:37.095", 362261.10042934027),
("2974-06-26T02:35:07.220", 392449.10772245371),
("3057-02-19T02:45:12.109", 422637.1147234838),
("3139-10-17T03:06:39.990", 452826.12962951389),
("3222-06-11T03:08:08.251", 483014.13065105322),
("3305-02-05T03:19:12.576", 513203.13834),
("3387-10-01T03:29:42.574", 543391.14563164348),
("3470-05-27T03:37:30.813", 573579.15105107636),
("3553-01-21T04:14:38.231", 603768.17683137732),
("3635-09-16T04:16:28.559", 633956.17810832174),
("3718-05-13T04:17:58.222", 664145.17914608796),
("3801-01-06T04:21:41.794", 694333.18173372687),
("3883-09-02T04:56:35.792", 724522.20596981479),
("3966-04-28T05:25:14.885", 754710.2258667245),
("4048-12-21T05:26:05.724", 784898.22645513888),
("4131-08-18T05:46:44.068", 815087.24078782403),
("4214-04-13T05:48:01.141", 845275.24167987274),
("4296-12-07T05:53:52.315", 875464.24574438657),
("4379-08-03T06:14:48.580", 905652.26028449077),
("4462-03-28T06:46:15.738", 935840.28212659725),
("4544-11-22T07:31:20.407", 966029.31343063654),
("4627-07-19T07:58:33.754", 996217.33233511576),
("4710-03-15T08:07:43.130", 1026406.3386936343),
("4792-11-07T08:29:11.091", 1056594.3536005903),
("4875-07-04T09:08:15.328", 1086783.3807329629),
("4958-02-27T09:30:41.781", 1116971.3963169097),
("5040-10-23T09:34:04.462", 1147159.3986627546),
("5123-06-20T09:37:23.945", 1177348.4009715857),
("5206-02-12T09:37:56.655", 1207536.4013501736),
("5288-10-08T09:45:12.230", 1237725.406391551),
("5371-06-04T09:54:14.782", 1267913.412671088),
("5454-01-28T09:54:22.108", 1298101.4127558796),
("5536-09-24T10:01:36.151", 1328290.4177795255),
("5619-05-20T12:09:48.602", 1358478.5068125231),
("5702-01-14T12:34:08.549", 1388667.5237100578),
("5784-09-08T12:56:06.495", 1418855.5389640625),
("5867-05-06T12:58:58.217", 1449044.5409515856),
("5949-12-30T12:59:54.263", 1479232.5416002662),
("6032-08-24T13:34:41.331", 1509420.5657561459),
("6115-04-21T13:58:28.601", 1539609.5822754744),
("6197-12-14T14:02:16.899", 1569797.5849178126),
("6280-08-10T14:36:17.444", 1599986.6085352316),
("6363-04-06T14:37:57.451", 1630174.60969272),
("6445-11-30T14:57:42.757", 1660363.6234115392),
("6528-07-26T15:10:48.307", 1690551.6325035533),
("6611-03-22T15:14:39.890", 1720739.635183912),
("6693-11-15T15:19:47.988", 1750928.6387498612),
("6776-07-11T16:04:24.344", 1781116.6697262037),
("6859-03-07T16:22:23.952", 1811305.6822216667),
("6941-10-31T16:29:55.999", 1841493.6874536921),
("7024-06-26T16:58:20.259", 1871681.7071789235),
("7107-02-21T17:04:02.415", 1901870.7111390624),
("7189-10-16T17:18:29.630", 1932058.7211762732),
("7272-06-11T17:47:21.323", 1962247.7412190163),
("7355-02-05T17:53:29.866", 1992435.7454845603),
("7437-10-02T17:53:41.076", 2022624.7456143056),
("7520-05-28T17:55:06.044", 2052812.7465977315),
("7603-01-21T18:14:49.151", 2083000.7602910995),
("7685-09-16T18:17:45.738", 2113189.7623349307),
("7768-05-12T18:29:59.700", 2143377.7708298611),
("7851-01-07T18:33:21.233", 2173566.773162419),
("7933-09-02T19:14:24.673", 2203754.8016744559),
("8016-04-27T19:17:12.816", 2233942.8036205554),
("8098-12-22T19:23:36.418", 2264131.8080603937),
("8181-08-17T19:46:25.908", 2294319.8239109721),
("8264-04-13T20:07:47.314", 2324508.8387420601),
("8346-12-08T20:31:37.603", 2354696.855296331),
("8429-08-03T20:39:57.770", 2384885.8610853008),
("8512-03-29T20:50:17.067", 2415073.8682530904),
("8594-11-22T21:02:57.827", 2445261.8770581828),
("8677-07-19T21:23:05.519", 2475450.8910360998),
("8760-03-14T21:34:49.572", 2505638.8991848612),
("8842-11-08T21:39:05.944", 2535827.9021521294),
("8925-07-04T21:39:18.426", 2566015.9022965971),
("9008-02-28T21:46:07.769", 2596203.9070343636),
("9090-10-24T21:57:55.662", 2626392.9152275696),
("9173-06-19T22:19:11.732", 2656580.9299968979),
("9256-02-13T22:23:51.376", 2686769.9332335186),
("9338-10-09T22:27:58.771", 2716957.9360968866),
("9421-06-05T22:43:30.392", 2747146.9468795368),
("9504-01-30T22:48:25.834", 2777334.9502990046),
("9586-09-24T22:53:51.727", 2807522.9540709145),
("9669-05-20T23:12:56.536", 2837711.9673210187),
("9752-01-14T23:15:54.109", 2867899.9693762613),
("9834-09-10T23:17:12.632", 2898088.9702850925),
("9999-12-31T23:59:59.000", 2958465.999988426),
]
for excel_date in excel_dates:
date = datetime.strptime(excel_date[0], "%Y-%m-%dT%H:%M:%S.%f")
got = self.worksheet._convert_date_time(date)
exp = excel_date[1]
self.assertEqual(got, exp)
| [
"[email protected]"
] | |
e445e69c3f4201e6a82f43d62ce2eed19fa2b935 | bb47a173f6e17e5b20f4d19830466767a47b5988 | /models/system_jobs.py | 76f165893f3bd50394463b7ab4e35ae9ae355a3b | [] | no_license | gmancoder/MailChimp-Management-Console | c0ad1d7d6995d573b962eaeb2e2194b1b2ff01d7 | 93a7fc61348c57c1c8d45f60a3614171f6307c95 | refs/heads/master | 2020-03-07T06:46:09.251318 | 2018-03-29T20:52:50 | 2018-03-29T20:52:50 | 127,331,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,980 | py | #!/usr/bin/env python
from models.shared import db
from models.imports import *
from models.exports import *
from models.tracking import *
import datetime
import os
import csv
class SystemJob(db.Model):
__tablename__ = "system_job"
id = db.Column(db.Integer, primary_key=True)
brand_id = db.Column(db.Integer, db.ForeignKey('brand.id'))
activity_type = db.Column(db.String(100))
activity_id = db.Column(db.Integer)
overall_status = db.Column(db.Integer, default=0)
status_message = db.Column(db.String(2000), nullable=True)
start_date = db.Column(db.TIMESTAMP, nullable=True)
end_date = db.Column(db.TIMESTAMP, nullable=True)
created = db.Column(db.DateTime)
created_by = db.Column(db.String(10))
updated = db.Column(db.DateTime)
updated_by = db.Column(db.String(10))
def __init__(self):
self.created = datetime.datetime.now()
self.updated = datetime.datetime.now()
def run(self):
import functions.activities as activities
self.start_date = datetime.datetime.now()
self.overall_status = 1
db.session.commit()
if self.activity_type == 'tracking_exports':
import functions.tracking as trk
activity = TrackingExportActivity.query.get(self.activity_id)
if not activities.init_activity(self, activity):
return False
export_def = TrackingExportDefinition.query.get(activity.tracking_export_definition_id)
if not activities.check_object(self, activity, export_def):
return False
            if export_def.target_activity == '':
                activities.process_job_error(self, activity, export_def, 'Target Activity not populated')
                return False
file_path = '%s%s' % (activities.get_ftp_path(self.brand_id, "exports"), export_def.file_path)
try:
ofh = open(file_path, 'w')
writer = csv.writer(ofh, delimiter=str(export_def.file_delimiter), quoting=csv.QUOTE_ALL)
efh = open('%s.log' % file_path, 'w')
log_writer = csv.writer(efh, delimiter=str(export_def.file_delimiter), quoting=csv.QUOTE_ALL)
log_writer.writerow(["Row", "RowData", "Message"])
except Exception as e:
activities.process_job_error(self, activity, export_def, str(e))
return False
status, res = trk.export_tracking_detail(export_def, writer, log_writer)
if not status:
activities.process_job_error(self, activity, export_def, res)
efh.close()
ofh.close()
return False
#Export Successful
activity.status = 2
self.overall_status = 2
self.status_message = "Export Completed"
activity.total_rows = res['total']
activity.errors = res['errors']
activity.end_date = datetime.datetime.now()
self.end_date = datetime.datetime.now()
db.session.commit()
efh.close()
ofh.close()
if(export_def.notify_addresses != None and len(export_def.notify_addresses) > 0):
status,res = activities.send_notification(export_def, self.activity_type, res)
print res
return True
if self.activity_type == "exports":
activity = ExportActivity.query.get(self.activity_id)
if not activities.init_activity(self, activity):
return False
export_def = ExportDefinition.query.get(activity.export_definition_id)
if not activities.check_object(self, activity, export_def):
return False
if export_def.fields.count() == 0:
msg = "No Fields Passed to Export"
activities.process_job_error(self, activity, export_def, msg)
return False
if export_def.target_objects.count() == 0:
msg = "No Objects to Export"
activities.process_job_error(self, activity, export_def, msg)
return False
file_path = '%s%s' % (activities.get_ftp_path(self.brand_id, "exports"), export_def.file_path)
try:
ofh = open(file_path, 'w')
writer = csv.writer(ofh, delimiter=str(export_def.file_delimiter), quoting=csv.QUOTE_ALL)
efh = open('%s.log' % file_path, 'w')
log_writer = csv.writer(efh, delimiter=str(export_def.file_delimiter), quoting=csv.QUOTE_ALL)
log_writer.writerow(["Row", "RowData", "Message"])
except Exception as e:
activities.process_job_error(self, activity, export_def, str(e))
return False
if export_def.target_type == "lists":
import functions.lists as lists
status, res = lists.export_lists(export_def, writer, log_writer)
elif export_def.target_type == "subscribers":
import functions.lists as lists
status, res = lists.export_subscribers(export_def, writer, log_writer)
elif export_def.target_type == "template_categories":
import functions.templates as tmpl
status, res = tmpl.export_categories(export_def, writer, log_writer)
elif export_def.target_type == "segment_subscribers":
import functions.segments as seg
status, res = seg.export_subscribers(export_def, writer, log_writer)
elif export_def.target_type == "campaign_tracking":
import functions.tracking as trk
status, res = trk.export_tracking_summary(export_def, writer, log_writer)
else:
msg = "Export target_type of '%s' not defined" % activity.target_type
activities.process_job_error(self, activity, export_def, msg)
efh.close()
ofh.close()
return False
if not status:
                activities.process_job_error(self, activity, export_def, res)
efh.close()
ofh.close()
return False
#Export Successful
activity.status = 2
self.overall_status = 2
self.status_message = "Export Completed"
activity.total_rows = res['total']
activity.errors = res['errors']
activity.end_date = datetime.datetime.now()
self.end_date = datetime.datetime.now()
db.session.commit()
efh.close()
ofh.close()
if(export_def.notify_addresses != None and len(export_def.notify_addresses) > 0):
status,res = activities.send_notification(export_def, self.activity_type, res)
print res
return True
elif self.activity_type == "imports":
activity = ImportActivity.query.get(self.activity_id)
if not activities.init_activity(self, activity):
return False
import_def = ImportDefinition.query.get(activity.import_definition_id)
if not activities.check_object(self, activity, import_def):
return False
file_path = '%s%s' % (activities.get_ftp_path(self.brand_id, "imports"), import_def.file_path)
if not os.path.exists(file_path):
msg = "File '%s' Not Found" % file_path
activities.process_job_error(self, activity, import_def, msg)
return False
try:
fh = open(file_path, 'r')
reader = csv.reader(fh, delimiter=str(import_def.file_delimiter), quoting=csv.QUOTE_ALL)
ofh = open('%s.log' % file_path, 'w')
writer = csv.writer(ofh, delimiter=",", quoting=csv.QUOTE_ALL)
writer.writerow(["Row", "RowData", "Message"])
except Exception as e:
msg = str(e)
activities.process_job_error(self, activity, import_def, msg)
#fh.close()
#ofh.close()
return False
if import_def.mappings.count() == 0:
msg = "Import contains no mappings"
writer.writerow(["0", "", msg])
activities.process_job_error(self, activity, import_def, msg)
fh.close()
ofh.close()
return False
if import_def.target_type == "lists":
import functions.lists as lists
status, res = lists.import_lists(import_def, reader, writer)
elif import_def.target_type == "subscribers":
import functions.lists as lists
status, res = lists.import_subscribers(import_def, reader, writer)
elif import_def.target_type == "template_categories":
import functions.templates as tmpl
status, res = tmpl.import_categories(import_def, reader, writer)
else:
msg = "Import target_type of '%s' not defined" % activity.target_type
writer.writerow(["0", "", msg])
activities.process_job_error(self, activity, import_def, msg)
fh.close()
ofh.close()
return False
if not status:
writer.writerow(["0", "", res])
                activities.process_job_error(self, activity, import_def, res)
fh.close()
ofh.close()
return False
#Import Successful
activity.status = 2
self.overall_status = 2
self.status_message = "Import Completed"
activity.total_rows = res['total']
activity.inserts = res['inserted']
activity.updates = res['updated']
activity.ignored = res['ignored']
activity.errors = res['errors']
activity.end_date = datetime.datetime.now()
self.end_date = datetime.datetime.now()
db.session.commit()
fh.close()
ofh.close()
if(import_def.notify_addresses != None and len(import_def.notify_addresses) > 0):
status,res = activities.send_notification(import_def, self.activity_type, res)
print res
return True
else:
self.overall_status = 3
self.status_message = "Activity type '%s' not defined" % self.activity_type
self.end_date = datetime.datetime.now()
db.session.commit()
return False
| [
"[email protected]"
] | |
d0bc437a44318504958582938e6622cdb01b23a9 | 33836016ea99776d31f7ad8f2140c39f7b43b5fe | /fip_collab/2016_01_20_gsh_database_red/integrate_parallel.py | 19794112e938772715aa9bad100ef9986045d7cc | [] | no_license | earthexploration/MKS-Experimentation | 92a2aea83e041bfe741048d662d28ff593077551 | 9b9ff3b468767b235e7c4884b0ed56c127328a5f | refs/heads/master | 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,702 | py | import numpy as np
# import itertools as it
import db_functions as fn
import gsh_hex_tri_L0_16 as gsh
import h5py
import time
import sys
tnum = np.int64(sys.argv[1])
filename = 'log_integrate_parallel_%s.txt' % str(tnum)
""" Load Y vec """
f = h5py.File('var_extract_total.hdf5', 'r')
var_set = f.get('var_set')
sinphi = np.sin(var_set[:, 2])
Y = var_set[:, 4]
f.close()
""" Initialize important variables """
# these indices are defined for the sampled db inputs
inc = 6 # degree increment for angular variables
sub2rad = inc*np.pi/180.
n_th = 60/inc # number of theta samples for FZ
n_p1 = 360/inc # number of phi1 samples for FZ
n_P = 90/inc # number of Phi samples for FZ
n_p2 = 60/inc # number of phi2 samples for FZ
N_p = 215 # number of GSH bases to evaluate
N_q = 9 # number of cosine bases to evaluate
L_th = np.pi/3.
n_eul = n_p1*n_P*n_p2
n_jobs = 10. # number of jobs submitted to cluster
""" Calculate basis function indices """
cmax = N_p*N_q # total number of permutations of basis functions
fn.WP(str(cmax), filename)
# cmat is the matrix containing all permutations of basis function indices
cmat = np.unravel_index(np.arange(cmax), [N_p, N_q])
cmat = np.array(cmat).T
""" Deal with the parallelization of this operation. specifically pick range
of indxmat to calculate """
n_ii = np.int64(np.ceil(np.float(cmax)/n_jobs))  # number of dot products per job
fn.WP(str(n_ii), filename)
ii_stt = tnum*n_ii # start index
if (tnum+1)*n_ii > cmax:
ii_end = cmax
else:
ii_end = (tnum+1)*n_ii # end index
msg = "ii_stt = %s" % ii_stt
fn.WP(msg, filename)
msg = "ii_end = %s" % ii_end
fn.WP(msg, filename)
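# Worked example (using the values defined above): cmax = N_p * N_q = 215 * 9
# = 1935 basis-function pairs. With n_jobs = 10, n_ii = ceil(1935 / 10) = 194,
# so job 0 covers indices [0, 194), job 1 covers [194, 388), ..., and the last
# job (tnum = 9) starts at 1746 and is clamped to ii_end = 1935.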
""" perform the orthogonal regressions """
coeff_prt = np.zeros(ii_end-ii_stt, dtype='complex128')
f = h5py.File('X_parts.hdf5', 'r')
c = 0
indxvec = gsh.gsh_basis_info()
bsz_gsh = ((np.pi**3)/3)/n_eul
bsz_cos = L_th/n_th
for ii in xrange(ii_stt, ii_end):
msg = str(ii)
fn.WP(msg, filename)
st = time.time()
p, q = cmat[ii, :]
basis_p = f.get('p_%s' % p)[...]
basis_q = f.get('q_%s' % q)[...]
ep_set = np.squeeze(basis_p)*basis_q
msg = "load time: %ss" % np.round(time.time()-st, 3)
fn.WP(msg, filename)
st = time.time()
l = indxvec[p, 0]
c_gsh = (1./(2.*l+1.))*(3./(2.*np.pi**2))
if q == 0:
c_cos = 1./L_th
else:
c_cos = 2./L_th
c_tot = c_gsh*c_cos*bsz_gsh*bsz_cos
tmp = c_tot*np.sum(Y*ep_set.conj()*sinphi)
del ep_set
coeff_prt[c] = tmp
msg = "regression time: %ss" % np.round(time.time()-st, 3)
fn.WP(msg, filename)
c += 1
f.close()
f = h5py.File('coeff_prt_%s.hdf5' % tnum, 'w')
f.create_dataset('coeff_prt', data=coeff_prt)
f.close()
| [
"[email protected]"
] | |
e1f86e42b1651b24b49a852a30e9ba287c876154 | 36126f91a2d5903483b84ba2d8be77e160803058 | /tests/test_model.py | 2fcf991147d46894fa7d917d389309988844fd6e | [
"Apache-2.0"
] | permissive | open-risk/transitionMatrix | 9962bb2656eb637ba56afc3adecf42bbe68f9593 | d05e75cbc251f01842dd8c5ce225894b988f4d99 | refs/heads/master | 2023-03-05T08:01:20.816425 | 2023-02-22T20:46:38 | 2023-02-22T20:46:38 | 110,365,127 | 73 | 29 | Apache-2.0 | 2022-12-08T11:37:12 | 2017-11-11T17:25:08 | Python | UTF-8 | Python | false | false | 5,126 | py | # encoding: utf-8
# (c) 2017-2022 Open Risk, all rights reserved
#
# TransitionMatrix is licensed under the Apache 2.0 license a copy of which is included
# in the source distribution of TransitionMatrix. This is notwithstanding any licenses of
# third-party software included in this distribution. You may not use this file except in
# compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pandas as pd
from scipy.linalg import expm
import transitionMatrix as tm
from transitionMatrix import source_path
ACCURATE_DIGITS = 7
class TestTransitionMatrix(unittest.TestCase):
'''
Default instance (2x2 identity matrix)
'''
def test_instantiate_matrix(self):
a = tm.TransitionMatrix()
self.assertAlmostEqual(a[0, 0], 1.0, places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[0, 1], 0.0, places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[1, 0], 0.0, places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[1, 1], 1.0, places=ACCURATE_DIGITS, msg=None, delta=None)
b = tm.TransitionMatrix([[1.0, 3.0], [1.0, 4.0]])
self.assertAlmostEqual(b[0, 0], 1.0, places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(b[0, 1], 3.0, places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(b[1, 0], 1.0, places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(b[1, 1], 4.0, places=ACCURATE_DIGITS, msg=None, delta=None)
def test_csv_io(self):
a = tm.TransitionMatrix()
a.to_csv("test.csv")
b = tm.TransitionMatrix(csv_file="test.csv")
self.assertAlmostEqual(a[0, 0], b[0, 0], places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[0, 1], b[0, 1], places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[1, 0], b[1, 0], places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[1, 1], b[1, 1], places=ACCURATE_DIGITS, msg=None, delta=None)
def test_json_io(self):
a = tm.TransitionMatrix()
a.to_json("test.json")
b = tm.TransitionMatrix(json_file="test.json")
self.assertAlmostEqual(a[0, 0], b[0, 0], places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[0, 1], b[0, 1], places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[1, 0], b[1, 0], places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[1, 1], b[1, 1], places=ACCURATE_DIGITS, msg=None, delta=None)
def test_validation(self):
a = tm.TransitionMatrix()
self.assertEqual(a.validate(), True)
b = tm.TransitionMatrix(values=[1.0, 3.0])
self.assertEqual(b.validate()[0][0], 'Matrix Dimensions Differ: ')
c = tm.TransitionMatrix(values=[[0.75, 0.25], [0.0, 0.9]])
self.assertEqual(c.validate()[0][0], 'Rowsum not equal to one: ')
d = tm.TransitionMatrix(values=[[0.75, 0.25], [-0.1, 1.1]])
self.assertEqual(d.validate()[0][0], 'Negative Probabilities: ')
def test_generator(self):
a = tm.TransitionMatrix([[1.0, 3.0], [1.0, 4.0]])
self.assertAlmostEqual(a[0, 0], expm(a.generator())[0, 0], places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[0, 1], expm(a.generator())[0, 1], places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[1, 0], expm(a.generator())[1, 0], places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a[1, 1], expm(a.generator())[1, 1], places=ACCURATE_DIGITS, msg=None, delta=None)
class TestTransitionMatrixSet(unittest.TestCase):
def test_instantiate_matrix_set(self):
periods = 5
a = tm.TransitionMatrixSet(dimension=2, periods=periods)
self.assertEqual(a.temporal_type, 'Incremental')
self.assertAlmostEqual(a.entries[0][0, 0], 1.0, places=ACCURATE_DIGITS, msg=None, delta=None)
self.assertAlmostEqual(a.entries[periods-1][0, 0], 1.0, places=ACCURATE_DIGITS, msg=None, delta=None)
pass
def test_set_validation(self):
a = tm.TransitionMatrixSet(dimension=2, periods=5)
self.assertEqual(a.validate(), True)
def test_set_cumulate_incremental(self):
a = tm.TransitionMatrix(values=[[0.6, 0.2, 0.2], [0.2, 0.6, 0.2], [0.2, 0.2, 0.6]])
a_set = tm.TransitionMatrixSet(values=a, periods=3, method='Copy', temporal_type='Incremental')
b_set = a_set
b_set.cumulate()
b_set.incremental()
self.assertAlmostEqual(a_set.entries[2][0, 0], b_set.entries[2][0, 0], places=ACCURATE_DIGITS, msg=None, delta=None)
pass
def test_set_csv_io(self):
pass
def test_set_json_io(self):
pass
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
d1df29cfcfd4dace82fa7e4e728abf9975d61641 | 94615230d5733282fb69ae5d35411c04a337d353 | /sublime-text-3/Packages/HTML-CSS-JS Prettify/src/py/utils/constants.py | 2d082675e21dc88f0f92e4c331ef81174d4f9007 | [
"Unlicense"
] | permissive | EnTeQuAk/dotfiles | fcef6a885891c3c132da3ea970dd21aee16b72c1 | b00890fa64a01b3a0e4eaaada13e90c1ef36b9e0 | refs/heads/master | 2023-01-04T21:09:37.330838 | 2019-09-16T14:49:45 | 2019-09-16T14:49:45 | 1,558,950 | 1 | 0 | Unlicense | 2023-01-04T05:01:57 | 2011-04-02T08:31:38 | Vim script | UTF-8 | Python | false | false | 623 | py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Various constants used by this plugin"""
from sublime import platform, version
PLATFORM = platform()
SUBLIME_VERSION = int(version())
DIAGNOSTICS_MARKER_BEGIN = b"### HTMLPrettify diagnostics begin ###"
DIAGNOSTICS_MARKER_END = b"### HTMLPrettify diagnostics end ###"
PRETTIFIED_CODE_MARKER_BEGIN = b"### HTMLPrettify prettified code begin ###"
PRETTIFIED_CODE_MARKER_END = b"### HTMLPrettify prettified code end ###"
| [
"[email protected]"
] | |
1b02cbd81e6d0d70e6c61416944602b6e863075c | e15b2ebbb9bf30a50d1e720624e9853aa269fc05 | /CoverSlasher/items.py | 563945642bcb4b389d94c4ea6bdfbf3d8b5cf0e0 | [] | no_license | Hodo7amShichiYA/WnacgCoverSlasher | e42ce1ec438558c2890d1bf34f9a192eb1ab4f81 | 5734d58caedb3defff622bb45de6cd073f8b656d | refs/heads/master | 2020-04-27T16:03:02.225473 | 2019-03-08T09:05:06 | 2019-03-08T09:05:06 | 174,470,262 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | # -*- coding: utf-8 -*-
import scrapy
class CoverslasherItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
    image_urls = scrapy.Field()    # stores the image URLs
    images = scrapy.Field()        # stores the downloaded image info
    image_names = scrapy.Field()   # stores the image names
| [
"[email protected]"
] | |
91f92c326775e661700467fed42718d9b09d1adb | e53d8488ffea72db3f3618f5639f2ddfa929f11b | /perpustakaan/migrations/0002_delete_buku.py | 63677b59c0a8886eaae0991e502bc4c513f0449e | [] | no_license | writerlab/perpus | 60473fa2b51d67617525bfc25b75656141e9529b | 9cf7676a543c3414ac2e7fca88c3d26ac403be3b | refs/heads/master | 2023-07-15T05:27:23.859264 | 2021-08-29T02:02:34 | 2021-08-29T02:02:34 | 262,490,216 | 5 | 10 | null | null | null | null | UTF-8 | Python | false | false | 288 | py | # Generated by Django 2.2.12 on 2020-05-11 07:13
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('perpustakaan', '0001_initial'),
]
operations = [
migrations.DeleteModel(
name='Buku',
),
]
| [
"[email protected]"
] | |
2a930f8fc17f6a4af9fdfaeb6ff31fb3020b1552 | 6be845bf70a8efaf390da28c811c52b35bf9e475 | /windows/Resources/Python/Core/Lib/lib2to3/main.py | 21120209a72ee4a781dc2b7ce9223426acd4d8bd | [] | no_license | kyeremalprime/ms | 228194910bf2ed314d0492bc423cc687144bb459 | 47eea098ec735b2173ff0d4e5c493cb8f04e705d | refs/heads/master | 2020-12-30T15:54:17.843982 | 2017-05-14T07:32:01 | 2017-05-14T07:32:01 | 91,180,709 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,385 | py | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: main.py
"""
Main program for 2to3.
"""
from __future__ import with_statement
import sys
import os
import difflib
import logging
import shutil
import optparse
from . import refactor
def diff_texts(a, b, filename):
"""Return a unified diff of two strings."""
a = a.splitlines()
b = b.splitlines()
return difflib.unified_diff(a, b, filename, filename, '(original)', '(refactored)', lineterm='')
class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool):
"""
Prints output to stdout.
"""
def __init__(self, fixers, options, explicit, nobackups, show_diffs):
self.nobackups = nobackups
self.show_diffs = show_diffs
super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)
def log_error(self, msg, *args, **kwargs):
self.errors.append((msg, args, kwargs))
self.logger.error(msg, *args, **kwargs)
def write_file(self, new_text, filename, old_text, encoding):
if not self.nobackups:
backup = filename + '.bak'
if os.path.lexists(backup):
try:
os.remove(backup)
except os.error as err:
self.log_message("Can't remove backup %s", backup)
try:
os.rename(filename, backup)
except os.error as err:
self.log_message("Can't rename %s to %s", filename, backup)
write = super(StdoutRefactoringTool, self).write_file
write(new_text, filename, old_text, encoding)
if not self.nobackups:
shutil.copymode(backup, filename)
def print_output(self, old, new, filename, equal):
if equal:
self.log_message('No changes to %s', filename)
else:
self.log_message('Refactored %s', filename)
if self.show_diffs:
diff_lines = diff_texts(old, new, filename)
try:
if self.output_lock is not None:
with self.output_lock:
for line in diff_lines:
print line
sys.stdout.flush()
else:
for line in diff_lines:
print line
except UnicodeEncodeError:
warn("couldn't encode %s's diff for your terminal" % (
filename,))
return
return
def warn(msg):
print >> sys.stderr, 'WARNING: %s' % (msg,)
def main(fixer_pkg, args=None):
"""Main program.
Args:
fixer_pkg: the name of a package where the fixers are located.
args: optional; a list of command line arguments. If omitted,
sys.argv[1:] is used.
Returns a suggested exit status (0, 1, 2).
"""
parser = optparse.OptionParser(usage='2to3 [options] file|dir ...')
parser.add_option('-d', '--doctests_only', action='store_true', help='Fix up doctests only')
parser.add_option('-f', '--fix', action='append', default=[], help='Each FIX specifies a transformation; default: all')
parser.add_option('-j', '--processes', action='store', default=1, type='int', help='Run 2to3 concurrently')
parser.add_option('-x', '--nofix', action='append', default=[], help='Prevent a transformation from being run')
parser.add_option('-l', '--list-fixes', action='store_true', help='List available transformations')
parser.add_option('-p', '--print-function', action='store_true', help='Modify the grammar so that print() is a function')
parser.add_option('-v', '--verbose', action='store_true', help='More verbose logging')
parser.add_option('--no-diffs', action='store_true', help="Don't show diffs of the refactoring")
parser.add_option('-w', '--write', action='store_true', help='Write back modified files')
parser.add_option('-n', '--nobackups', action='store_true', default=False, help="Don't write backups for modified files")
refactor_stdin = False
flags = {}
options, args = parser.parse_args(args)
if not options.write and options.no_diffs:
warn("not writing files and not printing diffs; that's not very useful")
if not options.write and options.nobackups:
parser.error("Can't use -n without -w")
if options.list_fixes:
print 'Available transformations for the -f/--fix option:'
for fixname in refactor.get_all_fix_names(fixer_pkg):
print fixname
if not args:
return 0
if not args:
print >> sys.stderr, 'At least one file or directory argument required.'
print >> sys.stderr, 'Use --help to show usage.'
return 2
if '-' in args:
refactor_stdin = True
if options.write:
print >> sys.stderr, "Can't write to stdin."
return 2
if options.print_function:
flags['print_function'] = True
level = logging.DEBUG if options.verbose else logging.INFO
logging.basicConfig(format='%(name)s: %(message)s', level=level)
avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg))
unwanted_fixes = set((fixer_pkg + '.fix_' + fix for fix in options.nofix))
explicit = set()
if options.fix:
all_present = False
for fix in options.fix:
if fix == 'all':
all_present = True
else:
explicit.add(fixer_pkg + '.fix_' + fix)
        if all_present:
            requested = avail_fixes.union(explicit)
        else:
            requested = explicit
    else:
        requested = avail_fixes
fixer_names = requested.difference(unwanted_fixes)
rt = StdoutRefactoringTool(sorted(fixer_names), flags, sorted(explicit), options.nobackups, not options.no_diffs)
if rt.errors or refactor_stdin:
rt.refactor_stdin()
else:
try:
rt.refactor(args, options.write, options.doctests_only, options.processes)
except refactor.MultiprocessingUnsupported:
print >> sys.stderr, "Sorry, -j isn't supported on this platform."
return 1
rt.summarize()
return int(bool(rt.errors)) | [
"[email protected]"
] | |
f1aff488cc397a61c84e4bf306f6c1cf8961a77a | 4557846c914c259bd520f6c8bd358e8c5fb8e820 | /bin/__init__.py | b78fa2a4678d9ddf5c8ca9c763a457facdb431cf | [] | no_license | ninthDevilHAUNSTER/EasyEnglishTerminal | 7c854475b6f472d296a1f8ee25c9f5c9b961a641 | 3f68209b5ff9b3e35e9660b60b91ebddd99ac33b | refs/heads/master | 2020-12-11T23:13:57.678840 | 2020-01-15T02:59:37 | 2020-01-15T02:59:37 | 233,981,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41 | py | from bin.base import EasyEnglishTerminal
| [
"[email protected]"
] | |
f92227c51ec1996e3e31c2e0073f8916609625b5 | e4bab7fc4e8eacb62ad35b4b58b9a5093bae44c7 | /spec/rift/data/models/tenant.py | a7a4d5803847af6a42bab44665e8cd139f2cfdba | [
"Apache-2.0"
] | permissive | mkam/Rift | 972d5c571ead01480519509b783ec70b0636d10f | 802892f7c119845e0f2ec5b0798463f210e7061f | refs/heads/master | 2021-06-01T12:33:58.147207 | 2015-08-27T19:35:51 | 2015-08-27T19:35:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | py | import uuid
from specter import Spec, expect
from rift.data.models.tenant import Tenant
class TenantModel(Spec):
def can_convert_to_dictionary(self):
tmp_uuid = str(uuid.uuid4())
tenant = Tenant(name=tmp_uuid, tenant_id=tmp_uuid)
tenant_dict = tenant.as_dict()
test_dict = Tenant.build_tenant_from_dict(tenant_dict).as_dict()
expect(tenant_dict).to.equal(test_dict)
| [
"[email protected]"
] | |
c6181955ae958e8e09f7d70d6cabc46465b949a8 | 9930f08717594022e0f7fde2a96baaa7fcfce784 | /assignment3_prime number or not.py | c6ba297b1ad4aab747f95704e77a94145abc75b2 | [] | no_license | dinesh5555/python_assignments | 72bd2d1cc35a92a01826536eeb4107953d8d73c7 | 33fbcbe1de8f92bd6ffe07fa66640ce1ab84a756 | refs/heads/master | 2022-11-11T18:42:41.621053 | 2020-07-03T09:12:49 | 2020-07-03T09:12:49 | 276,854,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | #!/usr/bin/env python
# coding: utf-8
# In[19]:
num=int(input("enter a number"))
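# Trial division: try every candidate divisor i in [2, num).  The for-loop's
# "else" branch below runs only when the loop ends without hitting "break",
# i.e. when no divisor was found, which is exactly the prime case.  Nothing is
# printed when num <= 1.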
if num>1:
for i in range(2,num):
if (num%i)==0:
print(num,"is not a prime number")
break
else:
print(num,"is a prime number")
| [
"[email protected]"
] | |
7f78820ec7b0fb9e06f0c1b1bdf198ef2b2cabe4 | 77900cdd9a815caf1cd04705321ca93f5072179f | /Project2/Project2/.history/blog/views_20211114212413.py | 9aa8a5a017e67a14ca1de498d7a75944f750328a | [] | no_license | Bom19990111/helloword_python | 717799d994223d65de5adaeabecf396ff2bc1fb7 | 2ee2e67a60043f03c1ce4b070470c7d2dcdc72a7 | refs/heads/master | 2023-09-06T04:17:02.057628 | 2021-11-21T20:00:46 | 2021-11-21T20:00:46 | 407,063,273 | 0 | 1 | null | 2021-11-21T20:00:47 | 2021-09-16T07:18:35 | Python | UTF-8 | Python | false | false | 486 | py | from django.shortcuts import get_object_or_404, render
from .models import Blog
# Create your views here.
def all_blogs(request):
blogs = Blog.objects.filter(status=1).order_by('-created_on')
return render(request, 'blog/all_blogs.html', {'blogs': blogs})
def detail(request, slug):
blog = get_object_or_404(Blog, slug_title=slug)
    return render(request, 'blog/detail.html', {'blog': blog})
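# A matching URLconf entry is assumed to look roughly like (illustrative, not
# part of this snapshot):
#   path('<slug:slug>/', views.detail, name='detail')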
| [
"[email protected]"
] | |
73a769860358682887712313fed38e62177e3612 | 55815c281f6746bb64fc2ba46d074ca5af966441 | /medium/299.py | 4b6e196e858ad7ee70a73d1e40dbf8f868f06bf8 | [] | no_license | brandoneng000/LeetCode | def5107b03187ad7b7b1c207d39c442b70f80fc2 | c7a42753b2b16c7b9c66b8d7c2e67b683a15e27d | refs/heads/master | 2023-08-30T23:38:04.845267 | 2023-08-30T08:42:57 | 2023-08-30T08:42:57 | 199,584,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | import collections
class Solution:
def getHint(self, secret: str, guess: str) -> str:
cows = 0
bulls = 0
for i in range(len(secret)):
if secret[i] == guess[i]:
bulls += 1
secret_counter = collections.Counter(secret)
guess_counter = collections.Counter(guess)
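        # Digits matched regardless of position: per digit this is
        # min(count in secret, count in guess), and with Counter arithmetic it
        # equals sum(secret) - sum(secret - guess).  Subtracting the bulls
        # (exact-position matches counted above) leaves the cows.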
cows = sum(secret_counter.values()) - sum((secret_counter - guess_counter).values()) - bulls
return f"{bulls}A{cows}B"
def main():
sol = Solution()
print(sol.getHint(secret = "1807", guess = "7810"))
print(sol.getHint(secret = "1123", guess = "0111"))
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
610dffc1617cd22bd7b8e889c292a9d1ef1e3346 | 677002b757c0a1a00b450d9710a8ec6aeb9b9e9a | /tiago_public_ws/build/tiago_bringup/catkin_generated/pkg.develspace.context.pc.py | dd49ac14d12608a0d4daa23f54bf5fb2b0e9670f | [] | no_license | mrrocketraccoon/tiago_development | ce686c86459dbfe8623aa54cf4279021342887fb | a0539bdcf21b67ab902a4649b516dcb929c54042 | refs/heads/main | 2023-06-16T19:39:33.391293 | 2021-07-08T21:20:03 | 2021-07-08T21:20:03 | 384,249,894 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "tiago_bringup"
PROJECT_SPACE_DIR = "/tiago_public_ws/devel/.private/tiago_bringup"
PROJECT_VERSION = "2.0.58"
| [
"[email protected]"
] | |
4af74a933a0a3e003647d10696dc9afb71b9e739 | 080c13cd91a073457bd9eddc2a3d13fc2e0e56ae | /MY_REPOS/awesome-4-new-developers/tensorflow-master/tensorflow/python/framework/experimental/math_ops.py | bb168de21996ba1f8aa825afb66054bf16c1f338 | [
"Apache-2.0"
] | permissive | Portfolio-Projects42/UsefulResourceRepo2.0 | 1dccc8961a09347f124d3ed7c27c6d73b9806189 | 75b1e23c757845b5f1894ebe53551a1cf759c6a3 | refs/heads/master | 2023-08-04T12:23:48.862451 | 2021-09-15T12:51:35 | 2021-09-15T12:51:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,746 | py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental impl for gen_math_ops.py using unified APIs, for testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework.experimental import _math_ops
from tensorflow.python.framework.experimental import context_stack as context
def add(a, b, name=None):
ctx = context.get_default()
return _math_ops.add(ctx, a, b, name)
def mat_mul(a, b, name=None):
ctx = context.get_default()
return _math_ops.mat_mul(ctx, a, b, name)
def neg(a, name=None):
ctx = context.get_default()
return _math_ops.neg(ctx, a, name)
def sub(a, b, name=None):
ctx = context.get_default()
return _math_ops.sub(ctx, a, b, name)
def mul(a, b, name=None):
ctx = context.get_default()
return _math_ops.mul(ctx, a, b, name)
def log1p(a, name=None):
ctx = context.get_default()
return _math_ops.log1p(ctx, a, name)
def div_no_nan(a, b, name=None):
ctx = context.get_default()
return _math_ops.div_no_nan(ctx, a, b, name)
| [
"[email protected]"
] | |
29d682db283e2dc08722ab6dd840796f0e982a94 | e67a0139092d3389fea0075de9ecf12ab209649f | /scripts/addons_extern/AF_3dview_specials/VIEW3D_MT_armature_specials.py | 47b9a6cae734433af79d7e3a2b6eef5aca78063f | [] | no_license | amagnoni/blenderpython | 9fe864d287f992b7cd71cd584fca4a501a6ac954 | d2fec1a35369b7b171e2f0999196b87e242e08f3 | refs/heads/master | 2021-01-18T11:28:55.372759 | 2015-10-17T20:16:57 | 2015-10-17T20:16:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,882 | py | # 3Dビュー > アーマチュア編集モード > 「W」キー
import bpy
import re
################
# Operators    #
################
class CreateMirror(bpy.types.Operator):
bl_idname = "armature.create_mirror"
bl_label = "Select bones mirroring."
bl_description = "Mirrored at any axes selected bone."
bl_options = {'REGISTER', 'UNDO'}
def execute(self, context):
obj = context.active_object
if (obj.type == "ARMATURE"):
if (obj.mode == "EDIT"):
preCursorCo = context.space_data.cursor_location[:]
prePivotPoint = context.space_data.pivot_point
preUseMirror = context.object.data.use_mirror_x
context.space_data.cursor_location = (0, 0, 0)
context.space_data.pivot_point = 'CURSOR'
context.object.data.use_mirror_x = True
selectedBones = context.selected_bones[:]
bpy.ops.armature.autoside_names(type='XAXIS')
bpy.ops.armature.duplicate()
axis = (True, False, False)
bpy.ops.transform.mirror(constraint_axis=axis)
bpy.ops.armature.flip_names()
newBones = []
for bone in context.selected_bones:
for pre in selectedBones:
if (bone.name == pre.name):
break
else:
newBones.append(bone)
bpy.ops.armature.select_all(action='DESELECT')
for bone in selectedBones:
bone.select = True
bone.select_head = True
bone.select_tail = True
bpy.ops.transform.transform(mode='BONE_ROLL', value=(0, 0, 0, 0))
bpy.ops.armature.select_all(action='DESELECT')
for bone in newBones:
bone.select = True
bone.select_head = True
bone.select_tail = True
context.space_data.cursor_location = preCursorCo[:]
context.space_data.pivot_point = prePivotPoint
context.object.data.use_mirror_x = preUseMirror
else:
self.report(type={"ERROR"}, message="Please perform in edit mode")
else:
self.report(type={"ERROR"}, message="Armature object is not")
return {'FINISHED'}
class CopyBoneName(bpy.types.Operator):
bl_idname = "armature.copy_bone_name"
bl_label = "Copy to Clipboard bone name"
bl_description = "Copies the Clipboard the name of active bone"
bl_options = {'REGISTER', 'UNDO'}
isObject = bpy.props.BoolProperty(name="Object name", default=False)
def execute(self, context):
if (self.isObject):
context.window_manager.clipboard = context.active_object.name + ":" + context.active_bone.name
else:
context.window_manager.clipboard = context.active_bone.name
return {'FINISHED'}
class RenameBoneRegularExpression(bpy.types.Operator):
bl_idname = "armature.rename_bone_regular_expression"
bl_label = "Replace the bone names in regular expressions"
bl_description = "In the bone name (of choice) to match regular expression replace"
bl_options = {'REGISTER', 'UNDO'}
	isAll = bpy.props.BoolProperty(name="All bones (including unselected)", default=False)
	pattern = bpy.props.StringProperty(name="Search pattern (regular expression)", default="^")
repl = bpy.props.StringProperty(name="Replacement", default="@")
def execute(self, context):
obj = context.active_object
if (obj.type == "ARMATURE"):
if (obj.mode == "EDIT"):
bones = context.selected_bones
if (self.isAll):
bones = obj.data.bones
for bone in bones:
try:
new_name = re.sub(self.pattern, self.repl, bone.name)
except:
continue
bone.name = new_name
else:
self.report(type={"ERROR"}, message="Please perform in edit mode")
else:
self.report(type={"ERROR"}, message="Armature object is not")
return {'FINISHED'}
class RenameOppositeBone(bpy.types.Operator):
bl_idname = "armature.rename_opposite_bone"
bl_label = "Bones in the opposite position, rename."
bl_description = "Bone is located opposite the X axis selection in bone \"1.R 1 longs.L \' of so versus the"
bl_options = {'REGISTER', 'UNDO'}
threshold = bpy.props.FloatProperty(name="At the threshold", default=0.00001, min=0, soft_min=0, step=0.001, precision=5)
def execute(self, context):
obj = context.active_object
if (obj.type == "ARMATURE"):
if (obj.mode == "EDIT"):
arm = obj.data
bpy.ops.armature.autoside_names(type='XAXIS')
selectedBones = context.selected_bones[:]
bpy.ops.armature.select_all(action='DESELECT')
bpy.ops.object.mode_set(mode='OBJECT')
threshold = self.threshold
for bone in selectedBones:
bone = arm.bones[bone.name]
head = (-bone.head_local[0], bone.head_local[1], bone.head_local[2])
tail = (-bone.tail_local[0], bone.tail_local[1], bone.tail_local[2])
for b in arm.bones:
if ( (head[0]-threshold) <= b.head_local[0] <= (head[0]+threshold)):
if ( (head[1]-threshold) <= b.head_local[1] <= (head[1]+threshold)):
if ( (head[2]-threshold) <= b.head_local[2] <= (head[2]+threshold)):
if ( (tail[0]-threshold) <= b.tail_local[0] <= (tail[0]+threshold)):
if ( (tail[1]-threshold) <= b.tail_local[1] <= (tail[1]+threshold)):
if ( (tail[2]-threshold) <= b.tail_local[2] <= (tail[2]+threshold)):
b.name = bone.name
b.select = True
b.select_head = True
b.select_tail = True
break
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.armature.flip_names()
else:
self.report(type={"ERROR"}, message="Please perform in edit mode")
else:
self.report(type={"ERROR"}, message="Armature object is not")
return {'FINISHED'}
# Menu
def menu(self, context):
self.layout.separator()
self.layout.prop(context.object.data, "use_mirror_x", icon="PLUGIN", text="X axis mirror edit")
self.layout.operator(CreateMirror.bl_idname, icon="PLUGIN")
self.layout.operator(RenameOppositeBone.bl_idname, icon="PLUGIN")
self.layout.separator()
self.layout.operator(CopyBoneName.bl_idname, icon="PLUGIN")
self.layout.operator(RenameBoneRegularExpression.bl_idname, icon="PLUGIN")
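# Registration is assumed to happen elsewhere in the add-on (not in this file),
# e.g. by appending this function to the armature specials menu:
#   bpy.types.VIEW3D_MT_armature_specials.append(menu)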
| [
"[email protected]"
] | |
14258aac0b0a7e639801a834cddbdf0089e45ea8 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_240/ch140_2020_04_01_19_19_15_085921.py | 4c86827b303c447f074e6de350d868de44996204 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | def faixa_notas(notas):
a = 0
b = 0
c = 0
for i in notas:
if i < 5:
a += 1
elif i <= 7:
b += 1
else:
c += 1
return [a ,b ,c] | [
"[email protected]"
] | |
9d4fa0a1977714a0290798e157f5b22310e8461f | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/hdinsight/v20210601/_enums.py | edcf6d44105644a4c49b3c3c4900f7f15073a76f | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,314 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'DaysOfWeek',
'DirectoryType',
'JsonWebKeyEncryptionAlgorithm',
'OSType',
'PrivateIPAllocationMethod',
'PrivateLink',
'PrivateLinkServiceConnectionStatus',
'ResourceIdentityType',
'ResourceProviderConnection',
'Tier',
]
class DaysOfWeek(str, Enum):
MONDAY = "Monday"
TUESDAY = "Tuesday"
WEDNESDAY = "Wednesday"
THURSDAY = "Thursday"
FRIDAY = "Friday"
SATURDAY = "Saturday"
SUNDAY = "Sunday"
class DirectoryType(str, Enum):
"""
The directory type.
"""
ACTIVE_DIRECTORY = "ActiveDirectory"
class JsonWebKeyEncryptionAlgorithm(str, Enum):
"""
Algorithm identifier for encryption, default RSA-OAEP.
"""
RS_A_OAEP = "RSA-OAEP"
RS_A_OAE_P_256 = "RSA-OAEP-256"
RSA1_5 = "RSA1_5"
class OSType(str, Enum):
"""
The type of operating system.
"""
WINDOWS = "Windows"
LINUX = "Linux"
class PrivateIPAllocationMethod(str, Enum):
"""
The method that private IP address is allocated.
"""
DYNAMIC = "dynamic"
STATIC = "static"
class PrivateLink(str, Enum):
"""
Indicates whether or not private link is enabled.
"""
DISABLED = "Disabled"
ENABLED = "Enabled"
class PrivateLinkServiceConnectionStatus(str, Enum):
"""
The concrete private link service connection.
"""
APPROVED = "Approved"
REJECTED = "Rejected"
PENDING = "Pending"
REMOVED = "Removed"
class ResourceIdentityType(str, Enum):
"""
The type of identity used for the cluster. The type 'SystemAssigned, UserAssigned' includes both an implicitly created identity and a set of user assigned identities.
"""
SYSTEM_ASSIGNED = "SystemAssigned"
USER_ASSIGNED = "UserAssigned"
SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned"
NONE = "None"
class ResourceProviderConnection(str, Enum):
"""
The direction for the resource provider connection.
"""
INBOUND = "Inbound"
OUTBOUND = "Outbound"
class Tier(str, Enum):
"""
The cluster tier.
"""
STANDARD = "Standard"
PREMIUM = "Premium"
| [
"[email protected]"
] | |
a7e651038218c9e972e369896ec435d505b8823a | a4135d6b14b05cb1aacfaa9548e5bf9db1ef8585 | /pddlstream/language/stream.py | 8f7b912af7192e42f26c74ade003ef963f7c1a42 | [
"MIT"
] | permissive | yijiangh/pddlstream | e3b05a237c3e510b1fb2aad4d0bcd62c35f0f49b | 8d3782fc65a2c44c77ae34eba9769d855925c10e | refs/heads/master | 2023-03-11T00:17:24.716411 | 2018-11-02T19:02:42 | 2018-11-02T19:02:42 | 155,922,752 | 1 | 0 | MIT | 2018-11-02T21:21:11 | 2018-11-02T21:21:10 | null | UTF-8 | Python | false | false | 18,592 | py | import time
from collections import Counter, defaultdict, namedtuple, Sequence
from itertools import count
from pddlstream.algorithms.downward import make_preconditions, make_parameters
from pddlstream.language.constants import AND, get_prefix, get_args, is_parameter
from pddlstream.language.conversion import list_from_conjunction, remap_objects, \
substitute_expression, get_formula_operators, evaluation_from_fact, values_from_objects, obj_from_value_expression
from pddlstream.language.external import ExternalInfo, Result, Instance, External, DEBUG, get_procedure_fn, \
parse_lisp_list
from pddlstream.language.generator import get_next, from_fn
from pddlstream.language.object import Object, OptimisticObject, UniqueOptValue
from pddlstream.utils import str_from_object, get_mapping, irange
VERBOSE_FAILURES = True
INTERNAL = False
DEFAULT_UNIQUE = False
NEGATIVE_BLOCKED = True
# TODO: could also make only wild facts and automatically identify output tuples satisfying certified
# TODO: default effort cost of streams with more inputs to be higher (but negated are free)
# TODO: automatically convert to test streams on inputs
def get_empty_fn():
return lambda *input_values: None
def get_constant_fn(constant):
return lambda *input_values: constant
def get_identity_fn(indices):
return lambda *input_values: tuple(input_values[i] for i in indices)
##################################################
OptValue = namedtuple('OptValue', ['stream', 'inputs', 'values', 'output'])
class PartialInputs(object):
def __init__(self, inputs='', unique=False): #, num=1):
self.inputs = tuple(inputs.split())
self.unique = unique
#self.num = num
def get_opt_gen_fn(self, stream):
inputs = stream.inputs if self.unique else self.inputs
assert set(inputs) <= set(stream.inputs)
# TODO: ensure no scoping error with inputs
def gen_fn(*input_values):
input_objects = tuple(map(Object.from_value, input_values))
instance = stream.get_instance(input_objects)
mapping = get_mapping(stream.inputs, input_objects)
values = tuple(mapping[inp] for inp in inputs)
assert(len(inputs) == len(values))
#for _ in irange(self.num):
for _ in irange(instance.num_optimistic):
yield [tuple(OptValue(stream.name, inputs, values, out)
for out in stream.outputs)]
return gen_fn
def get_constant_gen_fn(stream, constant):
def gen_fn(*input_values):
assert (len(stream.inputs) == len(input_values))
yield [tuple(constant for _ in range(len(stream.outputs)))]
return gen_fn
##################################################
# def get_unique_fn(stream):
# # TODO: this should take into account the output number...
# def fn(*input_values):
# #input_objects = map(opt_obj_from_value, input_values)
# #stream_instance = stream.get_instance(input_objects)
# #output_values = tuple(UniqueOpt(stream_instance, i) for i in range(len(stream.outputs)))
# output_values = tuple(object() for _ in range(len(stream.outputs)))
# return [output_values]
# return fn
def get_debug_gen_fn(stream):
return from_fn(lambda *args: tuple(DebugValue(stream.name, args, o)
for o in stream.outputs))
class DebugValue(object): # TODO: could just do an object
_output_counts = defaultdict(count)
_prefix = '@' # $ | @
def __init__(self, stream, input_values, output_parameter):
self.stream = stream
self.input_values = input_values
self.output_parameter = output_parameter
self.index = next(self._output_counts[output_parameter])
def __repr__(self):
# Can also just return first letter of the prefix
return '{}{}{}'.format(self._prefix, self.output_parameter[1:], self.index)
##################################################
class StreamInfo(ExternalInfo):
def __init__(self, opt_gen_fn=PartialInputs(unique=DEFAULT_UNIQUE), eager=False,
p_success=None, overhead=None, negate=False, effort_fn=None, simultaneous=False):
# TODO: could change frequency/priority for the incremental algorithm
super(StreamInfo, self).__init__(eager, p_success, overhead, effort_fn)
self.opt_gen_fn = opt_gen_fn
self.negate = negate
self.simultaneous = simultaneous
#self.order = 0
class StreamResult(Result):
def __init__(self, instance, output_objects, opt_index=None, call_index=None, list_index=None):
super(StreamResult, self).__init__(instance, opt_index)
self.output_objects = tuple(output_objects)
self.mapping = get_mapping(self.external.outputs, self.output_objects)
self.mapping.update(instance.mapping)
self.certified = substitute_expression(self.external.certified, self.get_mapping())
self.call_index = call_index
self.list_index = list_index
def get_mapping(self):
return self.mapping
def get_certified(self):
return self.certified
def get_tuple(self):
return self.external.name, self.instance.input_objects, self.output_objects
def remap_inputs(self, bindings):
input_objects = remap_objects(self.instance.input_objects, bindings)
fluent_facts = [(get_prefix(f),) + remap_objects(get_args(f), bindings)
for f in self.instance.fluent_facts]
new_instance = self.external.get_instance(input_objects, fluent_facts=fluent_facts)
new_instance.opt_index = self.instance.opt_index
return self.__class__(new_instance, self.output_objects, self.opt_index)
def is_successful(self):
return True
def __repr__(self):
return '{}:{}->{}'.format(self.external.name,
str_from_object(self.instance.input_objects),
str_from_object(self.output_objects))
class StreamInstance(Instance):
def __init__(self, stream, input_objects, fluent_facts):
super(StreamInstance, self).__init__(stream, input_objects)
self._generator = None
self.opt_index = stream.num_opt_fns
self.fluent_facts = frozenset(fluent_facts)
self.axiom_predicate = None
self.disabled_axiom = None
self.num_optimistic = 1
def _check_output_values(self, new_values):
if not isinstance(new_values, Sequence):
raise ValueError('An output list for stream [{}] is not a sequence: {}'.format(self.external.name, new_values))
for output_values in new_values:
if not isinstance(output_values, Sequence):
raise ValueError('An output tuple for stream [{}] is not a sequence: {}'.format(
self.external.name, output_values))
if len(output_values) != len(self.external.outputs):
raise ValueError('An output tuple for stream [{}] has length {} instead of {}: {}'.format(
self.external.name, len(output_values), len(self.external.outputs), output_values))
def _check_wild_facts(self, new_facts):
if not isinstance(new_facts, Sequence):
raise ValueError('Output wild facts for wild stream [{}] is not a sequence: {}'.format(
self.external.name, new_facts))
def get_result(self, object_objects, opt_index=None, list_index=None):
return self.external._Result(self, tuple(object_objects), opt_index=opt_index,
call_index=self.num_calls, list_index=list_index)
def use_unique(self):
return self.opt_index == 0
def get_fluent_values(self):
return [(get_prefix(f),) + values_from_objects(get_args(f)) for f in self.fluent_facts]
def _create_generator(self):
if self._generator is None:
input_values = self.get_input_values()
#try:
if self.external.is_fluent(): # self.fluent_facts
self._generator = self.external.gen_fn(*input_values, fluents=self.get_fluent_values())
else:
self._generator = self.external.gen_fn(*input_values)
#except TypeError as err:
# print('Stream [{}] expects {} inputs'.format(self.external.name, len(input_values)))
# raise err
def _next_outputs(self):
self._create_generator()
output, self.enumerated = get_next(self._generator, default=None)
if output is None:
return [], []
if not self.external.is_wild:
return output, []
if len(output) != 2:
raise RuntimeError('Wild stream [{}] does not generate pairs of output values and wild facts'.format(
self.external.name))
return output
def next_results(self, accelerate=1, verbose=False):
# TODO: prune repeated values
all_new_values = []
all_new_facts = []
all_results = []
start_calls = self.num_calls
for attempt in range(accelerate):
if all_results or self.enumerated:
break
start_time = time.time()
new_values, new_facts = self._next_outputs()
self._check_output_values(new_values)
self._check_wild_facts(new_facts)
new_results = [self.get_result(map(Object.from_value, output_values), list_index=list_index)
for list_index, output_values in enumerate(new_values)]
all_new_values.extend(new_values)
all_new_facts.extend(new_facts)
all_results.extend(new_results)
self.update_statistics(start_time, new_results)
if verbose and (VERBOSE_FAILURES or all_new_values):
print('{}-{}) {}:{}->{}'.format(start_calls, self.num_calls, self.external.name,
str_from_object(self.get_input_values()),
str_from_object(all_new_values)))
if verbose and all_new_facts:
# TODO: format all_new_facts
print('{}-{}) {}:{}->{}'.format(start_calls, self.num_calls, self.external.name,
str_from_object(self.get_input_values()), all_new_facts))
return all_results, list(map(obj_from_value_expression, all_new_facts))
def next_optimistic(self):
# TODO: compute this just once and store
if self.enumerated or self.disabled:
return []
# TODO: (potentially infinite) sequence of optimistic objects
# TODO: how do I distinguish between real and not real verifications of things?
# TODO: resue these?
self.opt_results = []
output_set = set()
for output_list in self.external.opt_gen_fn(*self.get_input_values()):
self._check_output_values(output_list)
for i, output_values in enumerate(output_list):
output_objects = []
for output_index, value in enumerate(output_values):
# TODO: maybe record history of values here?
unique = UniqueOptValue(self, len(self.opt_results), output_index) # object()
param = unique if self.use_unique() else value
output_objects.append(OptimisticObject.from_opt(value, param))
output_objects = tuple(output_objects)
if output_objects not in output_set:
output_set.add(output_objects) # No point returning the exact thing here...
self.opt_results.append(self.external._Result(self, output_objects, opt_index=self.opt_index,
call_index=len(self.opt_results), list_index=0))
return self.opt_results
def get_blocked_fact(self):
if self.external.is_fluent():
assert self.axiom_predicate is not None
return (self.axiom_predicate,) + self.input_objects
return (self.external.blocked_predicate,) + self.input_objects
def disable(self, evaluations, domain):
#assert not self.disabled
super(StreamInstance, self).disable(evaluations, domain)
if not self.external.is_fluent(): # self.fluent_facts:
if self.external.is_negated() and not self.successes:
evaluations[evaluation_from_fact(self.get_blocked_fact())] = INTERNAL
return
if self.axiom_predicate is not None:
return
index = len(self.external.disabled_instances)
self.external.disabled_instances.append(self)
self.axiom_predicate = '_ax{}-{}'.format(self.external.blocked_predicate, index)
evaluations[evaluation_from_fact(self.get_blocked_fact())] = INTERNAL
# TODO: allow reporting back which components lead to failure
import pddl
static_fact = (self.axiom_predicate,) + self.external.inputs
preconditions = [static_fact] + list(self.fluent_facts)
self.disabled_axiom = pddl.Axiom(name=self.external.blocked_predicate,
parameters=make_parameters(self.external.inputs),
num_external_parameters=len(self.external.inputs),
condition=make_preconditions(preconditions))
domain.axioms.append(self.disabled_axiom)
def enable(self, evaluations, domain):
super(StreamInstance, self).enable(evaluations, domain)
if self.axiom_predicate is not None: # TODO: re-enable?
raise NotImplementedError(self)
def __repr__(self):
return '{}:{}->{}'.format(self.external.name, self.input_objects, self.external.outputs)
class Stream(External):
_Instance = StreamInstance
_Result = StreamResult
def __init__(self, name, gen_fn, inputs, domain, outputs, certified, info, fluents=[], is_wild=False):
super(Stream, self).__init__(name, info, inputs, domain)
self.outputs = tuple(outputs)
self.certified = tuple(certified)
self.constants.update(a for i in certified for a in get_args(i) if not is_parameter(a))
for p, c in Counter(self.outputs).items():
if not is_parameter(p):
raise ValueError('Output [{}] for stream [{}] is not a parameter'.format(p, name))
if c != 1:
raise ValueError('Output [{}] for stream [{}] is not unique'.format(p, name))
for p in set(self.inputs) & set(self.outputs):
raise ValueError('Parameter [{}] for stream [{}] is both an input and output'.format(p, name))
certified_parameters = {a for i in certified for a in get_args(i) if is_parameter(a)}
for p in (certified_parameters - set(self.inputs + self.outputs)):
raise ValueError('Parameter [{}] for stream [{}] is not included within outputs'.format(p, name))
for p in (set(self.outputs) - certified_parameters):
print('Warning! Output [{}] for stream [{}] is not covered by a certified condition'.format(p, name))
# TODO: automatically switch to unique if only used once
self.gen_fn = get_debug_gen_fn(self) if gen_fn == DEBUG else gen_fn
self.num_opt_fns = 1 if self.outputs else 0 # Always unique if no outputs
if isinstance(self.info.opt_gen_fn, PartialInputs):
if self.info.opt_gen_fn.unique:
self.num_opt_fns = 0
self.opt_gen_fn = self.info.opt_gen_fn.get_opt_gen_fn(self)
else:
self.opt_gen_fn = self.info.opt_gen_fn
#self.bound_list_fn = None # TODO: generalize to a hierarchical sequence
#self.opt_fns = [get_unique_fn(self), get_shared_fn(self)] # get_unique_fn | get_shared_fn
self.fluents = [] if gen_fn == DEBUG else fluents
if NEGATIVE_BLOCKED:
self.blocked_predicate = '~{}-negative'.format(self.name) # Args are self.inputs
else:
self.blocked_predicate = '~{}'.format(self.name)
self.disabled_instances = []
self.is_wild = is_wild
if self.is_negated():
if self.outputs:
raise ValueError('Negated streams cannot have outputs: {}'.format(self.outputs))
#assert len(self.certified) == 1 # TODO: is it okay to have more than one fact?
for certified in self.certified:
if not (set(self.inputs) <= set(get_args(certified))):
raise ValueError('Negated streams must have certified facts including all input parameters')
def is_fluent(self):
return self.fluents
def is_negated(self):
return self.info.negate
def get_instance(self, input_objects, fluent_facts=frozenset()):
key = (tuple(input_objects), frozenset(fluent_facts))
if key not in self.instances:
self.instances[key] = self._Instance(self, input_objects, fluent_facts)
return self.instances[key]
def __repr__(self):
return '{}:{}->{}'.format(self.name, self.inputs, self.outputs)
##################################################
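# Example of the lisp list this parser is assumed to receive, in the style of a
# stream.pddl declaration (the names below are illustrative only):
#   (:stream sample-pose
#     :inputs (?o ?r)
#     :domain (Stackable ?o ?r)
#     :outputs (?p)
#     :certified (and (Pose ?o ?p) (Supported ?o ?p ?r)))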
def parse_stream(lisp_list, stream_map, stream_info):
value_from_attribute = parse_lisp_list(lisp_list)
assert set(value_from_attribute) <= {':stream', ':wild-stream', ':inputs',
':domain', ':fluents', ':outputs', ':certified'}
is_wild = (':wild-stream' in value_from_attribute)
name = value_from_attribute[':wild-stream'] if is_wild else value_from_attribute[':stream']
domain = value_from_attribute.get(':domain', None)
# TODO: dnf_from_positive_formula(value_from_attribute.get(':domain', []))
if not (get_formula_operators(domain) <= {AND}):
# TODO: allow positive DNF
raise ValueError('Stream [{}] domain must be a conjunction'.format(name))
certified = value_from_attribute.get(':certified', None)
if not (get_formula_operators(certified) <= {AND}):
raise ValueError('Stream [{}] certified must be a conjunction'.format(name))
return Stream(name, get_procedure_fn(stream_map, name),
value_from_attribute.get(':inputs', []),
list_from_conjunction(domain),
value_from_attribute.get(':outputs', []),
list_from_conjunction(certified),
stream_info.get(name, StreamInfo()),
fluents=value_from_attribute.get(':fluents', []),
is_wild=is_wild)
| [
"[email protected]"
] | |
3a86a7e83c8d88aa0d7e0a5952a56771bd01f41a | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/01_netCDF_extraction/merra902Combine/604-tideGauge.py | 6e967f2eb7f15063af6ad0c01079f2c24d39111e | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,376 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 17 11:28:00 2020
--------------------------------------------
Load predictors for each TG and combine them
--------------------------------------------
@author: Michael Tadesse
"""
import os
import pandas as pd
#define directories
# dir_name = 'F:\\01_erainterim\\01_eraint_predictors\\eraint_D3'
dir_in = "/lustre/fs0/home/mtadesse/merraLocalized"
dir_out = "/lustre/fs0/home/mtadesse/merraAllCombined"
def combine():
os.chdir(dir_in)
#get names
tg_list_name = os.listdir()
x = 604
y = 605
for tg in range(x, y):
os.chdir(dir_in)
tg_name = tg_list_name[tg]
print(tg_name, '\n')
#looping through each TG folder
os.chdir(tg_name)
#check for empty folders
if len(os.listdir()) == 0:
continue
#defining the path for each predictor
where = os.getcwd()
csv_path = {'slp' : os.path.join(where, 'slp.csv'),\
"wnd_u": os.path.join(where, 'wnd_u.csv'),\
'wnd_v' : os.path.join(where, 'wnd_v.csv')}
first = True
for pr in csv_path.keys():
print(tg_name, ' ', pr)
#read predictor
pred = pd.read_csv(csv_path[pr])
#remove unwanted columns
pred.drop(['Unnamed: 0'], axis = 1, inplace=True)
#sort based on date as merra files are scrambled
pred.sort_values(by = 'date', inplace=True)
#give predictor columns a name
pred_col = list(pred.columns)
for pp in range(len(pred_col)):
if pred_col[pp] == 'date':
continue
pred_col[pp] = pr + str(pred_col[pp])
pred.columns = pred_col
#merge all predictors
if first:
pred_combined = pred
first = False
else:
pred_combined = pd.merge(pred_combined, pred, on = 'date')
#saving pred_combined
os.chdir(dir_out)
        tg_name = str(tg) + "_" + tg_name
pred_combined.to_csv('.'.join([tg_name, 'csv']))
os.chdir(dir_in)
print('\n')
#run script
combine()
| [
"[email protected]"
] | |
282178eb96083ed7af80ea573a1dde94a35cd474 | 1bad7d2b7fc920ecf2789755ed7f44b039d4134d | /ABC/141/D.py | d02d4346f436cb62c503781981a2c46b5f3839df | [] | no_license | kanekyo1234/AtCoder_solve | ce95caafd31f7c953c0fc699f0f4897dddd7a159 | e5ea7b080b72a2a2fd3fcb826cd10c4ab2e2720e | refs/heads/master | 2023-04-01T04:01:15.885945 | 2021-04-06T04:03:31 | 2021-04-06T04:03:31 | 266,151,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | n,m=map(int,input().split())
a=list(map(int,input().split()))
for i in range(m):
a[a.index(max(a))]//=2
print(sum(a))
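# A faster alternative (sketch only, not executed): repeatedly halving the
# current maximum is a natural fit for a max-heap, giving O((n + m) log n)
# instead of the O(n*m) scan above:
#   import heapq
#   h = [-x for x in a]          # negate values to emulate a max-heap
#   heapq.heapify(h)
#   for _ in range(m):
#       heapq.heapreplace(h, -((-h[0]) // 2))
#   print(-sum(h))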
| [
"[email protected]"
] | |
d8697fb977edd87fe504d6ffb8a9a877b8389dfc | 9be7dd059042e382bc68d2da0e9db929770c36a1 | /madgraph/aloha/aloha_object.py | 8da8fee9f9e7b8286a6b1d6d08387d081f5a6cb2 | [] | no_license | restrepo/SimplifiedDM-SSSFDM-Toolbox | ee891d51d252e3087e6287fb9e3ce055f55e1354 | bbdefde970b7016159f2f9f51eaf9cefc127f220 | refs/heads/master | 2021-01-12T17:15:48.830510 | 2017-11-11T22:13:54 | 2017-11-11T22:13:54 | 71,532,276 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 41,974 | py | ################################################################################
#
# Copyright (c) 2010 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
################################################################################
## Diagram of Class
##
## Variable <--- aloha_lib.Variable
## |
## +- LorentzObject <--- Gamma
## |
## +- Sigma
## |
## +- P
##
## list <--- AddVariable
## |
## +- MultVariable <--- MultLorentz
##
## list <--- LorentzObjectRepresentation <-- ConstantObject
##
################################################################################
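## Illustrative sketch (assumption, not executed here): ALOHA is assumed to
## build these objects while evaluating a UFO Lorentz structure, e.g. an
## expression such as
##     Gamma(3,2,1) * P(3,1)
## combines the Gamma factory (Lorentz index 3, spin indices 2 and 1) with the
## momentum P of particle 1 into a MultLorentz, which is later expanded into a
## LorentzObjectRepresentation.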
from __future__ import division
import aloha.aloha_lib as aloha_lib
import aloha
import cmath
#===============================================================================
# P (Momenta)
#===============================================================================
class L_P(aloha_lib.LorentzObject):
""" Helas Object for an Impulsion """
contract_first = 1
def __init__(self, name, lorentz1, particle):
self.particle = particle
aloha_lib.LorentzObject.__init__(self, name,[lorentz1], [],['P%s'%particle])
aloha_lib.KERNEL.add_tag((name,))
def create_representation(self):
self.sub0 = aloha_lib.DVariable('P%s_0' % self.particle)
self.sub1 = aloha_lib.DVariable('P%s_1' % self.particle)
self.sub2 = aloha_lib.DVariable('P%s_2' % self.particle)
self.sub3 = aloha_lib.DVariable('P%s_3' % self.particle)
self.representation= aloha_lib.LorentzObjectRepresentation(
{(0,): self.sub0, (1,): self.sub1, \
(2,): self.sub2, (3,): self.sub3},
self.lorentz_ind, [])
class P(aloha_lib.FactoryLorentz):
""" Helas Object for an Impulsion """
object_class = L_P
#def __init__(self, lorentz1, particle):
@classmethod
def get_unique_name(self, lorentz1, particle):
return '_P^%s_%s' % (particle, lorentz1)
#===============================================================================
# Pslash
#===============================================================================
class L_PSlash(aloha_lib.LorentzObject):
""" Gamma Matrices """
#gamma0 = [[0, 0, 1, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0]]
#gamma1 = [[0, 0, 0, 1], [0, 0, 1, 0], [0, -1, 0, 0], [-1, 0, 0, 0]]
#gamma2 = [[0, 0, 0, -complex(0,1)],[0, 0, complex(0,1), 0],
# [0, complex(0,1), 0, 0], [-complex(0,1), 0, 0, 0]]
#gamma3 = [[0, 0, 1, 0], [0, 0, 0, -1], [-1, 0, 0, 0], [0, 1, 0, 0]]
#
#gamma = [gamma0, gamma1, gamma2, gamma3]
def __init__(self, name, spin1, spin2, particle):
self.particle = particle
aloha_lib.LorentzObject.__init__(self,name,[], [spin1, spin2])
def create_representation(self):
"""create representation"""
p0 = aloha_lib.DVariable('P%s_0' % self.particle)
p1 = aloha_lib.DVariable('P%s_1' % self.particle)
p2 = aloha_lib.DVariable('P%s_2' % self.particle)
p3 = aloha_lib.DVariable('P%s_3' % self.particle)
gamma = {
(0, 0): 0, (0, 1): 0, (0, 2): p0-p3, (0, 3): -1*p1+1j*p2,
(1, 0): 0, (1, 1): 0, (1, 2): -1*p1-1j*p2, (1, 3): p0+p3,
(2, 0): p0+p3, (2, 1): p1-1j*p2, (2, 2): 0, (2, 3): 0,
(3, 0): p1+1j*p2, (3, 1): p0-p3, (3, 2): 0, (3, 3): 0}
self.representation = aloha_lib.LorentzObjectRepresentation(gamma,
self.lorentz_ind,self.spin_ind)
class PSlash(aloha_lib.FactoryLorentz):
object_class = L_PSlash
@classmethod
def get_unique_name(self, spin1, spin2, particle):
return '_P%s/_%s_%s' % (particle, spin1,spin2)
#===============================================================================
# Mass
#===============================================================================
class L_Mass(aloha_lib.LorentzObject):
""" Helas Object for a Mass"""
def __init__(self, name, particle):
self.particle = particle
aloha_lib.LorentzObject.__init__(self, name,[], [])
def create_representation(self):
mass = aloha_lib.DVariable('M%s' % self.particle)
self.representation = aloha_lib.LorentzObjectRepresentation(
mass, self.lorentz_ind, self.spin_ind)
class Mass(aloha_lib.FactoryLorentz):
object_class = L_Mass
@classmethod
def get_unique_name(self, particle):
return '_M%s' % particle
#===============================================================================
# Coupling
#===============================================================================
class L_Coup(aloha_lib.LorentzObject):
""" Helas Object for a Mass"""
def __init__(self, name, nb):
self.nb = nb
aloha_lib.LorentzObject.__init__(self, name,[], [])
def create_representation(self):
coup = aloha_lib.Variable('COUP%s' % self.nb)
self.representation = aloha_lib.LorentzObjectRepresentation(
coup, self.lorentz_ind, self.spin_ind)
class Coup(aloha_lib.FactoryLorentz):
object_class = L_Coup
@classmethod
def get_unique_name(self, nb):
return 'coup%s' % nb
#===============================================================================
# FCT
#===============================================================================
class L_FCT(aloha_lib.LorentzObject):
""" Helas Object for a Mass"""
def __init__(self, name, id):
self.fctid = id
aloha_lib.LorentzObject.__init__(self, name,[], [])
def create_representation(self):
var = aloha_lib.Variable('FCT%s' % self.fctid)
self.representation = aloha_lib.LorentzObjectRepresentation(
var, self.lorentz_ind, self.spin_ind)
class FCT(aloha_lib.FactoryLorentz):
object_class = L_FCT
@classmethod
def get_unique_name(self, name):
return '_FCT%s' % name
#===============================================================================
# OverMass2
#===============================================================================
class L_OverMass2(aloha_lib.LorentzObject):
""" Helas Object for 1/M**2 """
def __init__(self, name, particle):
self.particle = particle
aloha_lib.LorentzObject.__init__(self, name, [], [], tags=['OM%s' % particle])
def create_representation(self):
mass = aloha_lib.DVariable('OM%s' % self.particle)
self.representation = aloha_lib.LorentzObjectRepresentation(
mass, self.lorentz_ind, self.spin_ind)
class OverMass2(aloha_lib.FactoryLorentz):
object_class = L_OverMass2
@classmethod
def get_unique_name(self, particle):
return '_OM2_%s' % particle
#===============================================================================
# Width
#===============================================================================
class L_Width(aloha_lib.LorentzObject):
""" Helas Object for an Impulsion """
def __init__(self, name, particle):
self.particle = particle
aloha_lib.LorentzObject.__init__(self, name, [], [])
def create_representation(self):
width = aloha_lib.DVariable('W%s' % self.particle)
self.representation= aloha_lib.LorentzObjectRepresentation(
width, self.lorentz_ind, self.spin_ind)
class Width(aloha_lib.FactoryLorentz):
object_class = L_Width
@classmethod
def get_unique_name(self, particle):
return '_W%s' % particle
#===============================================================================
# Param
#===============================================================================
class L_Param(aloha_lib.LorentzObject):
""" Object for a Model Parameter """
def __init__(self, Lname, name):
self.varname = name
aloha_lib.LorentzObject.__init__(self, name, [], [])
def create_representation(self):
param = aloha_lib.Variable( self.varname, aloha_lib.ExtVariable)
self.representation= aloha_lib.LorentzObjectRepresentation(
param, [], [])
class Param(aloha_lib.FactoryLorentz):
object_class = L_Param
@classmethod
def get_unique_name(self, name):
if name == 'Pi':
            aloha_lib.KERNEL.has_pi = True
return 'Param_%s' % name
#===============================================================================
# Scalar
#===============================================================================
class L_Scalar(aloha_lib.LorentzObject):
""" Helas Object for a Spinor"""
def __init__(self, name, particle):
self.particle = particle
aloha_lib.LorentzObject.__init__(self, name, [], [])
def create_representation(self):
rep = aloha_lib.Variable('S%s_1' % self.particle)
self.representation= aloha_lib.LorentzObjectRepresentation(
rep, [], [])
class Scalar(aloha_lib.FactoryLorentz):
object_class = L_Scalar
@classmethod
def get_unique_name(self,particle):
return '_S%s' % particle
#===============================================================================
# Spinor
#===============================================================================
class L_Spinor(aloha_lib.LorentzObject):
""" Helas Object for a Spinor"""
contract_first = 1
def __init__(self, name, spin1, particle, prefactor=1):
self.particle = particle
aloha_lib.LorentzObject.__init__(self, name,[], [spin1])
def create_representation(self):
self.sub0 = aloha_lib.Variable('F%s_1' % self.particle)
self.sub1 = aloha_lib.Variable('F%s_2' % self.particle)
self.sub2 = aloha_lib.Variable('F%s_3' % self.particle)
self.sub3 = aloha_lib.Variable('F%s_4' % self.particle)
self.representation= aloha_lib.LorentzObjectRepresentation(
{(0,): self.sub0, (1,): self.sub1, \
(2,): self.sub2, (3,): self.sub3},
[],self.spin_ind)
class Spinor(aloha_lib.FactoryLorentz):
""" Helas Object for a Spinor"""
object_class = L_Spinor
@classmethod
def get_unique_name(self,spin1, particle):
return '_F%s_%s' % (particle,spin1)
#===============================================================================
# Vector
#===============================================================================
class L_Vector(aloha_lib.LorentzObject):
""" Helas Object for a Vector"""
contract_first = 1
def __init__(self, name, lorentz, particle):
self.particle = particle
aloha_lib.LorentzObject.__init__(self, name, [lorentz], [])
def create_representation(self):
self.sub0 = aloha_lib.Variable('V%s_1' % self.particle)
self.sub1 = aloha_lib.Variable('V%s_2' % self.particle)
self.sub2 = aloha_lib.Variable('V%s_3' % self.particle)
self.sub3 = aloha_lib.Variable('V%s_4' % self.particle)
self.representation= aloha_lib.LorentzObjectRepresentation(
{(0,): self.sub0, (1,): self.sub1, \
(2,): self.sub2, (3,): self.sub3},
self.lorentz_ind, [])
class Vector(aloha_lib.FactoryLorentz):
object_class = L_Vector
@classmethod
def get_unique_name(self, lor, particle):
return '_V%s_%s' % (particle, lor)
#===============================================================================
# Spin3/2
#===============================================================================
class L_Spin3Half(aloha_lib.LorentzObject):
""" Helas Object for a Spin2"""
def __init__(self, name, lorentz, spin, particle):
self.particle = particle
aloha_lib.LorentzObject.__init__(self, name, [lorentz], [spin])
def create_representation(self):
self.sub00 = aloha_lib.Variable('R%s_1' % self.particle)
self.sub01 = aloha_lib.Variable('R%s_2' % self.particle)
self.sub02 = aloha_lib.Variable('R%s_3' % self.particle)
self.sub03 = aloha_lib.Variable('R%s_4' % self.particle)
self.sub10 = aloha_lib.Variable('R%s_5' % self.particle)
self.sub11 = aloha_lib.Variable('R%s_6' % self.particle)
self.sub12 = aloha_lib.Variable('R%s_7' % self.particle)
self.sub13 = aloha_lib.Variable('R%s_8' % self.particle)
self.sub20 = aloha_lib.Variable('R%s_9' % self.particle)
self.sub21 = aloha_lib.Variable('R%s_10' % self.particle)
self.sub22 = aloha_lib.Variable('R%s_11' % self.particle)
self.sub23 = aloha_lib.Variable('R%s_12' % self.particle)
self.sub30 = aloha_lib.Variable('R%s_13' % self.particle)
self.sub31 = aloha_lib.Variable('R%s_14' % self.particle)
self.sub32 = aloha_lib.Variable('R%s_15' % self.particle)
self.sub33 = aloha_lib.Variable('R%s_16' % self.particle)
rep = {(0,0): self.sub00, (0,1): self.sub01, (0,2): self.sub02, (0,3): self.sub03,
(1,0): self.sub10, (1,1): self.sub11, (1,2): self.sub12, (1,3): self.sub13,
(2,0): self.sub20, (2,1): self.sub21, (2,2): self.sub22, (2,3): self.sub23,
(3,0): self.sub30, (3,1): self.sub31, (3,2): self.sub32, (3,3): self.sub33}
self.representation= aloha_lib.LorentzObjectRepresentation( rep, \
self.lorentz_ind, self.spin_ind)
class Spin3Half(aloha_lib.FactoryLorentz):
object_class = L_Spin3Half
@classmethod
def get_unique_name(self, lor, spin, part):
return 'Spin3Half%s^%s_%s' % (part, lor, spin)
#===============================================================================
# Spin2
#===============================================================================
class L_Spin2(aloha_lib.LorentzObject):
""" Helas Object for a Spin2"""
def __init__(self, name, lorentz1, lorentz2, particle):
self.particle = particle
aloha_lib.LorentzObject.__init__(self, name, [lorentz1, lorentz2], [])
def create_representation(self):
self.sub00 = aloha_lib.Variable('T%s_1' % self.particle)
self.sub01 = aloha_lib.Variable('T%s_2' % self.particle)
self.sub02 = aloha_lib.Variable('T%s_3' % self.particle)
self.sub03 = aloha_lib.Variable('T%s_4' % self.particle)
self.sub10 = aloha_lib.Variable('T%s_5' % self.particle)
self.sub11 = aloha_lib.Variable('T%s_6' % self.particle)
self.sub12 = aloha_lib.Variable('T%s_7' % self.particle)
self.sub13 = aloha_lib.Variable('T%s_8' % self.particle)
self.sub20 = aloha_lib.Variable('T%s_9' % self.particle)
self.sub21 = aloha_lib.Variable('T%s_10' % self.particle)
self.sub22 = aloha_lib.Variable('T%s_11' % self.particle)
self.sub23 = aloha_lib.Variable('T%s_12' % self.particle)
self.sub30 = aloha_lib.Variable('T%s_13' % self.particle)
self.sub31 = aloha_lib.Variable('T%s_14' % self.particle)
self.sub32 = aloha_lib.Variable('T%s_15' % self.particle)
self.sub33 = aloha_lib.Variable('T%s_16' % self.particle)
rep = {(0,0): self.sub00, (0,1): self.sub01, (0,2): self.sub02, (0,3): self.sub03,
(1,0): self.sub10, (1,1): self.sub11, (1,2): self.sub12, (1,3): self.sub13,
(2,0): self.sub20, (2,1): self.sub21, (2,2): self.sub22, (2,3): self.sub23,
(3,0): self.sub30, (3,1): self.sub31, (3,2): self.sub32, (3,3): self.sub33}
self.representation= aloha_lib.LorentzObjectRepresentation( rep, \
self.lorentz_ind, [])
class Spin2(aloha_lib.FactoryLorentz):
object_class = L_Spin2
@classmethod
def get_unique_name(self, lor1, lor2, part):
return 'Spin2^%s_%s_%s' % (part, lor1, lor2)
#===============================================================================
# Gamma
#===============================================================================
class L_Gamma(aloha_lib.LorentzObject):
""" Gamma Matrices """
#gamma0 = [[0, 0, 1, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 1, 0, 0]]
#gamma1 = [[0, 0, 0, 1], [0, 0, 1, 0], [0, -1, 0, 0], [-1, 0, 0, 0]]
#gamma2 = [[0, 0, 0, -complex(0,1)],[0, 0, complex(0,1), 0],
# [0, complex(0,1), 0, 0], [-complex(0,1), 0, 0, 0]]
#gamma3 = [[0, 0, 1, 0], [0, 0, 0, -1], [-1, 0, 0, 0], [0, 1, 0, 0]]
#
#gamma = [gamma0, gamma1, gamma2, gamma3]
gamma = { #Gamma0
(0, 0, 0): 0, (0, 0, 1): 0, (0, 0, 2): 1, (0, 0, 3): 0,
(0, 1, 0): 0, (0, 1, 1): 0, (0, 1, 2): 0, (0, 1, 3): 1,
(0, 2, 0): 1, (0, 2, 1): 0, (0, 2, 2): 0, (0, 2, 3): 0,
(0, 3, 0): 0, (0, 3, 1): 1, (0, 3, 2): 0, (0, 3, 3): 0,
#Gamma1
(1, 0, 0): 0, (1, 0, 1): 0, (1, 0, 2): 0, (1, 0, 3): 1,
(1, 1, 0): 0, (1, 1, 1): 0, (1, 1, 2): 1, (1, 1, 3): 0,
(1, 2, 0): 0, (1, 2, 1): -1, (1, 2, 2): 0, (1, 2, 3): 0,
(1, 3, 0): -1, (1, 3, 1): 0, (1, 3, 2): 0, (1, 3, 3): 0,
#Gamma2
(2, 0, 0): 0, (2, 0, 1): 0, (2, 0, 2): 0, (2, 0, 3): -1j,
(2, 1, 0): 0, (2, 1, 1): 0, (2, 1, 2): 1j, (2, 1, 3): 0,
(2, 2, 0): 0, (2, 2, 1): 1j, (2, 2, 2): 0, (2, 2, 3): 0,
(2, 3, 0): -1j, (2, 3, 1): 0, (2, 3, 2): 0, (2, 3, 3): 0,
#Gamma3
(3, 0, 0): 0, (3, 0, 1): 0, (3, 0, 2): 1, (3, 0, 3): 0,
(3, 1, 0): 0, (3, 1, 1): 0, (3, 1, 2): 0, (3, 1, 3): -1,
(3, 2, 0): -1, (3, 2, 1): 0, (3, 2, 2): 0, (3, 2, 3): 0,
(3, 3, 0): 0, (3, 3, 1): 1, (3, 3, 2): 0, (3, 3, 3): 0
}
def __init__(self, name, lorentz, spin1, spin2):
aloha_lib.LorentzObject.__init__(self,name,[lorentz], [spin1, spin2])
def create_representation(self):
self.representation = aloha_lib.LorentzObjectRepresentation(self.gamma,
self.lorentz_ind,self.spin_ind)
class Gamma(aloha_lib.FactoryLorentz):
object_class = L_Gamma
@classmethod
def get_unique_name(self, lor, spin1, spin2):
return 'Gamma^%s_%s_%s' % (lor, spin1, spin2)
#===============================================================================
# Sigma
#===============================================================================
class L_Sigma(aloha_lib.LorentzObject):
""" Sigma Matrices """
#zero = [[0,0,0,0]]*4
#i = complex(0,1)
#sigma01 = [[ 0, -i, 0, 0], [-i, 0, 0, 0], [0, 0, 0, i], [0, 0, i, 0]]
#sigma02 = [[ 0, -1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1], [0, 0, -1, 0]]
#sigma03 = [[-i, 0, 0, 0], [0, i, 0, 0], [0, 0, i, 0], [0, 0, 0, -i]]
#sigma12 = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]]
#sigma13 = [[0, i, 0, 0], [-i, 0, 0, 0], [0, 0, 0, i], [0, 0, -i, 0]]
#sigma23 = [[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]]
#def inv(matrice):
# out=[]
# for i in range(4):
# out2=[]
# out.append(out2)
# for j in range(4):
# out2.append(-1*matrice[i][j])
# return out
#
#sigma =[[zero, sigma01, sigma02, sigma03], \
# [inv(sigma01), zero, sigma12, sigma13],\
# [inv(sigma02), inv(sigma12), zero, sigma23],\
# [inv(sigma03), inv(sigma13), inv(sigma23), zero]]
sigma={(0, 2, 0, 1): -0.5, (3, 1, 2, 0): 0, (3, 2, 3, 1): 0, (1, 3, 1, 3): 0,
(2, 3, 3, 2): 0.5, (2, 1, 3, 1): 0, (0, 2, 2, 1): 0, (3, 1, 0, 0): 0,
(2, 3, 3, 1): 0, (3, 3, 1, 2): 0, (3, 1, 0, 3): 0, (1, 1, 0, 3): 0,
(0, 1, 2, 2): 0, (3, 2, 3, 2): -0.5, (2, 1, 0, 1): 0, (3, 3, 3, 3): 0,
(1, 1, 2, 2): 0, (2, 2, 3, 2): 0, (2, 1, 2, 1): 0, (0, 1, 0, 3): 0,
(2, 1, 2, 2): -0.5, (1, 2, 2, 1): 0, (2, 2, 1, 3): 0, (0, 3, 1, 3): 0,
(3, 0, 3, 2): 0, (1, 2, 0, 1): 0, (3, 0, 3, 1): 0, (0, 0, 2, 2): 0,
(1, 2, 0, 2): 0, (2, 0, 0, 3): 0, (0, 0, 2, 1): 0, (0, 3, 3, 2): 0,
(3, 0, 1, 1): -0.5j, (3, 2, 0, 1): -0.5, (1, 0, 1, 0): 0.5j, (0, 0, 0, 1): 0,
(0, 2, 1, 1): 0, (3, 1, 3, 2): 0.5j, (3, 2, 2, 1): 0, (1, 3, 2, 3): 0.5j,
(1, 0, 3, 0): 0, (3, 2, 2, 2): 0, (0, 2, 3, 1): 0, (1, 0, 3, 3): 0,
(2, 3, 2, 1): 0, (0, 2, 3, 2): -0.5, (3, 1, 1, 3): 0, (1, 1, 1, 3): 0,
(1, 3, 0, 2): 0, (2, 3, 0, 1): 0.5, (1, 1, 1, 0): 0, (2, 3, 0, 2): 0,
(3, 3, 0, 3): 0, (1, 1, 3, 0): 0, (0, 1, 3, 3): 0, (2, 2, 0, 1): 0,
(2, 1, 1, 0): 0, (3, 3, 2, 2): 0, (2, 3, 1, 0): 0.5, (2, 2, 2, 3): 0,
(0, 3, 0, 3): 0, (0, 1, 1, 2): 0, (0, 3, 0, 0): -0.5j, (2, 3, 1, 1): 0,
(1, 2, 3, 0): 0, (2, 0, 1, 3): 0, (0, 0, 3, 1): 0, (0, 3, 2, 0): 0,
(2, 3, 1, 2): 0, (2, 0, 1, 0): -0.5, (1, 2, 1, 0): 0, (3, 0, 0, 2): 0,
(1, 0, 0, 2): 0, (0, 0, 1, 1): 0, (1, 2, 1, 3): 0, (2, 3, 1, 3): 0,
(2, 0, 3, 0): 0, (0, 0, 1, 2): 0, (1, 3, 3, 3): 0, (3, 2, 1, 0): -0.5,
(1, 3, 3, 0): 0, (1, 0, 2, 3): -0.5j, (0, 2, 0, 0): 0, (3, 1, 2, 3): -0.5j,
(3, 2, 3, 0): 0, (1, 3, 1, 0): -0.5j, (3, 2, 3, 3): 0, (0, 2, 2, 0): 0,
(2, 3, 3, 0): 0, (3, 3, 1, 3): 0, (0, 2, 2, 3): 0.5, (3, 1, 0, 2): 0,
(1, 1, 0, 2): 0, (3, 3, 1, 0): 0, (0, 1, 2, 3): 0.5j, (1, 1, 0, 1): 0,
(2, 1, 0, 2): 0, (0, 1, 2, 0): 0, (3, 3, 3, 0): 0, (1, 1, 2, 1): 0,
(2, 2, 3, 3): 0, (0, 1, 0, 0): 0, (2, 2, 3, 0): 0, (2, 1, 2, 3): 0,
(1, 2, 2, 2): 0.5, (2, 2, 1, 0): 0, (0, 3, 1, 2): 0, (0, 3, 1, 1): 0.5j,
(3, 0, 3, 0): 0, (1, 2, 0, 3): 0, (2, 0, 0, 2): 0, (0, 0, 2, 0): 0,
(0, 3, 3, 1): 0, (3, 0, 1, 0): 0, (2, 0, 0, 1): 0.5, (3, 2, 0, 2): 0,
(3, 0, 1, 3): 0, (1, 0, 1, 3): 0, (0, 0, 0, 0): 0, (0, 2, 1, 2): 0,
(3, 1, 3, 3): 0, (0, 0, 0, 3): 0, (1, 3, 2, 2): 0, (3, 1, 3, 0): 0,
(3, 2, 2, 3): -0.5, (1, 3, 2, 1): 0, (1, 0, 3, 2): -0.5j, (2, 3, 2, 2): 0,
(0, 2, 3, 3): 0, (3, 1, 1, 0): 0.5j, (1, 3, 0, 1): 0.5j, (1, 1, 1, 1): 0,
(2, 1, 3, 2): 0, (2, 3, 0, 3): 0, (3, 3, 0, 2): 0, (1, 1, 3, 1): 0,
(3, 3, 0, 1): 0, (2, 1, 3, 3): 0.5, (0, 1, 3, 2): 0.5j, (1, 1, 3, 2): 0,
(2, 1, 1, 3): 0, (3, 0, 2, 1): 0, (0, 1, 3, 1): 0, (3, 3, 2, 1): 0,
(2, 2, 2, 2): 0, (0, 1, 1, 1): 0, (2, 2, 2, 1): 0, (0, 3, 0, 1): 0,
(3, 0, 2, 2): -0.5j, (1, 2, 3, 3): -0.5, (0, 0, 3, 2): 0, (0, 3, 2, 1): 0,
(2, 0, 1, 1): 0, (2, 2, 0, 0): 0, (0, 3, 2, 2): 0.5j, (3, 0, 0, 3): 0,
(1, 0, 0, 3): 0, (1, 2, 1, 2): 0, (2, 0, 3, 1): 0, (1, 0, 0, 0): 0,
(0, 0, 1, 3): 0, (2, 0, 3, 2): 0.5, (3, 2, 1, 3): 0, (1, 3, 3, 1): 0,
(1, 0, 2, 0): 0, (2, 2, 0, 2): 0, (0, 2, 0, 3): 0, (3, 1, 2, 2): 0,
(1, 3, 1, 1): 0, (3, 1, 2, 1): 0, (2, 2, 0, 3): 0, (3, 0, 0, 1): 0,
(1, 3, 1, 2): 0, (2, 3, 3, 3): 0, (0, 2, 2, 2): 0, (3, 1, 0, 1): -0.5j,
(3, 3, 1, 1): 0, (1, 1, 0, 0): 0, (2, 1, 0, 3): 0, (0, 1, 2, 1): 0,
(3, 3, 3, 1): 0, (2, 1, 0, 0): -0.5, (1, 1, 2, 0): 0, (3, 3, 3, 2): 0,
(0, 1, 0, 1): -0.5j, (1, 1, 2, 3): 0, (2, 2, 3, 1): 0, (2, 1, 2, 0): 0,
(0, 1, 0, 2): 0, (1, 2, 2, 3): 0, (2, 0, 2, 1): 0, (2, 2, 1, 1): 0,
(1, 2, 2, 0): 0, (2, 2, 1, 2): 0, (0, 3, 1, 0): 0, (3, 0, 3, 3): 0.5j,
(2, 1, 3, 0): 0, (1, 2, 0, 0): 0.5, (0, 0, 2, 3): 0, (0, 3, 3, 0): 0,
(2, 0, 0, 0): 0, (3, 2, 0, 3): 0, (0, 3, 3, 3): -0.5j, (3, 0, 1, 2): 0,
(1, 0, 1, 2): 0, (3, 2, 0, 0): 0, (0, 2, 1, 3): 0, (1, 0, 1, 1): 0,
(0, 0, 0, 2): 0, (0, 2, 1, 0): 0.5, (3, 1, 3, 1): 0, (3, 2, 2, 0): 0,
(1, 3, 2, 0): 0, (1, 0, 3, 1): 0, (2, 3, 2, 3): 0.5, (0, 2, 3, 0): 0,
(3, 1, 1, 1): 0, (2, 3, 2, 0): 0, (1, 3, 0, 0): 0, (3, 1, 1, 2): 0,
(1, 1, 1, 2): 0, (1, 3, 0, 3): 0, (2, 3, 0, 0): 0, (2, 0, 2, 0): 0,
(3, 3, 0, 0): 0, (1, 1, 3, 3): 0, (2, 1, 1, 2): 0, (0, 1, 3, 0): 0,
(3, 3, 2, 0): 0, (2, 1, 1, 1): 0.5, (2, 0, 2, 2): 0, (3, 3, 2, 3): 0,
(0, 1, 1, 0): -0.5j, (2, 2, 2, 0): 0, (0, 3, 0, 2): 0, (3, 0, 2, 3): 0,
(0, 1, 1, 3): 0, (2, 0, 2, 3): -0.5, (1, 2, 3, 2): 0, (3, 0, 2, 0): 0,
(0, 0, 3, 3): 0, (1, 2, 3, 1): 0, (2, 0, 1, 2): 0, (0, 0, 3, 0): 0,
(0, 3, 2, 3): 0, (3, 0, 0, 0): 0.5j, (1, 2, 1, 1): -0.5, (1, 0, 0, 1): 0.5j,
(0, 0, 1, 0): 0, (2, 0, 3, 3): 0, (3, 2, 1, 2): 0, (1, 3, 3, 2): -0.5j,
(1, 0, 2, 1): 0, (3, 2, 1, 1): 0, (0, 2, 0, 2): 0, (1, 0, 2, 2): 0}
def __init__(self, name, lorentz1, lorentz2, spin1, spin2):
aloha_lib.LorentzObject.__init__(self, name, [lorentz1, lorentz2], \
[spin1, spin2])
def create_representation(self):
self.representation = aloha_lib.LorentzObjectRepresentation(self.sigma,
self.lorentz_ind,self.spin_ind)
class Sigma(aloha_lib.FactoryLorentz):
object_class = L_Sigma
@classmethod
def get_unique_name(self, lorentz1, lorentz2, spin1, spin2):
return 'Sigma_[%s,%s]^[%s,%s]' % (spin1, spin2, lorentz1, lorentz2)
#===============================================================================
# Gamma5
#===============================================================================
class L_Gamma5(aloha_lib.LorentzObject):
    #gamma5 = [[-1, 0, 0, 0], [0, -1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
gamma5 = {(0,0): -1, (0,1): 0, (0,2): 0, (0,3): 0,\
(1,0): 0, (1,1): -1, (1,2): 0, (1,3): 0,\
(2,0): 0, (2,1): 0, (2,2): 1, (2,3): 0,\
(3,0): 0, (3,1): 0, (3,2): 0, (3,3): 1}
def __init__(self, name, spin1, spin2):
aloha_lib.LorentzObject.__init__(self, name, [], [spin1, spin2])
def create_representation(self):
self.representation = aloha_lib.LorentzObjectRepresentation(self.gamma5,
self.lorentz_ind,self.spin_ind)
class Gamma5(aloha_lib.FactoryLorentz):
object_class = L_Gamma5
@classmethod
def get_unique_name(self, spin1, spin2):
return 'Gamma5_%s_%s' % (spin1, spin2)
#===============================================================================
# Conjugate Matrices
#===============================================================================
class L_C(aloha_lib.LorentzObject):
    #[[0, -1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1], [0, 0, -1, 0]]
Cmetrix = {(0,0): 0, (0,1): -1, (0,2): 0, (0,3): 0,\
(1,0): 1, (1,1): 0, (1,2): 0, (1,3): 0,\
(2,0): 0, (2,1): 0, (2,2): 0, (2,3): 1,\
(3,0): 0, (3,1): 0, (3,2): -1, (3,3): 0}
def __init__(self, name, spin_list):
# spin_list is automatically ordered. The sign for the symmetrization
# is set in the Factory routine
aloha_lib.LorentzObject.__init__(self, name, [], spin_list)
def create_representation(self):
self.representation = aloha_lib.LorentzObjectRepresentation(self.Cmetrix,
self.lorentz_ind,self.spin_ind)
class C(aloha_lib.FactoryLorentz):
object_class = L_C
def __new__(cls, spin1, spin2):
spin_list = [spin1, spin2]
spin_list.sort()
sign = give_sign_perm(spin_list, [spin1, spin2])
name = cls.get_unique_name(spin_list)
if sign == 1:
return aloha_lib.FactoryVar.__new__(cls, name, cls.object_class, spin_list)
else:
out = aloha_lib.FactoryVar.__new__(cls, name, cls.object_class, spin_list)
out.prefactor = -1
return out
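    # Example: C(2, 1) is stored under the sorted name C_1_2 with a -1 prefactor,
    # reflecting the antisymmetry of the charge-conjugation matrix above.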
@classmethod
def get_unique_name(cls, spin_list):
return "C_%s_%s" % tuple(spin_list)
#===============================================================================
# EPSILON
#===============================================================================
#Helpful function
def give_sign_perm(perm0, perm1):
"""Check if 2 permutations are of equal parity.
Assume that both permutation lists are of equal length
and have the same elements. No need to check for these
conditions.
"""
assert len(perm0) == len(perm1)
perm1 = list(perm1) ## copy this into a list so we don't mutate the original
perm1_map = dict((v, i) for i,v in enumerate(perm1))
transCount = 0
for loc, p0 in enumerate(perm0):
p1 = perm1[loc]
if p0 != p1:
sloc = perm1_map[p0] # Find position in perm1
perm1[loc], perm1[sloc] = p0, p1 # Swap in perm1
perm1_map[p0], perm1_map[p1] = loc, sloc # Swap the map
transCount += 1
# Even number of transposition means equal parity
return -2 * (transCount % 2) + 1
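# Example: identical permutations give +1, a single transposition gives -1:
#   give_sign_perm([0, 1, 2, 3], [0, 1, 2, 3]) ->  1
#   give_sign_perm([0, 1, 2, 3], [1, 0, 2, 3]) -> -1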
# Practical definition of Epsilon
class L_Epsilon(aloha_lib.LorentzObject):
""" The fully anti-symmetric object in Lorentz-Space """
def give_parity(self, perm):
"""return the parity of the permutation"""
assert set(perm) == set([0,1,2,3])
i1 , i2, i3, i4 = perm
#formula found on wikipedia
return -self.sign * ((i2-i1) * (i3-i1) *(i4-i1) * (i3-i2) * (i4-i2) *(i4-i3))/12
# DEFINE THE REPRESENTATION OF EPSILON
def __init__(self, name, lorentz1, lorentz2, lorentz3, lorentz4):
lorentz_list = [lorentz1 , lorentz2, lorentz3, lorentz4]
#order_lor = list(lorentz_list)
#order_lor.sort()
#self.sign = give_sign_perm(order_lor, lorentz_list)
self.sign=1
aloha_lib.LorentzObject.__init__(self, name, lorentz_list, [])
def create_representation(self):
if not hasattr(self, 'epsilon'):
# init all element to zero
epsilon = dict( ((l1, l2, l3, l4), 0)
for l1 in range(4) \
for l2 in range(4) \
for l3 in range(4) \
for l4 in range(4))
# update non trivial one
epsilon.update(dict(
((l1, l2, l3, l4), self.give_parity((l1,l2,l3,l4)))
for l1 in range(4) \
for l2 in range(4) if l2 != l1\
for l3 in range(4) if l3 not in [l1,l2]\
for l4 in range(4) if l4 not in [l1,l2,l3]))
L_Epsilon.epsilon = epsilon
self.representation = aloha_lib.LorentzObjectRepresentation(self.epsilon,
self.lorentz_ind,self.spin_ind)
class Epsilon(aloha_lib.FactoryLorentz):
object_class = L_Epsilon
@classmethod
def get_unique_name(cls,l1,l2,l3,l4):
return '_EPSILON_%s_%s_%s_%s' % (l1,l2,l3,l4)
#===============================================================================
# Metric
#===============================================================================
class L_Metric(aloha_lib.LorentzObject):
metric = {(0,0): 1, (0,1): 0, (0,2): 0, (0,3): 0,\
(1,0): 0, (1,1): -1, (1,2): 0, (1,3): 0,\
(2,0): 0, (2,1): 0, (2,2): -1, (2,3): 0,\
(3,0): 0, (3,1): 0, (3,2): 0, (3,3): -1}
#[[1, 0, 0,0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, -1]]
def __init__(self, name, lorentz1, lorentz2):
aloha_lib.LorentzObject.__init__(self,name,[lorentz1, lorentz2], [])
def create_representation(self):
self.representation = aloha_lib.LorentzObjectRepresentation(self.metric,
self.lorentz_ind,self.spin_ind)
class Metric(aloha_lib.FactoryLorentz):
object_class = L_Metric
@classmethod
def get_unique_name(cls,l1,l2):
if l1<l2:
return '_ETA_%s_%s' % (l1,l2)
else:
return '_ETA_%s_%s' % (l2,l1)
#===============================================================================
# Identity
#===============================================================================
class L_Identity(aloha_lib.LorentzObject):
#identity = [[1, 0, 0,0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
identity = {(0,0): 1, (0,1): 0, (0,2): 0, (0,3): 0,\
(1,0): 0, (1,1): 1, (1,2): 0, (1,3): 0,\
(2,0): 0, (2,1): 0, (2,2): 1, (2,3): 0,\
(3,0): 0, (3,1): 0, (3,2): 0, (3,3): 1}
def __init__(self, name, spin1, spin2):
aloha_lib.LorentzObject.__init__(self, name, [],[spin1, spin2])
def create_representation(self):
self.representation = aloha_lib.LorentzObjectRepresentation(self.identity,
self.lorentz_ind,self.spin_ind)
class Identity(aloha_lib.FactoryLorentz):
object_class = L_Identity
@classmethod
def get_unique_name(self, spin1, spin2):
return 'Id_%s_%s' % (spin1, spin2)
#===============================================================================
# IdentityL
#===============================================================================
class L_IdentityL(aloha_lib.LorentzObject):
#identity = [[1, 0, 0,0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
identity = {(0,0): 1, (0,1): 0, (0,2): 0, (0,3): 0,\
(1,0): 0, (1,1): 1, (1,2): 0, (1,3): 0,\
(2,0): 0, (2,1): 0, (2,2): 1, (2,3): 0,\
(3,0): 0, (3,1): 0, (3,2): 0, (3,3): 1}
def __init__(self, name, l1, l2):
aloha_lib.LorentzObject.__init__(self, name, [l1,l2], [])
def create_representation(self):
self.representation = aloha_lib.LorentzObjectRepresentation(self.identity,
self.lorentz_ind,self.spin_ind)
class IdentityL(aloha_lib.FactoryLorentz):
    object_class = L_IdentityL
@classmethod
def get_unique_name(self, l1, l2):
return 'IdL_%s_%s' % (l1, l2)
#===============================================================================
# ProjM
#===============================================================================
class L_ProjM(aloha_lib.LorentzObject):
""" A object for (1-gamma5)/2 """
#projm = [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]
projm= {(0,0): 1, (0,1): 0, (0,2): 0, (0,3): 0,\
(1,0): 0, (1,1): 1, (1,2): 0, (1,3): 0,\
(2,0): 0, (2,1): 0, (2,2): 0, (2,3): 0,\
(3,0): 0, (3,1): 0, (3,2): 0, (3,3): 0}
def __init__(self,name, spin1, spin2):
"""Initialize the object"""
aloha_lib.LorentzObject.__init__(self, name, [], [spin1, spin2])
def create_representation(self):
self.representation = aloha_lib.LorentzObjectRepresentation(self.projm,
self.lorentz_ind,self.spin_ind)
class ProjM(aloha_lib.FactoryLorentz):
object_class = L_ProjM
@classmethod
def get_unique_name(self, spin1, spin2):
return 'PROJM_%s_%s' % (spin1, spin2)
#===============================================================================
# ProjP
#===============================================================================
class L_ProjP(aloha_lib.LorentzObject):
"""A object for (1+gamma5)/2 """
#projp = [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]]
projp = {(0,0): 0, (0,1): 0, (0,2): 0, (0,3): 0,\
(1,0): 0, (1,1): 0, (1,2): 0, (1,3): 0,\
(2,0): 0, (2,1): 0, (2,2): 1, (2,3): 0,\
(3,0): 0, (3,1): 0, (3,2): 0, (3,3): 1}
def __init__(self,name, spin1, spin2):
"""Initialize the object"""
aloha_lib.LorentzObject.__init__(self, name, [], [spin1, spin2])
def create_representation(self):
self.representation = aloha_lib.LorentzObjectRepresentation(self.projp,
self.lorentz_ind, self.spin_ind)
class ProjP(aloha_lib.FactoryLorentz):
object_class = L_ProjP
@classmethod
def get_unique_name(self, spin1, spin2):
return 'PROJP_%s_%s' % (spin1, spin2)
#===============================================================================
# Denominator Propagator
#===============================================================================
class DenominatorPropagator(aloha_lib.LorentzObject):
"""The Denominator of the Propagator"""
def __new__(cls, particle):
name = 'DenomP%s' % particle
return aloha_lib.Variable.__new__(cls, name)
def __init__(self, particle):
if self:
return
self.particle = particle
aloha_lib.LorentzObject.__init__(self, [], [])
def get_unique_name(self,*args):
return 'DenomP%s' % self.particle
def simplify(self):
"""Return the Denominator in a abstract way"""
mass = Mass(self.particle)
width = Width(self.particle)
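        # Standard propagator denominator p.p - m^2 + i*m*Gamma, built from abstract objects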
denominator = P('i1', self.particle) * P('i1', self.particle) - \
mass * mass + complex(0,1) * mass* width
return denominator
def create_representation(self):
"""Create the representation for the Vector propagator"""
object = self.simplify()
self.representation = object.expand()
#===============================================================================
# Numerator Propagator
#===============================================================================
SpinorPropagatorout = lambda spin1, spin2, particle: -1 * (Gamma('mu', spin1, spin2) * \
P('mu', particle) - Mass(particle) * Identity(spin1, spin2))
SpinorPropagatorin = lambda spin1, spin2, particle: (Gamma('mu', spin1, spin2) * \
P('mu', particle) + Mass(particle) * Identity(spin1, spin2))
VectorPropagator = lambda l1, l2, part: complex(0,1)*(-1 * Metric(l1, l2) + OverMass2(part) * \
Metric(l1,'I3')* P('I3', part) * P(l2, part))
VectorPropagatorMassless= lambda l1, l2, part: complex(0,-1) * Metric(l1, l2)
Spin3halfPropagatorin = lambda mu, nu, s1, s2, part: (\
-1/3 * (Gamma(mu,s1,-2) + Identity(s1, -2) * P(mu, part) * Mass(part) * OverMass2(part))* \
(PSlash(-2,-3, part) - Identity(-2,-3) * Mass(part)) * \
( Gamma(nu, -3, s2)+ Mass(part) * OverMass2(part) * Identity(-3, s2) * P(nu, part) ) - \
(PSlash(s1,s2, part) + Mass(part) * Identity(s1,s2)) * (Metric(mu, nu) - OverMass2(part) * P(mu, part) * P(nu,part)))
Spin3halfPropagatorout = lambda mu, nu, s1, s2, part: ( \
-1/3 * (Gamma(mu,s1,-2) - Identity(s1, -2) * P(mu, part) * Mass(part) * OverMass2(part))* \
(-1*PSlash(-2,-3, part) - Identity(-2,-3) * Mass(part)) * \
( Gamma(nu, -3, s2)- Mass(part) * OverMass2(part) * Identity(-3, s2) * P(nu, part) ) - \
(-1*PSlash(s1,s2, part)
+ Mass(part) * Identity(s1,s2)) * (Metric(mu, nu) - OverMass2(part) * P(mu, part) * P(nu,part)))
Spin3halfPropagatorMasslessOut = lambda mu, nu, s1, s2, part: Gamma(nu, s1,-1) * PSlash(-1,-2, part) * Gamma(mu,-2, s2)
Spin3halfPropagatorMasslessIn = lambda mu, nu, s1, s2, part: -1 * Gamma(mu, s1,-1) * PSlash(-1,-2, part) * Gamma(nu,-2, s2)
Spin2masslessPropagator = lambda mu, nu, alpha, beta: 1/2 *( Metric(mu, alpha)* Metric(nu, beta) +\
Metric(mu, beta) * Metric(nu, alpha) - Metric(mu, nu) * Metric(alpha, beta))
Spin2Propagator = lambda mu, nu, alpha, beta, part: Spin2masslessPropagator(mu, nu, alpha, beta) + \
- 1/2 * OverMass2(part) * (Metric(mu,alpha)* P(nu, part) * P(beta, part) + \
Metric(nu, beta) * P(mu, part) * P(alpha, part) + \
Metric(mu, beta) * P(nu, part) * P(alpha, part) + \
Metric(nu, alpha) * P(mu, part) * P(beta , part) )+ \
1/6 * (Metric(mu,nu) + 2 * OverMass2(part) * P(mu, part) * P(nu, part)) * \
(Metric(alpha,beta) + 2 * OverMass2(part) * P(alpha, part) * P(beta, part))
| [
"[email protected]"
] | |
77b74a14ad5a1874eb757c258db26fc759163437 | 43e900f11e2b230cdc0b2e48007d40294fefd87a | /laioffer/remove-certain-characters.py | ba414a70cff9fca8d6bb41e33f8626f682e9c25a | [] | no_license | DarkAlexWang/leetcode | 02f2ed993688c34d3ce8f95d81b3e36a53ca002f | 89142297559af20cf990a8e40975811b4be36955 | refs/heads/master | 2023-01-07T13:01:19.598427 | 2022-12-28T19:00:19 | 2022-12-28T19:00:19 | 232,729,581 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | class Solution:
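    # Remove every character that appears in t from string: a slow/fast two-pointer
    # scan compacts the kept characters into a list copy in O(n) time.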
def remove(self, string, t):
array = list(string)
uniq_t = set(t)
slow = 0
fast = 0
for fast in range(0, len(array)):
if array[fast] not in uniq_t:
array[slow] = array[fast]
slow += 1
res = ""
for i in range(slow):
res += array[i]
return res
if __name__ == "__main__":
solution = Solution()
res = solution.remove("aaabbbccc", "a")
print(res)
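    # Expected output: "bbbccc"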
| [
"[email protected]"
] | |
87b5b1013121a670da7f12288049e9aa81b73e98 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-9528.py | 9b6ba89efb100cc35e49f496f579cb54f9ede179 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,756 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
items: [int] = None
items2: [int] = None
size: int = 0
size2: int = 0
def __init__(self:"Vector2"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector2") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector2") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector2") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector2", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector2", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector2", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector2", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector2", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector2", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector2") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector2") -> int:
return self.size
# A resizable list of integers
class Vector3(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
def __init__(self:"Vector3"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector3") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector3") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector3") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector3", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector3", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector3", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector3", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector3", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector3", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector3") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector3") -> int:
return self.size
# A resizable list of integers
class Vector4(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
def __init__(self:"Vector4"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector4") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector4") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector4") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector4", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector4", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector4", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector4", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector4", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector4", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector4") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector4") -> int:
return self.size
# A resizable list of integers
class Vector5(object):
items: [int] = None
items2: [int] = None
items3: [int] = None
items4: [int] = None
items5: [int] = None
size: int = 0
size2: int = 0
size3: int = 0
size4: int = 0
size5: int = 0
def __init__(self:"Vector5"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity2(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity3(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity4(self:"Vector5") -> int:
return len(self.items)
# Returns current capacity
def capacity5(self:"Vector5") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity2(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity3(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity4(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Increases capacity of vector by one element
def increase_capacity5(self:"Vector5") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector5", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append2(self:"Vector5", item: int, item2: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends one item to end of vector
def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector5", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
item:int = 0
item2:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
for item in new_items:
self.append(item)
# Appends many items to end of vector
def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
item:int = 0
item2:int = 0
item3:int = 0
item4:int = 0
item5:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector5", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Removes an item from the middle of vector
def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector5", idx: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get2(self:"Vector5", idx: int, idx2: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
return self.items[idx]
# Retrieves an item at a given index
def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length2(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length3(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length4(self:"Vector5") -> int:
return self.size
# Retrieves the current size of the vector
def length5(self:"Vector5") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
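# Doubling the backing list keeps the amortised cost of an append roughly constant
# until doubling_limit is reached, after which growth falls back to one slot at a time.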
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector2") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector3") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector4") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
doubling_limit:int = 1000
doubling_limit2:int = 1000
doubling_limit3:int = 1000
doubling_limit4:int = 1000
doubling_limit5:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity2(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity3(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity4(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Overriding to do fewer resizes
def increase_capacity5(self:"DoublingVector5") -> int:
if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
    k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
e856c3512502cc8ddd31849054c4633d661bca3c | 9d6271fd3851acb797a5120e0d884130f7548833 | /kmeans.py | 4950fd689a0074d89dbcfb3e82ec63e3d12597e9 | [] | no_license | Wenbin94/toolbox | f5d69e1b3a158ad076562829e2d83738e282da04 | e88e1ba51e5a4c963626000b434072b6aa64e09d | refs/heads/master | 2020-08-22T02:50:57.779313 | 2019-10-08T10:57:52 | 2019-10-08T10:57:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,233 | py | '''
2019.10.8 ming71
Purpose: k-means clustering of bounding boxes (anchor clustering).
Notes:
    - The stopping criterion is "the index of each box's nearest seed no longer changes",
      not "the seed values no longer change", so clustering can stop early; change it if needed.
    - Only VOC-style annotations are supported for now.
    - To support a different annotation format, just rewrite get_all_boxes.
'''
import numpy as np
import glob
import os
from decimal import Decimal
class Kmeans:
def __init__(self, cluster_number, all_boxes, save_path):
self.cluster_number = cluster_number
self.all_boxes = all_boxes
self.save_path = save_path
    # Input: two 2-D arrays -- all boxes and the seed (cluster) boxes
    # Output: an array of shape [num_boxes, k]
    def iou(self, boxes, clusters): # 1 box -> k clusters
        n = boxes.shape[0]
        k = self.cluster_number # number of clusters
        box_area = boxes[:, 0] * boxes[:, 1] # take columns 0 and 1 (w, h) and multiply -> row vector of ground-truth areas
        box_area = box_area.repeat(k) # repeat the row vector k times
        box_area = np.reshape(box_area, (n, k))
        cluster_area = clusters[:, 0] * clusters[:, 1] # row vector of seed-box areas
cluster_area = np.tile(cluster_area, [1, n])
cluster_area = np.reshape(cluster_area, (n, k))
box_w_matrix = np.reshape(boxes[:, 0].repeat(k), (n, k))
cluster_w_matrix = np.reshape(np.tile(clusters[:, 0], (1, n)), (n, k))
min_w_matrix = np.minimum(cluster_w_matrix, box_w_matrix)
box_h_matrix = np.reshape(boxes[:, 1].repeat(k), (n, k))
cluster_h_matrix = np.reshape(np.tile(clusters[:, 1], (1, n)), (n, k))
min_h_matrix = np.minimum(cluster_h_matrix, box_h_matrix)
inter_area = np.multiply(min_w_matrix, min_h_matrix)
result = inter_area / (box_area + cluster_area - inter_area + 1e-16)
        assert (result > 0).all(), 'negative anchors present, cluster again!'
return result
def avg_iou(self, boxes, clusters):
accuracy = np.mean([np.max(self.iou(boxes, clusters), axis=1)])
return accuracy
    # Note: clustering stops when the index of each box's nearest seed no longer changes,
    # not when the seed values themselves stop changing, which is slightly less accurate.
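    # A stricter alternative (a sketch, not part of the original code) would stop only
    # when the seed boxes themselves converge, e.g.:
    #   new_clusters = np.array([dist(boxes[current_nearest == c], axis=0) for c in range(k)])
    #   if np.allclose(new_clusters, clusters): break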
    def kmeans(self, boxes, k, dist=np.median):
        box_number = boxes.shape[0] # number of boxes
        distances = np.empty((box_number, k)) # [box_number, k] array holding the custom IoU distance (object x anchor)
        last_nearest = np.zeros((box_number,)) # vector of length box_number
        np.random.seed()
        clusters = boxes[np.random.choice(
            box_number, k, replace=False)] # random seed initialization
        # Duplicate seeds break the computation -- avoid them!
while True :
uniques_clusters = np.unique(clusters,axis=0)
if len(uniques_clusters)==len(clusters) :
break
clusters = boxes[np.random.choice(box_number, k, replace=False)]
# k-means
while True:
            # Each iteration: compute every box's custom distance to the k seeds, assign each
            # box to its nearest seed, recompute the cluster centers, then repeat.
            distances = 1 - self.iou(boxes, clusters) # larger IoU means smaller distance
            current_nearest = np.argmin(distances, axis=1) # length-box_number vector: each box's current cluster (0..k-1)
            if (last_nearest == current_nearest).all(): # no box changed cluster, so stop
                break
            # Compute the new coordinates of the k seeds
            for cluster in range(k):
                clusters[cluster] = dist(boxes[current_nearest == cluster], axis=0) # move each seed to the median of its members
last_nearest = current_nearest
return clusters
def result2txt(self, data):
f = open(self.save_path, 'w')
row = np.shape(data)[0]
for i in range(row):
if i == 0:
x_y = "%d,%d" % (data[i][0], data[i][1])
else:
x_y = ", %d,%d" % (data[i][0], data[i][1])
f.write(x_y)
        f.close() # final output format: w1,h1, w2,h2, w3,h3, ...
    def clusters(self):
        all_boxes = np.array(self.all_boxes) # 2-D array of all ground-truth widths and heights
        result = self.kmeans(all_boxes, k=self.cluster_number) # cluster the (w, h) pairs; returns a k x 2 array
        result = result[np.lexsort(result.T[0, None])] # re-sort the resulting anchors by width, ascending
self.result2txt(result)
print("K anchors:\n {}".format(result))
print("Accuracy: {:.2f}%".format(
self.avg_iou(all_boxes, result) * 100))
# Return the boxes of all labels, as [[w1,h1],[w2,h2],...]
def get_all_boxes(path):
mode = 'voc'
boxes = []
labels = sorted(glob.glob(os.path.join(path, '*.*')))
for label in labels:
with open(label,'r') as f:
contents = f.read()
objects = contents.split('<object>')
objects.pop(0)
            assert len(objects) > 0, 'No object found in ' + label
for object in objects:
xmin = int(object[object.find('<xmin>')+6 : object.find('</xmin>')])
xmax = int(object[object.find('<xmax>')+6 : object.find('</xmax>')])
ymin = int(object[object.find('<ymin>')+6 : object.find('</ymin>')])
ymax = int(object[object.find('<ymax>')+6 : object.find('</ymax>')])
box_w = xmax - xmin
box_h = ymax - ymin
boxes.append((box_w,box_h))
return boxes
if __name__ == "__main__":
    cluster_number = 9 # number of seeds, i.e. the number of anchors
label_path = r'/py/datasets/ship/tiny_ships/yolo_ship/train_labels'
save_path = r'/py/yolov3/cfg/anchor-cluster.txt'
all_boxes = get_all_boxes(label_path)
kmeans = Kmeans(cluster_number, all_boxes,save_path)
kmeans.clusters()
| [
"[email protected]"
] | |
65b8808ec3e1a0d27451e396ee0d6a134cdabb91 | a98cab2f9c24a85a5f46b2cbec7506b79f4ea634 | /app/src/models/sentence_model.py | 1588e64f2771d2064366357aaa9e173d0246e6a2 | [] | no_license | DIS-SIN/ODSC-2019 | b8b8d10b41d95925219a0be36b5ef8b541396681 | c2a606471452e358f0e245841e78f562c570bbf5 | refs/heads/master | 2020-05-17T18:27:08.591454 | 2019-04-30T03:02:35 | 2019-04-30T03:02:35 | 183,884,786 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | from neomodel import (
StructuredNode,
StringProperty,
DateTimeProperty,
UniqueIdProperty
)
from datetime import datetime
class Sentence(StructuredNode):
nodeId = UniqueIdProperty()
sentence = StringProperty(required=True)
addedOn = DateTimeProperty(default_now=True)
updatedOn = DateTimeProperty()
sentimentScore = StringProperty()
magnitudeScore = StringProperty()
def pre_save(self):
self.updatedOn = datetime.utcnow()
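# Illustrative usage (a sketch; assumes a configured neomodel database connection):
#   s = Sentence(sentence="Hello world").save()  # nodeId and addedOn are filled automatically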
| [
"[email protected]"
] | |
184895f8be106c50e7efd39383cec128bad28d48 | 8780bc7f252f14ff5406ce965733c099034920b7 | /pyCode/pagesize/pagesize/wsgi.py | dfa911c708e95d9a1ec3be3f2f82bcdcfb628314 | [] | no_license | 13661892653/workspace | 5e4e458d31b9355c67d67ba7d9faccbcc1ac9f6b | 17960becabb3b4f0fc30009c71a11c4f7a5f8330 | refs/heads/master | 2020-12-24T20:00:15.541432 | 2018-08-14T13:56:15 | 2018-08-14T13:56:15 | 86,225,975 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | """
WSGI config for pagesize project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pagesize.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
72400fc6b6ffce55fbd9162fc62cecddf26120d2 | d8169f7c2efdeb40fe9dcdd59ce040138804d2af | /2nd/mysite/settings.py | b80fdf7c56fd69ac13acd7925dd80038a10abed8 | [] | no_license | KimDoKy/pyDjango | d9ab67b6da6541ebd04658945922d9924a85b107 | 53ef776dd20488f0dfda6b7e3fd5281e8f3e98fd | refs/heads/master | 2020-12-30T13:08:15.951633 | 2017-10-04T10:01:15 | 2017-10-04T10:01:15 | 91,325,691 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,604 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*kr)qf=3+unt7*9chabk@bc#(esu0cs8_o)nqgg8!e%crpv@5+'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bookmark.apps.BookmarkConfig',
'blog.apps.BlogConfig',
'tagging.apps.TaggingConfig',
'disqus',
'django.contrib.sites',
'photo.apps.PhotoConfig',
]
DISQUS_WEBSITE_SHORTNAME = 'dokys-blog'
SITE_ID = 1
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
# TIME_ZONE = 'UTC'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
# Media
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
#LOGIN_URL = '/accounts/login/'
#LOGOUT_URL = '/accounts/logout/'
LOGIN_REDIRECT_URL = '/' | [
"[email protected]"
] | |
52bc7f128792a60754a8768605b64ec973f3a0b1 | c61f41a8655b39098ffa257fb994979d17dfb10c | /cremilda/parser.py | f9fd5a4c5ff50d4043307e03b5cbf47de4a5c04b | [] | no_license | luizpaulosilva/compiladores-1 | 48f09085c0f61b2f1bea0507adde9a03473b2d23 | f553d9de0b6cd764d11bd533cec6bde9877d6587 | refs/heads/master | 2020-03-18T10:50:01.200756 | 2018-05-03T17:13:41 | 2018-05-03T17:13:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | import ox
from .lexer import tokens
from .ast import BinOp, FCall, Atom, Assign
def make_parser():
return ox.make_parser([
('module : statement SEMICOLON', lambda x, _: [x]),
('module : statement SEMICOLON module', statements),
('statement : NAME EQ expr', var_def),
('expr : atom OP expr', op_call),
('expr : atom', identity),
('atom : NUMBER', lambda x: Atom(float(x))),
('atom : STRING', lambda x: Atom(x[1:-1])),
('atom : BOOL', lambda x: Atom(x == 'true')),
('atom : LPAR expr RPAR', lambda x, y, z: y),
('atom : fcall', identity),
('fcall : NAME LPAR RPAR', lambda x, y, z: FCall(x, [])),
('fcall : NAME LPAR args RPAR', fcall),
('args : expr COMMA args', lambda x, _, xs: [x, *xs]),
('args : expr', lambda x: [x]),
], tokens=tokens)
# Funçoes auxiliares
identity = (lambda x: x)
op_call = (lambda x, op, y: BinOp(op, x, y))
fcall = (lambda x, y, z, w: FCall(x, z))
statements = (lambda x, _, xs: [x, *xs])
var_def = (lambda name, eq, expr: Assign(name, expr))
# Build the parser
parser = make_parser()
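# Illustrative usage (a sketch -- assumes a `lexer` callable from .lexer that yields the tokens above):
#   parser(lexer('x = 1 + 2;'))  ->  [Assign('x', BinOp('+', Atom(1.0), Atom(2.0)))]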
| [
"[email protected]"
] | |
51ed26d155d3ac70a5b01ef59f20d79a642bf07f | ef6229d281edecbea3faad37830cb1d452d03e5b | /ucsmsdk/mometa/adaptor/AdaptorIscsiAuth.py | 85653c123f5847d9bf6701d752efdd160c69cfe0 | [
"Apache-2.0"
] | permissive | anoop1984/python_sdk | 0809be78de32350acc40701d6207631322851010 | c4a226bad5e10ad233eda62bc8f6d66a5a82b651 | refs/heads/master | 2020-12-31T00:18:57.415950 | 2016-04-26T17:39:38 | 2016-04-26T17:39:38 | 57,148,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,402 | py | """This module contains the general information for AdaptorIscsiAuth ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class AdaptorIscsiAuthConsts():
pass
class AdaptorIscsiAuth(ManagedObject):
"""This is AdaptorIscsiAuth class."""
consts = AdaptorIscsiAuthConsts()
naming_props = set([])
mo_meta = MoMeta("AdaptorIscsiAuth", "adaptorIscsiAuth", "iscsi-auth", VersionMeta.Version201m, "InputOutput", 0x1f, [], ["read-only"], [], [], ["Get"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version201m, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"password": MoPropertyMeta("password", "password", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version201m, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"user_id": MoPropertyMeta("user_id", "userId", "string", VersionMeta.Version201m, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"password": "password",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"userId": "user_id",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.password = None
self.sacl = None
self.status = None
self.user_id = None
ManagedObject.__init__(self, "AdaptorIscsiAuth", parent_mo_or_dn, **kwargs)
| [
"[email protected]"
] | |
b07068e53d5ceac86d2431c09b775cdc9a8e872a | 159aed4755e47623d0aa7b652e178296be5c9604 | /data/scripts/templates/object/tangible/lair/base/shared_poi_all_lair_insecthill_small_fog_gray.py | 7b3c9de5103bfa24a894b44ac8e8c59d5f7349ac | [
"MIT"
] | permissive | anhstudios/swganh | fb67d42776864b1371e95f769f6864d0784061a3 | 41c519f6cdef5a1c68b369e760781652ece7fec9 | refs/heads/develop | 2020-12-24T16:15:31.813207 | 2016-03-08T03:54:32 | 2016-03-08T03:54:32 | 1,380,891 | 33 | 44 | null | 2016-03-08T03:54:32 | 2011-02-18T02:32:45 | Python | UTF-8 | Python | false | false | 469 | py | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/lair/base/shared_poi_all_lair_insecthill_small_fog_gray.iff"
result.attribute_template_id = -1
result.stfName("lair_n","insecthill")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | [
"[email protected]"
] | |
123bd91eada7ece3d6f864c35413bb2c53b6a044 | 156d054848b211fd4ca75057b9b448c9260fdd7d | /python-data-analysis/python_data_analysis/ch06/ch06-6.py | 9c509a860d71e83883edf980f1ddaa56f8617c1d | [] | no_license | wwxFromTju/Python-datascience | adfc06030dc785901b5fd33824529f86fcf41c54 | 7c58526ef54a6f10cbe1d4c7e5e024ddc423908a | refs/heads/master | 2021-01-20T17:36:51.701638 | 2016-09-04T11:21:56 | 2016-09-04T11:21:56 | 58,730,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,564 | py | #!/usr/bin/env python
# encoding=utf-8
from urllib2 import urlopen
from lxml.html import parse
import pandas as pd
import numpy as np
from pandas.io.parsers import TextParser
from pandas import Series, DataFrame
# XML and HTML
# Specify kind to extract either the column names or the data from a row
def _unpack(row, kind='td'):
elts = row.findall('.//%s' % kind)
return [val.text_content() for val in elts]
# Get the column names and data from a table element
def parse_options_data(table):
rows = table.findall('.//tr')
header = _unpack(rows[0], kind='th')
data = [_unpack(r) for r in rows[1:]]
return TextParser(data, names=header).get_chunk()
# Open the page with urlopen, then parse the returned stream with lxml
parsed = parse(urlopen('http://finance.yahoo.com/q/op?s=APPL+Options'))
print parsed
doc = parsed.getroot()
print doc
# Use XPath to access the individual tags
# Access all the URL links
links = doc.findall('.//a')
# These are HTML element objects; use each object's get('href') for the URL and text_content() for the displayed link text
print links[15:20]
lnk = links[28]
print lnk
print lnk.get('href')
print lnk.text_content()
# Use a list comprehension to collect all the URLs
urls = [lnk.get('href') for lnk in doc.findall('.//a')]
print urls[-10:]
# tables = doc.findall('.//table')
# calls = tables[0]
# puts = tables[1]
# rows = calls.findall('.//tr')
# Header row
# print _unpack(rows[0], kind='th')
# Data rows
# print _unpack(rows[1], kind='td')
# call_data = parse_options_data(calls)
# put_data = parse_options_data(puts)
# print call_data[:10]
| [
"[email protected]"
] | |
0243fca320209c2522051b1b89d64c9a349e4937 | 7160f0637ba4fdd85feeb43aca2125c3479c474c | /config/spec.py | b1b021a751d98f14b75db187ffcdf0c648646468 | [
"MIT"
] | permissive | RENCI/pdspi-mapper-parallex-example | 86a39e513f1e07f73be1281c81b2b143ed7e5d80 | 1c99fa42b7b9bc2c09e9cad2f1c55ea10549814a | refs/heads/master | 2023-05-11T04:29:58.354329 | 2021-03-03T23:14:21 | 2021-03-03T23:14:21 | 260,721,734 | 0 | 2 | MIT | 2023-05-01T21:42:44 | 2020-05-02T15:54:12 | Python | UTF-8 | Python | false | false | 5,561 | py | from pdsphenotypemapping.clinical_feature import *
from tx.dateutils.utils import strtodate
from dateutil.relativedelta import relativedelta
requested_patient_variable_ids = get_patient_variable_ids(patientVariables)
timestamp_datetime = strtodate(timestamp)
for patient_id in patientIds:
patient_data = deref(data, patient_id)
patient = get_patient_patient(patient_data)
pid = patient["id"]
yield {
"patientId": pid
}
condition = get_condition_patient(fhir=patient_data)
observation = get_observation_patient(fhir=patient_data)
if "LOINC:2160-0" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:2160-0",
**serum_creatinine(observation, "mg/dL", timestamp_datetime)
}]
}
if "LOINC:82810-3" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:82810-3",
**pregnancy(condition, None, timestamp_datetime)
}]
}
if "HP:0001892" in requested_patient_variable_ids:
yield {
"values": [{
"id": "HP:0001892",
**bleeding(condition, None, timestamp_datetime)
}]
}
if "HP:0000077" in requested_patient_variable_ids:
yield {
"values": [{
"id": "HP:0000077",
**kidney_dysfunction(condition, None, timestamp_datetime)
}]
}
if "LOINC:30525-0" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:30525-0",
**age(patient, "year", timestamp_datetime)
}]
}
if "LOINC:54134-2" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:54134-2",
**race(patient, None, timestamp_datetime)
}]
}
if "LOINC:54120-1" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:54120-1",
**ethnicity(patient, None, timestamp_datetime)
}]
}
if "LOINC:21840-4" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:21840-4",
**sex(patient, None, timestamp_datetime)
}]
}
if "LOINC:8302-2" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:8302-2",
**height(observation, "m", timestamp_datetime)
}]
}
if "LOINC:29463-7" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:29463-7",
**weight(observation, "kg", timestamp_datetime)
}]
}
if "LOINC:39156-5" in requested_patient_variable_ids:
        # use distinct names so the height()/weight() helpers are not shadowed on later loop iterations
        height_value = height(observation, "m", timestamp_datetime)
        weight_value = weight(observation, "kg", timestamp_datetime)
        yield {
            "values": [{
                "id": "LOINC:39156-5",
                **bmi(height_value, weight_value, observation, "kg/m^2", timestamp_datetime)
}]
}
if "LOINC:45701-0" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:45701-0",
**fever(condition, None, timestamp_datetime)
}]
}
if "LOINC:LP212175-6" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:LP212175-6",
**date_of_fever_onset(condition, None, timestamp_datetime)
}]
}
if "LOINC:64145-6" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:64145-6",
**cough(condition, None, timestamp_datetime)
}]
}
if "LOINC:85932-2" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:85932-2",
**date_of_cough_onset(condition, None, timestamp_datetime)
}]
}
if "LOINC:54564-0" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:54564-0",
**shortness_of_breath(condition, None, timestamp_datetime)
}]
}
if "LOINC:LP128504-0" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:LP128504-0",
**autoimmune_disease(condition, None, timestamp_datetime)
}]
}
if "LOINC:54542-6" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:54542-6",
**pulmonary_disease(condition, None, timestamp_datetime)
}]
}
if "LOINC:LP172921-1" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:LP172921-1",
**cardiovascular_disease(condition, None, timestamp_datetime)
}]
}
if "LOINC:56799-0" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:56799-0",
**address(patient, None, timestamp_datetime)
}]
}
if "LOINC:LP21258-6" in requested_patient_variable_ids:
yield {
"values": [{
"id": "LOINC:54542-6",
**oxygen_saturation(observation, None, timestamp_datetime)
}]
}
| [
"[email protected]"
] | |
206494e27a4e33018dcfbb23b90c5fa250dea24c | eda67cc12434d1b661da46771ce4280842798bf9 | /google-cloud-sdk/lib/googlecloudsdk/api_lib/compute/instance_template_utils.py | d86efba15894b77281d3fcbb6bb137c11602be46 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | hexingren/data-pipeline | b48870618bbbcb428dd060b64f91e049815e9980 | ea1eda9977bb21b1bf58df4e74655640c50cb080 | refs/heads/master | 2021-01-11T16:48:46.202556 | 2017-05-14T23:10:38 | 2017-05-14T23:10:38 | 79,674,630 | 0 | 3 | null | 2020-07-25T05:43:03 | 2017-01-21T21:40:38 | Python | UTF-8 | Python | false | false | 8,248 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convenience functions for dealing with instance templates."""
from googlecloudsdk.api_lib.compute import alias_ip_range_utils
from googlecloudsdk.api_lib.compute import constants
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.command_lib.compute import scope as compute_scope
from googlecloudsdk.command_lib.compute.networks.subnets import flags as subnet_flags
EPHEMERAL_ADDRESS = object()
# TODO(user): Add unit tests for utilities
def CreateNetworkInterfaceMessage(
resources, scope_lister, messages, network, region, subnet, address,
alias_ip_ranges_string=None):
"""Creates and returns a new NetworkInterface message.
Args:
resources: generates resource references,
scope_lister: function, provides scopes for prompting subnet region,
messages: GCE API messages,
network: network,
region: region for subnetwork,
subnet: regional subnetwork,
address: specify static address for instance template
* None - no address,
* EPHEMERAL_ADDRESS - ephemeral address,
* string - address name to be fetched from GCE API.
alias_ip_ranges_string: command line string specifying a list of alias
IP ranges.
Returns:
network_interface: a NetworkInterface message object
"""
# By default interface is attached to default network. If network or subnet
# are specified they're used instead.
network_interface = messages.NetworkInterface()
if subnet is not None:
subnet_ref = subnet_flags.SubnetworkResolver().ResolveResources(
[subnet], compute_scope.ScopeEnum.REGION, region, resources,
scope_lister=scope_lister)[0]
network_interface.subnetwork = subnet_ref.SelfLink()
if network is not None:
network_ref = resources.Parse(network, collection='compute.networks')
network_interface.network = network_ref.SelfLink()
elif subnet is None:
network_ref = resources.Parse(
constants.DEFAULT_NETWORK, collection='compute.networks')
network_interface.network = network_ref.SelfLink()
if address:
access_config = messages.AccessConfig(
name=constants.DEFAULT_ACCESS_CONFIG_NAME,
type=messages.AccessConfig.TypeValueValuesEnum.ONE_TO_ONE_NAT)
# If the user provided an external IP, populate the access
# config with it.
if address != EPHEMERAL_ADDRESS:
access_config.natIP = address
network_interface.accessConfigs = [access_config]
if alias_ip_ranges_string:
network_interface.aliasIpRanges = (
alias_ip_range_utils.CreateAliasIpRangeMessagesFromString(
messages, False, alias_ip_ranges_string))
return network_interface
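# Sketch of a call (illustrative values only, not taken from gcloud itself):
#
#   nic = CreateNetworkInterfaceMessage(
#       resources, scope_lister, messages,
#       network=None, region='us-central1', subnet='my-subnet',
#       address='198.51.100.7')
#
# Passing address=EPHEMERAL_ADDRESS instead requests an ephemeral external IP,
# and address=None creates an interface with no external access config.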
def CreateNetworkInterfaceMessages(
resources, scope_lister, messages, network_interface_arg, region):
"""Create network interface messages.
Args:
resources: generates resource references,
scope_lister: function, provides scopes for prompting subnet region,
messages: creates resources.
network_interface_arg: CLI argument specifying network interfaces.
region: region of the subnetwork.
Returns:
list, items are NetworkInterfaceMessages.
"""
result = []
if network_interface_arg:
for interface in network_interface_arg:
address = interface.get('address', None)
# pylint: disable=g-explicit-bool-comparison
if address == '':
address = EPHEMERAL_ADDRESS
result.append(CreateNetworkInterfaceMessage(
resources, scope_lister, messages, interface.get('network', None),
region,
interface.get('subnet', None),
address,
interface.get('aliases', None)))
return result
def CreatePersistentAttachedDiskMessages(messages, disks):
"""Returns a list of AttachedDisk messages and the boot disk's reference.
Args:
messages: GCE API messages,
disks: disk objects - contains following properties
* name - the name of disk,
* mode - 'rw' (R/W), 'ro' (R/O) access mode,
* boot - whether it is a boot disk,
* autodelete - whether disks is deleted when VM is deleted,
* device-name - device name on VM.
Returns:
list of API messages for attached disks
"""
disks_messages = []
for disk in disks:
name = disk['name']
# Resolves the mode.
mode_value = disk.get('mode', 'rw')
if mode_value == 'rw':
mode = messages.AttachedDisk.ModeValueValuesEnum.READ_WRITE
else:
mode = messages.AttachedDisk.ModeValueValuesEnum.READ_ONLY
boot = disk.get('boot') == 'yes'
auto_delete = disk.get('auto-delete') == 'yes'
attached_disk = messages.AttachedDisk(
autoDelete=auto_delete,
boot=boot,
deviceName=disk.get('device-name'),
mode=mode,
source=name,
type=messages.AttachedDisk.TypeValueValuesEnum.PERSISTENT)
# The boot disk must end up at index 0.
if boot:
disks_messages = [attached_disk] + disks_messages
else:
disks_messages.append(attached_disk)
return disks_messages
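# Sketch of the expected `disks` argument (keys as documented above; the values
# here are hypothetical):
#
#   disks = [{'name': 'boot-disk', 'mode': 'rw', 'boot': 'yes',
#             'auto-delete': 'yes', 'device-name': 'persistent-disk-0'}]
#
# The boot disk always ends up at index 0 of the returned list.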
def CreatePersistentCreateDiskMessages(scope_prompter, messages, create_disks):
"""Returns a list of AttachedDisk messages.
Args:
scope_prompter: Scope prompter object,
messages: GCE API messages,
create_disks: disk objects - contains following properties
* name - the name of disk,
* mode - 'rw' (R/W), 'ro' (R/O) access mode,
* disk-size - the size of the disk,
* disk-type - the type of the disk (HDD or SSD),
* image - the name of the image to initialize from,
* image-family - the image family name,
* image-project - the project name that has the image,
* auto-delete - whether disks is deleted when VM is deleted,
* device-name - device name on VM.
Returns:
list of API messages for attached disks
"""
disks_messages = []
for disk in create_disks or []:
name = disk.get('name')
# Resolves the mode.
mode_value = disk.get('mode', 'rw')
if mode_value == 'rw':
mode = messages.AttachedDisk.ModeValueValuesEnum.READ_WRITE
else:
mode = messages.AttachedDisk.ModeValueValuesEnum.READ_ONLY
auto_delete = disk.get('auto-delete') == 'yes'
disk_size_gb = utils.BytesToGb(disk.get('size'))
image_uri, _ = scope_prompter.ExpandImageFlag(
image=disk.get('image'),
image_family=disk.get('image-family'),
image_project=disk.get('image-project'),
return_image_resource=False)
create_disk = messages.AttachedDisk(
autoDelete=auto_delete,
boot=False,
deviceName=disk.get('device-name'),
initializeParams=messages.AttachedDiskInitializeParams(
diskName=name,
sourceImage=image_uri,
diskSizeGb=disk_size_gb,
diskType=disk.get('type')),
mode=mode,
type=messages.AttachedDisk.TypeValueValuesEnum.PERSISTENT)
disks_messages.append(create_disk)
return disks_messages
def CreateDefaultBootAttachedDiskMessage(
messages, disk_type, disk_device_name, disk_auto_delete, disk_size_gb,
image_uri):
"""Returns an AttachedDisk message for creating a new boot disk."""
return messages.AttachedDisk(
autoDelete=disk_auto_delete,
boot=True,
deviceName=disk_device_name,
initializeParams=messages.AttachedDiskInitializeParams(
sourceImage=image_uri,
diskSizeGb=disk_size_gb,
diskType=disk_type),
mode=messages.AttachedDisk.ModeValueValuesEnum.READ_WRITE,
type=messages.AttachedDisk.TypeValueValuesEnum.PERSISTENT)
| [
"[email protected]"
] | |
33df19f351ae1e38a5fef7a942b3eaaee767871b | 6e46a850cc4ece73476a350e676ea55ce72b200a | /aliyun-python-sdk-reid/aliyunsdkreid/request/v20190928/ImportSpecialPersonnelRequest.py | fb397d0415577135ad8be89532374fbb0d1edd62 | [
"Apache-2.0"
] | permissive | zhxfei/aliyun-openapi-python-sdk | fb3f22ca149988d91f07ba7ca3f6a7a4edf46c82 | 15890bf2b81ce852983f807e21b78a97bcc26c36 | refs/heads/master | 2022-07-31T06:31:24.471357 | 2020-05-22T17:00:17 | 2020-05-22T17:00:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,552 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkreid.endpoint import endpoint_data
class ImportSpecialPersonnelRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'reid', '2019-09-28', 'ImportSpecialPersonnel')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_UkId(self):
return self.get_body_params().get('UkId')
def set_UkId(self,UkId):
self.add_body_params('UkId', UkId)
def get_Description(self):
return self.get_body_params().get('Description')
def set_Description(self,Description):
self.add_body_params('Description', Description)
def get_ExternalId(self):
return self.get_body_params().get('ExternalId')
def set_ExternalId(self,ExternalId):
self.add_body_params('ExternalId', ExternalId)
def get_PersonType(self):
return self.get_body_params().get('PersonType')
def set_PersonType(self,PersonType):
self.add_body_params('PersonType', PersonType)
def get_Urls(self):
return self.get_body_params().get('Urls')
def set_Urls(self,Urls):
self.add_body_params('Urls', Urls)
def get_PersonName(self):
return self.get_body_params().get('PersonName')
def set_PersonName(self,PersonName):
self.add_body_params('PersonName', PersonName)
def get_StoreIds(self):
return self.get_body_params().get('StoreIds')
def set_StoreIds(self,StoreIds):
self.add_body_params('StoreIds', StoreIds)
def get_Status(self):
return self.get_body_params().get('Status')
def set_Status(self,Status):
self.add_body_params('Status', Status) | [
"[email protected]"
] | |
0565407cdfc58de77957fbefb50611a9c24c4748 | d9720a7b4bfe713426f766547062aaeacdfa2566 | /models/city.py | 44ade30d3614b334699e4c1bc26318d31b39b2b7 | [
"MIT"
] | permissive | AlisonQuinter17/AirBnB_clone | b90a96bc2256e32f648bb2b9a8e1dbdba90ca4eb | c890e3b4f9eb7a3ded96ac756387109351e6b13f | refs/heads/main | 2023-01-19T05:10:39.635975 | 2020-11-18T17:36:59 | 2020-11-18T17:36:59 | 308,370,255 | 1 | 2 | MIT | 2020-11-03T16:03:36 | 2020-10-29T15:27:01 | Python | UTF-8 | Python | false | false | 144 | py | #!/usr/bin/python3
from models.base_model import BaseModel
class City(BaseModel):
""" city attributes """
state_id = ""
name = ""
| [
"[email protected]"
] | |
2b117cb43b2993dc5748ae809156750eb0e3a3f7 | 6bf005128fb95ea21994325ace59cf0664d0159e | /U3DAutomatorClient/script/windows/PPT3DTestCase/FirstStageTestCase/InsertWordArtTestCase.py | 101fe499da4ad6957383cee6f9715c1d14d63a4c | [] | no_license | Bigfishisbig/U3DAutomatorTest | 5ab4214fc6cda678a5f266fb013f7dd7c52fcaf8 | 93a73d8995f526f998ff50b51a77ef0bbf1b4ff8 | refs/heads/master | 2023-01-07T11:59:19.025497 | 2019-09-20T06:06:55 | 2019-09-20T06:06:55 | 209,458,914 | 0 | 0 | null | 2022-12-27T15:35:30 | 2019-09-19T03:58:43 | Python | UTF-8 | Python | false | false | 1,796 | py | #!/usr/bin/env python
# coding=utf-8
"""
File name: InsertWordArtTestCase.py
Author: ycy
Version: PPTPro
Created: 2019/1/18 15:51
Modified:
Software: PyCharm
"""
from script.windows.Operation import *
from script.windows.SystemDialog import SystemDiaglog
from script.windows.PPT3DTestCase.Action import Action
from script.windows.PPT3DSetting.SourcePath import SourcePath
reload(sys)
sys.setdefaultencoding('UTF-8')  # switch the script's default encoding to the specified encoding
class InsertWordArtTestCase(Action, Operation, SystemDiaglog):
    '''Insert WordArt'''
def test_main(self):
        '''Insert WordArt'''
self.OperationSetting()
self.Init3DPPT()
self.SetTag("插入艺术字", time.time())
tag = (self.__class__.__doc__ or u"测试") + "_" + self.__class__.__name__
self.startScene(tag)
self.InputPara()
self.InputStr(u"黑夜给了你黑色的眼睛,你却用它来寻找光明。")
wordart = [SourcePath.File_Img_WordArt_Text_1, SourcePath.File_Img_WordArt_Text_2, SourcePath.File_Img_WordArt_Text_3]
for i in range(3):
self.OneClick("BtnFormat")
path = self.getText()
# self.OneClickL(path, 50)
self.ListClick("RotateByZAxisNor")
self.ListClick("WordArtStyle", i)
self.s_witForImg(wordart[i], 10, "艺术字插入失败", None, 0.4)
self.OneClick("BtnStart")
self.OneClick("BtnRevert")
self.s_waitForImgVanish(wordart[i], 10, "撤销艺术字失败", 0.4)
self.OneClick("BtnStart")
self.OneClick("BtnRecover")
self.s_witForImg(wordart[i], 10, "艺术字插入失败")
self.endScene(tag)
time.sleep(1)
self.EndTag()
| [
"[email protected]"
] | |
e9e5d9560e3538cf8acd44dda5426de0b90d8434 | 2d7237e1b35a7feb659c34c04da7e3069b1ed1ee | /virtual/bin/easy_install | ccf28af20e903607266746a93b5af7a4f57b5faa | [
"MIT"
] | permissive | Juru-10/STA | c204dfc6d58f5322eece46e84ad038ba51bc6f88 | df9fdef0c70dece49cca09018ad4f57583d05b73 | refs/heads/master | 2020-04-21T06:20:39.004444 | 2019-05-08T10:20:35 | 2019-05-08T10:20:35 | 169,363,398 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 267 | #!/home/wecode/Desktop/DJANGO/STA/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
6c3521f7f8735e45cd7fa0cd7ff651fbf0bf0d51 | 717171ed7a14ad60dd42d62fe0dd217a0c0c50fd | /19年7月/7.02/base64处理图形验证码.py | bd54377e9b458358c19bb62f61375ac74e346fcc | [] | no_license | friedlich/python | 6e9513193227e4e9ee3e30429f173b55b9cdb85d | 1654ef4f616fe7cb9fffe79d1e6e7d7721c861ac | refs/heads/master | 2020-09-04T14:34:48.237404 | 2019-11-18T14:54:44 | 2019-11-18T14:54:44 | 219,756,451 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,693 | py | import requests,base64,sys,csv
from PIL import Image
address_url = 'https://www.ele.me/restapi/bgs/poi/search_poi_nearby?'
place = input('请输入你的收货地址:')
params = {
'geohash': 'wtw3sjq6n6um',
'keyword': place,
'latitude': '31.23037',
'limit': '20',
'longitude': '121.473701',
'type': 'nearby'
}
headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36'}
address_res = requests.get(address_url,headers=headers,params=params)
address_json = address_res.json()
print('以下,是与'+place+'相关的位置信息:\n')
n=0
for address in address_json:
print(str(n)+'. '+address['name']+':'+address['short_address']+'\n')
n = n+1
address_num = int(input('请输入您选择位置的序号:'))
final_address = address_json[address_num]
session = requests.session()
url_1 = 'https://h5.ele.me/restapi/eus/login/mobile_send_code'
tel = input('请输入手机号:')
data_1 = {
'captcha_hash':'',
'captcha_value':'',
'mobile': tel,
'scf': "ms"
}
login = session.post(url_1,headers=headers,data=data_1)
code = login.status_code
print(type(login))
print(login.text)
print('status code of login:' + str(code))
if code == 200: # the first three login attempts do not require an image captcha
token = login.json()['validate_token']
url_2 = 'https://h5.ele.me/restapi/eus/login/login_by_mobile'
code = input('请输入手机验证码:')
data_2 = {
'mobile': tel,
'scf': 'ms',
'validate_code': code,
'validate_token': token
}
session.post(url_2,headers=headers,data=data_2)
elif code == 400: # after more than 3 login attempts the site requires an image captcha
print('有图形验证码')
url_3 = 'https://h5.ele.me/restapi/eus/v3/captchas'
data_3 = {'captcha_str': tel}
    # Fetch the captcha.
cap =session.post(url_3,headers=headers,data=data_3)
hash = cap.json()['captcha_hash']
value = cap.json()['captcha_image'].replace('data:image/jpeg;base64,','')
    # Decode the captcha string and save it locally as an image file
x = base64.b64decode(value)
file = open(sys.path[0]+'\\captcha.jpg','wb')
file.write(x)
file.close()
im = Image.open(sys.path[0]+'\\captcha.jpg')
    im.show() # display the captcha image
captche_value = input('请输入验证码:')
    # POST the image captcha value to the ele.me server to log in
url_1 = 'https://h5.ele.me/restapi/eus/login/mobile_send_code'
data_4 = {
'captcha_hash': hash,
'captcha_value': captche_value,
'mobile': tel,
'scf': "ms"
}
    # Send the verification code to the server.
login = session.post(url_1,headers=headers,data=data_4)
print(login.json())
token = login.json()['validate_token']
url_2 = 'https://h5.ele.me/restapi/eus/login/login_by_mobile'
code = input('请输入手机验证码:')
data_2 = {
'mobile': tel,
'scf': 'ms',
'validate_code': code,
'validate_token': token
}
session.post(url_2,headers=headers,data=data_2)
restaurants_url = 'https://www.ele.me/restapi/shopping/restaurants'
params={
'extras[]': 'activities',
'geohash': final_address['geohash'],
'latitude': final_address['latitude'],
'limit': '24',
'longitude': final_address['longitude'],
'offset': '0',
'terminal': 'web'
}
restaurants_res = session.get(restaurants_url,headers=headers,params=params)
restaurants_json = restaurants_res.json()
with open(sys.path[0]+'\\restaurants.csv','w',newline='',encoding='utf_8_sig') as f:
writer = csv.writer(f)
for restaurant in restaurants_json:
writer.writerow(restaurant['name'])
| [
"[email protected]"
] | |
19acfeedde17e2b77e96900b01d98cd205499e10 | 35dbf8489dc1cb63087dd01ba9de57643e9b3aba | /ogb/io/save_dataset.py | c128812f1b2e3be2f34a5ec6971ef796d8831fd0 | [
"MIT"
] | permissive | edwardelson/ogb | 9c6c6fcfeb04ae042919c05b7a060c143c7f3d5c | c783060c5ada3641c0f08527acd1d53626f9f9c9 | refs/heads/master | 2023-06-02T05:12:08.056741 | 2021-06-16T17:55:42 | 2021-06-16T17:55:42 | 356,612,389 | 2 | 1 | MIT | 2021-06-16T17:55:43 | 2021-04-10T14:51:59 | Python | UTF-8 | Python | false | false | 28,886 | py | import torch
import pandas as pd
import os
import os.path as osp
from datetime import date
import shutil
from tqdm import tqdm
import numpy as np
from ogb.io.read_graph_raw import read_binary_graph_raw, read_binary_heterograph_raw
from ogb.utils.torch_util import all_numpy
class DatasetSaver(object):
'''
A class for saving graphs and split in OGB-compatible manner
Create submission_datasetname/ directory, and output the following two files:
- datasetname.zip (OGB-compatible zipped dataset folder)
- meta_dict.pt (torch files storing all the necessary dataset meta-information)
'''
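    # Intended call order (each step is enforced by the *_done flags set below):
    #   save_graph_list -> save_target_labels (ogbn/ogbg only) -> save_split
    #   -> copy_mapping_dir -> save_task_info -> get_meta_dict -> zip -> cleanup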
def __init__(self, dataset_name, is_hetero, version, root = 'submission'):
# verify input
if not ('ogbn-' in dataset_name or 'ogbl-' in dataset_name or 'ogbg-' in dataset_name):
raise ValueError('Dataset name must have valid ogb prefix (e.g., ogbn-*).')
if not isinstance(is_hetero, bool):
raise ValueError('is_hetero must be of type bool.')
if not (isinstance(version, int) and version >= 0):
raise ValueError('version must be of type int and non-negative')
self.dataset_name = dataset_name
self.is_hetero = is_hetero
self.version = version
self.root = root
self.dataset_prefix = dataset_name.split('-')[0] # specify the task category
self.dataset_suffix = '_'.join(dataset_name.split('-')[1:])
self.submission_dir = self.root + '_' + self.dataset_prefix + '_' + self.dataset_suffix
self.dataset_dir = osp.join(self.submission_dir, self.dataset_suffix)
self.meta_dict_path = osp.join(self.submission_dir, 'meta_dict.pt')
if self.dataset_prefix == 'ogbg' and self.is_hetero:
raise NotImplementedError('Heterogeneous graph dataset object has not been implemented for graph property prediction yet.')
if osp.exists(self.dataset_dir):
if input(f'Found an existing submission directory at {self.submission_dir}/. \nWill you remove it? (y/N)\n').lower() == 'y':
shutil.rmtree(self.submission_dir)
print('Removed existing submission directory')
else:
print('Process stopped.')
exit(-1)
# make necessary dirs
self.raw_dir = osp.join(self.dataset_dir, 'raw')
os.makedirs(self.raw_dir, exist_ok=True)
os.makedirs(osp.join(self.dataset_dir, 'processed'), exist_ok=True)
# create release note
with open(osp.join(self.dataset_dir, f'RELEASE_v{version}.txt'), 'w') as fw:
fw.write(f'# Release note for {self.dataset_name}\n\n### v{version}: {date.today()}')
# check list
self._save_graph_list_done = False
self._save_split_done = False
self._copy_mapping_dir_done = False
if 'ogbl' == self.dataset_prefix:
self._save_target_labels_done = True # for ogbl, we do not need to give predicted labels
else:
self._save_target_labels_done = False # for ogbn and ogbg, need to give predicted labels
self._save_task_info_done = False
self._get_meta_dict_done = False
self._zip_done = False
def _save_graph_list_hetero(self, graph_list):
dict_keys = graph_list[0].keys()
# check necessary keys
if not 'edge_index_dict' in dict_keys:
raise RuntimeError('edge_index_dict needs to be provided in graph objects')
if not 'num_nodes_dict' in dict_keys:
raise RuntimeError('num_nodes_dict needs to be provided in graph objects')
print(dict_keys)
# Store the following files
# - edge_index_dict.npz (necessary)
# edge_index_dict
# - num_nodes_dict.npz (necessary)
# num_nodes_dict
# - num_edges_dict.npz (necessary)
# num_edges_dict
# - node_**.npz (optional, node_feat_dict is the default node features)
# - edge_**.npz (optional, edge_feat_dict the default edge features)
# extract entity types
ent_type_list = sorted([e for e in graph_list[0]['num_nodes_dict'].keys()])
# saving num_nodes_dict
print('Saving num_nodes_dict')
num_nodes_dict = {}
for ent_type in ent_type_list:
num_nodes_dict[ent_type] = np.array([graph['num_nodes_dict'][ent_type] for graph in graph_list]).astype(np.int64)
np.savez_compressed(osp.join(self.raw_dir, 'num_nodes_dict.npz'), **num_nodes_dict)
print(num_nodes_dict)
# extract triplet types
triplet_type_list = sorted([(h, r, t) for (h, r, t) in graph_list[0]['edge_index_dict'].keys()])
print(triplet_type_list)
# saving edge_index_dict
print('Saving edge_index_dict')
num_edges_dict = {}
edge_index_dict = {}
for triplet in triplet_type_list:
# representing triplet (head, rel, tail) as a single string 'head___rel___tail'
triplet_cat = '___'.join(triplet)
edge_index = np.concatenate([graph['edge_index_dict'][triplet] for graph in graph_list], axis = 1).astype(np.int64)
if edge_index.shape[0] != 2:
raise RuntimeError('edge_index must have shape (2, num_edges)')
num_edges = np.array([graph['edge_index_dict'][triplet].shape[1] for graph in graph_list]).astype(np.int64)
num_edges_dict[triplet_cat] = num_edges
edge_index_dict[triplet_cat] = edge_index
print(edge_index_dict)
print(num_edges_dict)
np.savez_compressed(osp.join(self.raw_dir, 'edge_index_dict.npz'), **edge_index_dict)
np.savez_compressed(osp.join(self.raw_dir, 'num_edges_dict.npz'), **num_edges_dict)
for key in dict_keys:
if key == 'edge_index_dict' or key == 'num_nodes_dict':
continue
if graph_list[0][key] is None:
continue
print(f'Saving {key}')
feat_dict = {}
if 'node_' in key:
# node feature dictionary
for ent_type in graph_list[0][key].keys():
if ent_type not in num_nodes_dict:
raise RuntimeError(f'Encountered unknown entity type called {ent_type}.')
# check num_nodes
for i in range(len(graph_list)):
if len(graph_list[i][key][ent_type]) != num_nodes_dict[ent_type][i]:
raise RuntimeError(f'num_nodes mistmatches with {key}[{ent_type}]')
# make sure saved in np.int64 or np.float32
dtype = np.int64 if 'int' in str(graph_list[0][key][ent_type].dtype) else np.float32
cat_feat = np.concatenate([graph[key][ent_type] for graph in graph_list], axis = 0).astype(dtype)
feat_dict[ent_type] = cat_feat
elif 'edge_' in key:
# edge feature dictionary
for triplet in graph_list[0][key].keys():
# representing triplet (head, rel, tail) as a single string 'head___rel___tail'
triplet_cat = '___'.join(triplet)
if triplet_cat not in num_edges_dict:
raise RuntimeError(f"Encountered unknown triplet type called ({','.join(triplet)}).")
# check num_edges
for i in range(len(graph_list)):
if len(graph_list[i][key][triplet]) != num_edges_dict[triplet_cat][i]:
raise RuntimeError(f"num_edges mismatches with {key}[({','.join(triplet)})]")
# make sure saved in np.int64 or np.float32
dtype = np.int64 if 'int' in str(graph_list[0][key][triplet].dtype) else np.float32
cat_feat = np.concatenate([graph[key][triplet] for graph in graph_list], axis = 0).astype(dtype)
feat_dict[triplet_cat] = cat_feat
else:
raise RuntimeError(f'Keys in graph object should start from either \'node_\' or \'edge_\', but \'{key}\' given.')
np.savez_compressed(osp.join(self.raw_dir, f'{key}.npz'), **feat_dict)
print('Validating...')
# testing
print('Reading saved files')
graph_list_read = read_binary_heterograph_raw(self.raw_dir, False)
print('Checking read graphs and given graphs are the same')
for i in tqdm(range(len(graph_list))):
for key0, value0 in graph_list[i].items():
if value0 is not None:
for key1, value1 in value0.items():
if isinstance(graph_list[i][key0][key1], np.ndarray):
assert(np.allclose(graph_list[i][key0][key1], graph_list_read[i][key0][key1], rtol=1e-04, atol=1e-04, equal_nan=True))
else:
assert(graph_list[i][key0][key1] == graph_list_read[i][key0][key1])
del graph_list_read
def _save_graph_list_homo(self, graph_list):
dict_keys = graph_list[0].keys()
# check necessary keys
if not 'edge_index' in dict_keys:
raise RuntimeError('edge_index needs to be provided in graph objects')
if not 'num_nodes' in dict_keys:
raise RuntimeError('num_nodes needs to be provided in graph objects')
print(dict_keys)
data_dict = {}
# Store the following keys
# - edge_index (necessary)
# - num_nodes_list (necessary)
# - num_edges_list (necessary)
# - node_** (optional, node_feat is the default node features)
# - edge_** (optional, edge_feat is the default edge features)
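        # For reference, a minimal homogeneous graph dict could look like this
        # (shapes are illustrative):
        #   {'edge_index': np.zeros((2, 5), dtype=np.int64), 'num_nodes': 4,
        #    'node_feat': np.zeros((4, 8), dtype=np.float32), 'edge_feat': None}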
# saving num_nodes_list
num_nodes_list = np.array([graph['num_nodes'] for graph in graph_list]).astype(np.int64)
data_dict['num_nodes_list'] = num_nodes_list
# saving edge_index and num_edges_list
print('Saving edge_index')
edge_index = np.concatenate([graph['edge_index'] for graph in graph_list], axis = 1).astype(np.int64)
num_edges_list = np.array([graph['edge_index'].shape[1] for graph in graph_list]).astype(np.int64)
if edge_index.shape[0] != 2:
raise RuntimeError('edge_index must have shape (2, num_edges)')
data_dict['edge_index'] = edge_index
data_dict['num_edges_list'] = num_edges_list
for key in dict_keys:
if key == 'edge_index' or key == 'num_nodes':
continue
if graph_list[0][key] is None:
continue
if 'node_' == key[:5]:
# make sure saved in np.int64 or np.float32
dtype = np.int64 if 'int' in str(graph_list[0][key].dtype) else np.float32
# check num_nodes
for i in range(len(graph_list)):
if len(graph_list[i][key]) != num_nodes_list[i]:
raise RuntimeError(f'num_nodes mistmatches with {key}')
cat_feat = np.concatenate([graph[key] for graph in graph_list], axis = 0).astype(dtype)
data_dict[key] = cat_feat
elif 'edge_' == key[:5]:
# make sure saved in np.int64 or np.float32
dtype = np.int64 if 'int' in str(graph_list[0][key].dtype) else np.float32
# check num_edges
for i in range(len(graph_list)):
if len(graph_list[i][key]) != num_edges_list[i]:
raise RuntimeError(f'num_edges mistmatches with {key}')
cat_feat = np.concatenate([graph[key] for graph in graph_list], axis = 0).astype(dtype)
data_dict[key] = cat_feat
else:
raise RuntimeError(f'Keys in graph object should start from either \'node_\' or \'edge_\', but \'{key}\' given.')
print('Saving all the files!')
np.savez_compressed(osp.join(self.raw_dir, 'data.npz'), **data_dict)
print('Validating...')
# testing
print('Reading saved files')
graph_list_read = read_binary_graph_raw(self.raw_dir, False)
print('Checking read graphs and given graphs are the same')
for i in tqdm(range(len(graph_list))):
# assert(graph_list[i].keys() == graph_list_read[i].keys())
for key in graph_list[i].keys():
if graph_list[i][key] is not None:
if isinstance(graph_list[i][key], np.ndarray):
assert(np.allclose(graph_list[i][key], graph_list_read[i][key], rtol=1e-4, atol=1e-4, equal_nan=True))
else:
assert(graph_list[i][key] == graph_list_read[i][key])
del graph_list_read
def save_task_info(self, task_type, eval_metric, num_classes = None):
'''
task_type (str): For ogbg and ogbn, either classification or regression.
eval_metric (str): the metric
if task_type is 'classification', num_classes must be given.
'''
if self.dataset_prefix == 'ogbn' or self.dataset_prefix == 'ogbg':
if not ('classification' in task_type or 'regression' in task_type):
raise ValueError(f'task type must contain eighther classification or regression, but {task_type} given')
self.task_type = task_type
print(self.task_type)
print(num_classes)
if 'classification' in self.task_type:
if not (isinstance(num_classes, int) and num_classes > 1):
raise ValueError(f'num_classes must be an integer larger than 1, {num_classes} given.')
self.num_classes = num_classes
else:
self.num_classes = -1 # in the case of regression, just set to -1
self.eval_metric = eval_metric
self._save_task_info_done = True
def save_target_labels(self, target_labels):
'''
target_label (numpy.narray): storing target labels. Shape must be (num_data, num_tasks)
'''
if self.dataset_prefix == 'ogbl':
raise RuntimeError('ogbl link prediction dataset does not need to call save_target_labels')
if not self._save_graph_list_done:
raise RuntimeError('save_graph_list must be done beforehand.')
if self.is_hetero:
if not (isinstance(target_labels, dict) and len(target_labels) == 1):
raise ValueError(f'target label must be of dictionary type with single key')
key = list(target_labels.keys())[0]
if key not in self.num_data:
raise ValueError(f'Unknown entity type called {key}.')
if len(target_labels[key]) != self.num_data[key]:
raise RuntimeError(f'The length of target_labels ({len(target_labels[key])}) must be the same as the number of data points ({self.num_data[key]}).')
if self.dataset_prefix == 'ogbg':
raise NotImplementedError('hetero graph for graph-level prediction has not been implemented yet.')
elif self.dataset_prefix == 'ogbn':
np.savez_compressed(osp.join(self.raw_dir, 'node-label.npz'), **target_labels)
self.num_tasks = target_labels[key].shape[1]
else:
# check type and shape
if not isinstance(target_labels, np.ndarray):
raise ValueError(f'target label must be of type np.ndarray')
if len(target_labels) != self.num_data:
raise RuntimeError(f'The length of target_labels ({len(target_labels)}) must be the same as the number of data points ({self.num_data}).')
if self.dataset_prefix == 'ogbg':
np.savez_compressed(osp.join(self.raw_dir, 'graph-label.npz'), graph_label = target_labels)
elif self.dataset_prefix == 'ogbn':
np.savez_compressed(osp.join(self.raw_dir, 'node-label.npz'), node_label = target_labels)
self.num_tasks = target_labels.shape[1]
self._save_target_labels_done = True
def save_graph_list(self, graph_list):
if not all_numpy(graph_list):
raise RuntimeError('graph_list must only contain list/dict of numpy arrays, int, or float')
if self.dataset_prefix == 'ogbn' or self.dataset_prefix == 'ogbl':
if len(graph_list) > 1:
raise RuntimeError('Multiple graphs not supported for node/link property prediction.')
if self.is_hetero:
self._save_graph_list_hetero(graph_list)
self.has_node_attr = ('node_feat_dict' in graph_list[0]) and (graph_list[0]['node_feat_dict'] is not None)
self.has_edge_attr = ('edge_feat_dict' in graph_list[0]) and (graph_list[0]['edge_feat_dict'] is not None)
else:
self._save_graph_list_homo(graph_list)
self.has_node_attr = ('node_feat' in graph_list[0]) and (graph_list[0]['node_feat'] is not None)
self.has_edge_attr = ('edge_feat' in graph_list[0]) and (graph_list[0]['edge_feat'] is not None)
# later used for checking the shape of target_label
if self.dataset_prefix == 'ogbg':
self.num_data = len(graph_list) # number of graphs
elif self.dataset_prefix == 'ogbn':
if self.is_hetero:
self.num_data = graph_list[0]['num_nodes_dict'] # number of nodes
else:
self.num_data = graph_list[0]['num_nodes'] # number of nodes
else:
self.num_data = None
self._save_graph_list_done = True
def save_split(self, split_dict, split_name):
'''
Save dataset split
split_dict: must contain three keys: 'train', 'valid', 'test', where the values are the split indices stored in numpy.
split_name (str): the name of the split
'''
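        # e.g. (illustrative): split_dict = {'train': np.arange(80),
        #                                    'valid': np.arange(80, 90),
        #                                    'test': np.arange(90, 100)}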
self.split_dir = osp.join(self.dataset_dir, 'split', split_name)
os.makedirs(self.split_dir, exist_ok=True)
# verify input
if not 'train' in split_dict:
raise ValueError('\'train\' needs to be given in save_split')
if not 'valid' in split_dict:
raise ValueError('\'valid\' needs to be given in save_split')
if not 'test' in split_dict:
raise ValueError('\'test\' needs to be given in save_split')
if not all_numpy(split_dict):
raise RuntimeError('split_dict must only contain list/dict of numpy arrays, int, or float')
## directly save split_dict
## compatible with ogb>=v1.2.3
torch.save(split_dict, osp.join(self.split_dir, 'split_dict.pt'))
self.split_name = split_name
self._save_split_done = True
def copy_mapping_dir(self, mapping_dir):
target_mapping_dir = osp.join(self.dataset_dir, 'mapping')
os.makedirs(target_mapping_dir, exist_ok=True)
file_list = [f for f in os.listdir(mapping_dir) if osp.isfile(osp.join(mapping_dir, f))]
if 'README.md' not in file_list:
raise RuntimeError(f'README.md must be included in mapping_dir {mapping_dir}')
# copy all the files in the mapping_dir to
for f in file_list:
shutil.copyfile(osp.join(mapping_dir, f), osp.join(target_mapping_dir, f))
self._copy_mapping_dir_done = True
def get_meta_dict(self):
'''
output:
meta_dict: a dictionary that stores meta-information about data, which can be directly passed to OGB dataset object.
Useful for debugging.
'''
# check everything is done before getting meta_dict
if not self._save_graph_list_done:
raise RuntimeError('save_graph_list not completed.')
if not self._save_split_done:
raise RuntimeError('save_split not completed.')
if not self._copy_mapping_dir_done:
raise RuntimeError('copy_mapping_dir not completed.')
if not self._save_target_labels_done:
raise RuntimeError('save_target_labels not completed.')
if not self._save_task_info_done:
raise RuntimeError('save_task_info not completed.')
meta_dict = {'version': self.version, 'dir_path': self.dataset_dir, 'binary': 'True'}
if not self.dataset_prefix == 'ogbl':
meta_dict['num tasks'] = self.num_tasks
meta_dict['num classes'] = self.num_classes
meta_dict['task type'] = self.task_type
meta_dict['eval metric'] = self.eval_metric
meta_dict['add_inverse_edge'] = 'False'
meta_dict['split'] = self.split_name
meta_dict['download_name'] = self.dataset_suffix
map_dict = {'ogbg': 'graphproppred', 'ogbn': 'nodeproppred', 'ogbl': 'linkproppred'}
meta_dict['url'] = f'https://snap.stanford.edu/ogb/data/{map_dict[self.dataset_prefix]}/' + meta_dict['download_name'] + '.zip'
meta_dict['add_inverse_edge'] = 'False'
meta_dict['has_node_attr'] = str(self.has_node_attr)
        meta_dict['has_edge_attr'] = str(self.has_edge_attr)
meta_dict['additional node files'] = 'None'
meta_dict['additional edge files'] = 'None'
meta_dict['is hetero'] = str(self.is_hetero)
# save meta-dict for submission
torch.save(meta_dict, self.meta_dict_path)
        self._get_meta_dict_done = True
return meta_dict
def zip(self):
# check everything is done before zipping
if not self._save_graph_list_done:
raise RuntimeError('save_graph_list not completed.')
if not self._save_split_done:
raise RuntimeError('save_split not completed.')
if not self._copy_mapping_dir_done:
raise RuntimeError('copy_mapping_dir not completed.')
if not self._save_target_labels_done:
raise RuntimeError('save_target_labels not completed.')
if not self._save_task_info_done:
raise RuntimeError('save_task_info not completed.')
if not self._get_meta_dict_done:
raise RuntimeError('get_meta_dict not completed.')
shutil.make_archive(self.dataset_dir, 'zip', self.dataset_dir)
self._zip_done = True
def cleanup(self):
if self._zip_done:
try:
shutil.rmtree(self.dataset_dir)
except FileNotFoundError:
print('Files already deleted.')
else:
raise RuntimeError('Clean up after calling zip()')
def test_datasetsaver():
# test on graph classification
# ogbg-molhiv
test_task = 'link'
# testing all the dataset objects are working.
if test_task == 'graph':
from ogb.graphproppred import PygGraphPropPredDataset, DglGraphPropPredDataset,GraphPropPredDataset
dataset_name = 'ogbg-molhiv'
dataset = PygGraphPropPredDataset(dataset_name)
dataset.get_idx_split()
dataset = DglGraphPropPredDataset(dataset_name)
dataset.get_idx_split()
dataset = GraphPropPredDataset(dataset_name)
dataset.get_idx_split()
elif test_task == 'node':
from ogb.nodeproppred import NodePropPredDataset, PygNodePropPredDataset, DglNodePropPredDataset
dataset_name = 'ogbn-arxiv' # test ogbn-proteins
dataset = PygNodePropPredDataset(dataset_name)
dataset.get_idx_split()
dataset = DglNodePropPredDataset(dataset_name)
dataset.get_idx_split()
dataset = NodePropPredDataset(dataset_name)
dataset.get_idx_split()
elif test_task == 'link':
from ogb.linkproppred import LinkPropPredDataset, PygLinkPropPredDataset, DglLinkPropPredDataset
dataset_name = 'ogbl-collab'
dataset = PygLinkPropPredDataset(dataset_name)
dataset.get_edge_split()
dataset = DglLinkPropPredDataset(dataset_name)
dataset.get_edge_split()
dataset = LinkPropPredDataset(dataset_name)
dataset.get_edge_split()
elif test_task == 'heteronode':
from ogb.nodeproppred import NodePropPredDataset, PygNodePropPredDataset, DglNodePropPredDataset
dataset_name = 'ogbn-mag'
dataset = PygNodePropPredDataset(dataset_name)
dataset.get_idx_split()
dataset = DglNodePropPredDataset(dataset_name)
dataset.get_idx_split()
dataset = NodePropPredDataset(dataset_name)
dataset.get_idx_split()
elif test_task == 'heterolink':
from ogb.linkproppred import LinkPropPredDataset, PygLinkPropPredDataset, DglLinkPropPredDataset
dataset_name = 'ogbl-biokg'
dataset = PygLinkPropPredDataset(dataset_name)
dataset.get_edge_split()
dataset = DglLinkPropPredDataset(dataset_name)
dataset.get_edge_split()
dataset = LinkPropPredDataset(dataset_name)
dataset.get_edge_split()
else:
raise ValueError('Invalid task category')
print(dataset[0])
if 'link' in test_task:
print(dataset.get_edge_split())
else:
print(dataset.get_idx_split())
if 'graph' in test_task:
graph_list = dataset.graphs
else:
graph_list = [dataset.graph]
if 'link' not in test_task:
labels = dataset.labels
is_hetero = 'hetero' in test_task
version = 2 if dataset_name == 'ogbn-mag' else 1
saver = DatasetSaver(dataset_name, is_hetero, version=version)
# saving graph objects
saver.save_graph_list(graph_list)
# saving target labels
if 'link' not in test_task:
saver.save_target_labels(labels)
# saving split
if 'link' in test_task:
split_idx = dataset.get_edge_split()
else:
split_idx = dataset.get_idx_split()
# second argument must be the name of the split
saver.save_split(split_idx, dataset.meta_info['split'])
# copying mapping dir
saver.copy_mapping_dir(f"dataset/{'_'.join(dataset_name.split('-'))}/mapping/")
saver.save_task_info(dataset.task_type, dataset.eval_metric, dataset.num_classes if hasattr(dataset, 'num_classes') else None)
meta_dict = saver.get_meta_dict()
print(meta_dict)
print('Now testing.')
if 'graph' in test_task:
print('library agnostic')
dataset = GraphPropPredDataset(dataset_name, meta_dict = meta_dict)
dataset = GraphPropPredDataset(dataset_name, meta_dict = meta_dict)
print(dataset[0])
print(dataset.get_idx_split())
print('Pytorch Geometric')
dataset = PygGraphPropPredDataset(dataset_name, meta_dict = meta_dict)
dataset = PygGraphPropPredDataset(dataset_name, meta_dict = meta_dict)
print(dataset[0])
print(dataset.get_idx_split())
print('DGL')
dataset = DglGraphPropPredDataset(dataset_name, meta_dict = meta_dict)
dataset = DglGraphPropPredDataset(dataset_name, meta_dict = meta_dict)
print(dataset[0])
print(dataset.get_idx_split())
elif 'node' in test_task:
print('library agnostic')
dataset = NodePropPredDataset(dataset_name, meta_dict = meta_dict)
dataset = NodePropPredDataset(dataset_name, meta_dict = meta_dict)
print(dataset[0])
print(dataset.get_idx_split())
print('Pytorch Geometric')
dataset = PygNodePropPredDataset(dataset_name, meta_dict = meta_dict)
dataset = PygNodePropPredDataset(dataset_name, meta_dict = meta_dict)
print(dataset[0])
print(dataset.get_idx_split())
print('DGL')
dataset = DglNodePropPredDataset(dataset_name, meta_dict = meta_dict)
dataset = DglNodePropPredDataset(dataset_name, meta_dict = meta_dict)
print(dataset[0])
print(dataset.get_idx_split())
elif 'link' in test_task:
print('library agnostic')
dataset = LinkPropPredDataset(dataset_name, meta_dict = meta_dict)
dataset = LinkPropPredDataset(dataset_name, meta_dict = meta_dict)
print(dataset[0])
# print(dataset.get_edge_split())
print('Pytorch Geometric')
dataset = PygLinkPropPredDataset(dataset_name, meta_dict = meta_dict)
dataset = PygLinkPropPredDataset(dataset_name, meta_dict = meta_dict)
print(dataset[0])
# print(dataset.get_edge_split())
print('DGL')
dataset = DglLinkPropPredDataset(dataset_name, meta_dict = meta_dict)
dataset = DglLinkPropPredDataset(dataset_name, meta_dict = meta_dict)
print(dataset[0])
# print(dataset.get_edge_split())
else:
raise ValueError('Invalid task category')
# zip
saver.zip()
print('Finished zipping!')
saver.cleanup()
if __name__ == '__main__':
test_datasetsaver()
| [
"[email protected]"
] | |
05be3d589bb0eef2a4cd064c43dcf7e93a68c7a2 | e8b38b8dfa348ff006eb197a7906ca8e491a23dc | /tests/conftest.py | 79144b69789b7a96597410997a9f0eea0252414d | [
"MIT"
] | permissive | pyccel/pyccel | d79a81dbdff1172839a6a1227abfcc1f97e6c97b | 1896b761ba662c90b14c195bbb6eb5cddc57cbfc | refs/heads/devel | 2023-08-30T12:15:25.244401 | 2023-08-28T09:31:32 | 2023-08-28T09:31:32 | 100,463,736 | 307 | 39 | MIT | 2023-09-14T19:29:26 | 2017-08-16T07:59:14 | Python | UTF-8 | Python | false | false | 2,423 | py | # pylint: disable=missing-function-docstring, missing-module-docstring
import logging
import os
import shutil
import pytest
from mpi4py import MPI
from pyccel.commands.pyccel_clean import pyccel_clean
github_debugging = 'DEBUG' in os.environ
if github_debugging:
import sys
sys.stdout = sys.stderr
@pytest.fixture( params=[
pytest.param("fortran", marks = pytest.mark.fortran),
pytest.param("c", marks = pytest.mark.c),
pytest.param("python", marks = pytest.mark.python)
],
scope = "session"
)
def language(request):
return request.param
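# Any test that accepts a `language` argument runs once per backend, e.g.
# (hypothetical test, shown only to illustrate the parametrized fixture):
#   def test_addition(language):
#       ...  # pyccel output is compiled with the fortran, c, or python backend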
def move_coverage(path_dir):
for root, _, files in os.walk(path_dir):
for name in files:
if name.startswith(".coverage"):
shutil.copyfile(os.path.join(root,name),os.path.join(os.getcwd(),name))
def pytest_runtest_teardown(item, nextitem):
path_dir = os.path.dirname(os.path.realpath(item.fspath))
move_coverage(path_dir)
config = item.config
xdist_plugin = config.pluginmanager.getplugin("xdist")
if xdist_plugin is None or "PYTEST_XDIST_WORKER_COUNT" not in os.environ \
        or os.getenv('PYTEST_XDIST_WORKER_COUNT') == '1':
print("Tearing down!")
marks = [m.name for m in item.own_markers ]
if 'parallel' not in marks:
pyccel_clean(path_dir, remove_shared_libs = True)
else:
comm = MPI.COMM_WORLD
comm.Barrier()
if comm.rank == 0:
pyccel_clean(path_dir, remove_shared_libs = True)
comm.Barrier()
def pytest_addoption(parser):
parser.addoption("--developer-mode", action="store_true", default=github_debugging, help="Show tracebacks when pyccel errors are raised")
def pytest_sessionstart(session):
# setup_stuff
if session.config.option.developer_mode:
from pyccel.errors.errors import ErrorsMode
ErrorsMode().set_mode('developer')
if github_debugging:
logging.basicConfig()
logging.getLogger("filelock").setLevel(logging.DEBUG)
# Clean path before beginning but never delete anything in parallel mode
path_dir = os.path.dirname(os.path.realpath(__file__))
config = session.config
xdist_plugin = config.pluginmanager.getplugin("xdist")
if xdist_plugin is None:
marks = [m.name for m in session.own_markers ]
if 'parallel' not in marks:
pyccel_clean(path_dir)
| [
"[email protected]"
] | |
41a3917f248cec7eca19c81329335ccd0bd32c96 | 696799b824503429a3ac65ebdc28890bfbcaebe0 | /plugins/com.astra.ses.spell.gui.cots_4.0.2.201806070922/win32/spell/spell/lib/adapter/value.py | 4d84f6d6e65da20cf8515b2f88d5d692597a2fe7 | [] | no_license | CalypsoCubesat/SPELL_GUI_4.0.2_win32_x86 | a176886b48873b090ab270c189113a8b2c261a06 | 9275ecfff2195ca4d4c297f894d80c1bcfa609e3 | refs/heads/master | 2021-08-03T08:04:25.821703 | 2019-10-28T04:53:50 | 2019-10-28T04:53:50 | 217,968,357 | 0 | 0 | null | 2021-08-02T17:03:44 | 2019-10-28T04:50:59 | Python | UTF-8 | Python | false | false | 3,989 | py | ###############################################################################
"""
(c) SES-ASTRA 2008
PACKAGE
spell.lib.adapter.value
FILE
    value.py
DESCRIPTION
Variant value helper class
COPYRIGHT
This software is the copyrighted work of SES ASTRA S.A.
All rights reserved.
PROJECT
UGCS/USL
AUTHOR
Rafael Chinchilla Camara (GMV)
DATE
02/10/2007
REVISION HISTORY
02/10/2007 10:30 Creation
"""
###############################################################################
from spell.lang.constants import *
from spell.lang.modifiers import *
###############################################################################
class ValueClass:
"""
This class implements a variant value with the following characteristics:
- value
- vtype (long, double...)
- radix (hex, dec, oct..)
- format (eng, raw)
- units (whatsoever)
"""
#==========================================================================
def __init__(self, value, format = ENG, radix = DEC, vtype = LONG, units = '', defCal = True):
self._value = value
self._vtype = vtype
if type(value)==int:
self._vtype = LONG
elif type(value)==float:
self._vtype = FLOAT
elif type(value)==str:
self._vtype = STRING
self._format = format
self._radix = radix
self._units = units
self._defCal = defCal
#==========================================================================
def set(self, value):
self._value = value
#==========================================================================
def get(self):
return self._value
#==========================================================================
def format(self, fmt = None):
if fmt is None:
return self._format
else:
self._format = fmt
#==========================================================================
def vtype(self, vt = None):
if vt is None:
return self._vtype
else:
self._vtype = vt
#==========================================================================
def radix(self, rd = None):
if rd is None:
return self._radix
else:
self._radix = rd
#==========================================================================
def units(self, u = None):
if u is None:
return self._units
else:
self._units = u
#==========================================================================
def __repr__(self):
return "[" + repr(self._value) + ",VType: " + self._vtype + ",Format: " +\
self._format + ", Radix: " + self._radix + ", Units: " + self._units + "]"
#==========================================================================
def evaluate(self, radix = DEC):
cnv = { DEC: '', HEX: '0x', OCT: '0' }
trns = { HEX: hex, OCT: oct }
res = None
try:
if isinstance(self._value, str):
if self._radix == BIN:
res = 0
for c in self._value:
res = res * 2 + eval(c)
elif self._radix in cnv:
res = eval(cnv[self._radix] + self._value)
elif isinstance(self._value, long) or isinstance(self._value, int) or isinstance(self._value, float):
res = self._value
except:
res = None
if res is None:
return None
if radix in trns:
res = trns[radix](res)
elif radix == BIN:
v = ''
while res > 0:
if res % 2 == 1: v = '1' + v
if res % 2 == 0: v = '0' + v
res >>= 1
res = '0b' + v
return res
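# Usage sketch (hypothetical values): ValueClass('FF', radix=HEX).evaluate()
# returns 255, while evaluate(radix=BIN) on the same object returns the
# string '0b11111111'.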
| [
"[email protected]"
] | |
7969080b1179beb14ddaf543f8a32366a6d882ae | 253cd5d6074d322a233bda37da4b1c663b6027b3 | /cooking/timestamp/broadcast_utils/user_utils.py | 13c1fc1ea72b1fe54f12287338a2f130e94404fa | [] | no_license | ZandTree/idea_project | 85321156149f9365c6380537d34f05f98e8885ae | e48ea39ef05b54c197b635313fb7b5304bd5691c | refs/heads/main | 2023-08-29T11:12:48.561578 | 2021-11-15T16:18:18 | 2021-11-15T16:18:18 | 417,647,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | def get_ip(req):
"""
if x_forward present return it;
otherwise remote_addr or empty string
"""
try:
forward = req.META.get('HTTP_X_FORWARDED_FOR')
if forward:
return req.META.get('HTTP_X_FORWARDED_FOR', req.META.get('REMOTE_ADDR', '')).split(',')[0].strip()
else:
return req.META.get('REMOTE_ADDR')
except:
return ''
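# Illustrative example: behind a proxy chain the forwarded header holds a
# comma-separated list and only the first (client) address is returned, e.g.
#   req.META == {'HTTP_X_FORWARDED_FOR': '203.0.113.5, 10.0.0.2'}  ->  '203.0.113.5'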
| [
"[email protected]"
] | |
6d73d4509904137b281d8d1e94290549eded70ac | dab869acd10a3dc76e2a924e24b6a4dffe0a875f | /Laban/LabanLib/dumpLabanRecognizer.py | 23f896b8fb16ae97cc5714311bc2eb2e59973fba | [] | no_license | ranBernstein/Laban | d82aff9b0483dd007e03a06e51f7d635f62ed05d | 54c88afa9493deacbdd182904cc5d180ecb208b4 | refs/heads/master | 2021-01-23T13:17:51.777880 | 2017-02-14T09:02:54 | 2017-02-14T09:02:54 | 25,508,010 | 3 | 1 | null | 2017-02-14T09:02:55 | 2014-10-21T07:16:01 | Tcl | UTF-8 | Python | false | false | 382 | py | import pickle
import LabanUtils.util as labanUtil
X = pickle.load( open( "X", "r" ) )
Y_laban = pickle.load( open( "Y_Laban", "r" ) )
labanClf, selectedIndices = labanUtil.getMultiTaskclassifier(X, Y_laban)
f = open('labanClf', 'w')
f.flush()
pickle.dump(labanClf, f)
f.close()
f = open('selectedIndices', 'w')
f.flush()
pickle.dump(selectedIndices, f)
f.close()
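# The dumped objects can later be restored with the matching pickle.load calls, e.g.:
#   labanClf = pickle.load(open('labanClf', 'r'))
#   selectedIndices = pickle.load(open('selectedIndices', 'r'))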
| [
"[email protected]"
] | |
eb03fe561672b829d8ba86e36d4ee415da5ad41c | 38258a7dd9acbfb7adf72983015de68a948a4826 | /B_15000~/B_15652.py | cc6422a80ae1ddbd9b06ca5b3cf7a4710db163d2 | [] | no_license | kangsm0903/Algorithm | 13a7fe5729039a1d0ce91a574c4755a8a92fb02b | 7d713d1c9e2e4dc30141d4f409ac1430a357065b | refs/heads/master | 2022-10-04T00:33:49.247977 | 2022-09-26T12:51:16 | 2022-09-26T12:51:16 | 219,265,010 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | # 중복 허용
# Non-decreasing order (duplicates allowed)
N,M=list(map(int,input().split()))
result=[]
def is_promising():
base=int(result[0])
for i in range(1,len(result)):
        if base>int(result[i]): # not in non-decreasing order
return False
else:
            base=int(result[i]) # update the comparison value to the latest element
continue
return True
def BruteForce():
global result
if len(result)==M:
print(' '.join(result))
return
for i in range(1,N+1):
result.append(str(i))
if is_promising():
BruteForce()
result.pop()
BruteForce() | [
"[email protected]"
] | |
0307636f3350b41783f6bc369c9b7562faa04092 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_136/1154.py | d57c56b69272d0a44af0fad344cc5e916a3e8b59 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | infile = open('D:\study\codejam\codejam2014\B-large.in','r')
outfile = open('D:\study\codejam\codejam2014\B-large.out','w')
def main():
T = int(infile.readline())
for case in range(1,T+1):
doCase(case)
infile.close()
outfile.close()
def doCase(case):
c,f,x = [float(x) for x in infile.readline().split()]
outfile.write('Case #'+str(case)+': '+str(check(c,f,x))+'\n')
#print('case #'+str(case)+' '+str(check(c,f,x)))
def check(c,f,x):
rate = 2
time1 = 0
while x/(rate+f)+c/rate < x/rate:
time1 += c/rate
rate += f
time = time1+x/rate
return round(time,7)
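# Quick sanity check of the greedy condition above: with C=30.0, F=1.0, X=2.0 the
# loop body never runs because 2/(2+1) + 30/2 = 15.67 > 2/2 = 1.0, i.e. buying a
# farm never pays off, so the answer is simply X/rate = 1.0.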
| [
"[email protected]"
] | |
5ed81142cd358de803a556ca744bc771369920b6 | 3db7d6e2aea7c47b68776443ba27f9fa68546e35 | /py/h2o_perf.py | 4853b21b06ac9dda9248be8276096289f75a268d | [
"Apache-2.0"
] | permissive | alixaxel/h2o | edb349168d1856ec0f6d2c6c33a4117e2229db24 | 0868c9df624edb3fd7d946dcd7d5092499bf96cc | refs/heads/master | 2021-01-16T22:44:49.012401 | 2013-04-20T22:55:36 | 2013-04-20T22:55:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,673 | py | import logging, psutil
import h2o
import time, os
class PerfH2O(object):
# so a test can create multiple logs
def change_logfile(self, subtest_name):
# change to another logfile after we've already been going
# just want the base name if we pointed to it from somewhere else
short_subtest_name = os.path.basename(subtest_name)
blog = 'benchmark_' + short_subtest_name + '.log'
print "\nSwitch. Now appending to %s." % blog, "Between tests, you may want to delete it if it gets too big"
# http://stackoverflow.com/questions/5296130/restart-logging-to-a-new-file-python
# manually reassign the handler
logger = logging.getLogger()
logger.handlers[0].stream.close()
logger.removeHandler(logger.handlers[0])
file_handler = logging.FileHandler(blog)
file_handler.setLevel(logging.CRITICAL) # like the init
formatter = logging.Formatter("%(asctime)s %(message)s") # date/time stamp
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
def init_logfile(self, subtest_name):
# default should just append thru multiple cloud builds.
# I guess sandbox is cleared on each cloud build. so don't build there.
# just use local directory? (python_test_name global set below before this)
short_subtest_name = os.path.basename(subtest_name)
blog = 'benchmark_' + short_subtest_name + '.log'
self.subtest_name = short_subtest_name
print "\nAppending to %s." % blog, "Between tests, you may want to delete it if it gets too big"
logging.basicConfig(filename=blog,
# we use CRITICAL for the benchmark logging to avoid info/warn stuff
# from other python packages
level=logging.CRITICAL,
format='%(asctime)s %(message)s') # date/time stamp
def __init__(self, python_test_name):
short_python_test_name = os.path.basename(python_test_name)
self.python_test_name = short_python_test_name
self.init_logfile(short_python_test_name)
self.MINCACHETOPRINT = 7
self.JSTACKINTERVAL = 20
self.IOSTATSINTERVAL = 10
# initialize state used for spot rate measurements during polling
statsList = ['read_bytes','write_bytes','read_time','write_time',
'bytes_sent','bytes_recv','dropin','dropout','errin','errout']
self.pollStats = {}
for s in statsList:
self.pollStats[s] = 0
self.pollStats['count'] = 0
self.snapshotTime = time.time()
self.pollStats['lastJstackTime'] = self.snapshotTime
self.pollStats['lastIOstatsTime'] = self.snapshotTime
self.pollStats['time'] = self.snapshotTime
self.elapsedTime = 0
def save(self, cpu_percent=None, dioc=None, nioc=None, jstack=None, iostats=None, snapshotTime=None):
# allow incremental update, or all at once
if cpu_percent:
self.pollStats['cpu_percent'] = cpu_percent
if dioc:
self.pollStats['read_bytes'] = dioc.read_bytes
self.pollStats['write_bytes'] = dioc.write_bytes
self.pollStats['read_time'] = dioc.read_time
self.pollStats['write_time'] = dioc.write_time
if nioc:
self.pollStats['bytes_sent'] = nioc.bytes_sent
self.pollStats['bytes_recv'] = nioc.bytes_recv
# self.pollStats['dropin'] = nioc.dropin
# self.pollStats['dropout'] = nioc.dropout
# self.pollStats['errin'] = nioc.errin
# self.pollStats['errout'] = nioc.errout
if jstack:
self.pollStats['lastJstackTime'] = self.snapshotTime
if iostats:
self.pollStats['lastIOstatsTime'] = self.snapshotTime
# this guy is the 'final'
if snapshotTime:
self.pollStats['time'] = self.snapshotTime
self.pollStats['count'] += 1
# just log a message..useful for splitting tests of files
def message(self, l):
logging.critical(l)
def log_jstack(self, initOnly=False):
        # only run jstack if >= JSTACKINTERVAL seconds have passed since the last one
if ((self.snapshotTime - self.pollStats['lastJstackTime']) < self.JSTACKINTERVAL):
return
# complicated because it's all one big string
# and lots of info we don't want.
jstackResult = h2o.nodes[0].jstack()
node0 = jstackResult['nodes'][0]
stack_traces = node0["stack_traces"]
# all one string
stackLines = stack_traces.split('\n')
# create cache
def init_cache(self):
self.cache = []
self.cacheHasJstack = False
self.cacheHasTCP = False
def log_and_init_cache(self):
if self.cacheHasTCP or (not self.cacheHasJstack and len(self.cache) >= self.MINCACHETOPRINT):
for c in self.cache:
logging.critical(c)
init_cache(self)
init_cache(self)
# pretend to start at stack trace break
lastLine = ""
for s in stackLines:
# look for gaps, if 7 lines in your cache, print them
if (lastLine==""):
log_and_init_cache(self)
else:
# put a nice "#" char for grepping out jstack stuff
self.cache.append("#" + s)
# always throw it away later if JStack cache
if 'JStack' in s:
self.cacheHasJstack = True
# always print it if it mentions TCP
if 'TCP' in s:
self.cacheHasTCP = True
lastLine = s
# check last one
log_and_init_cache(self)
self.pollStats['lastJstackTime'] = self.snapshotTime
self.save(jstack=True)
def log_cpu(self, snapShotTime, initOnly=False):
cpu_percent = psutil.cpu_percent(percpu=True)
l = "%s %s" % ("cpu_percent:", cpu_percent)
if not initOnly:
logging.critical(l)
self.save(cpu_percent=cpu_percent)
def log_disk(self, initOnly=False):
dioc = psutil.disk_io_counters()
diocSpotRdMBSec = (dioc.read_bytes - self.pollStats['read_bytes']) / (1e6 * self.elapsedTime)
diocSpotWrMBSec = (dioc.write_bytes - self.pollStats['write_bytes']) / (1e6 * self.elapsedTime)
diocSpotRdTime = (dioc.read_time - self.pollStats['read_time']) / 1e3
diocSpotWrTime = (dioc.write_time - self.pollStats['write_time']) / 1e3
l = "Disk. Spot RdMB/s: {:>6.2f} Spot WrMB/s: {:>6.2f} {!s} {!s} elapsed: {:<6.2f}".format(
diocSpotRdMBSec, diocSpotWrMBSec, diocSpotRdTime, diocSpotWrTime, self.elapsedTime)
if not initOnly:
logging.critical(l)
self.save(dioc=dioc)
def log_network(self, initOnly=False):
nioc = psutil.network_io_counters()
niocSpotSentMBSec = (nioc.bytes_sent - self.pollStats['bytes_sent'])/(1e6 * self.elapsedTime)
niocSpotRecvMBSec = (nioc.bytes_recv - self.pollStats['bytes_recv'])/(1e6 * self.elapsedTime)
# niocSpotDropIn = nioc.dropin - self.pollStats['dropin']
# niocSpotDropOut = nioc.dropout - self.pollStats['dropout']
# niocSpotErrIn = nioc.errin - self.pollStats['errin']
# niocSpotErrOut = nioc.errout - self.pollStats['errout']
# stuff doesn't exist on ec2?
niocSpotDropIn = 0
niocSpotDropOut = 0
niocSpotErrIn = 0
niocSpotErrOut = 0
l = "Network. Spot RecvMB/s: {:>6.2f} Spot SentMB/s: {:>6.2f} {!s} {!s} {!s} {!s}".format(
niocSpotRecvMBSec, niocSpotSentMBSec,\
niocSpotDropIn, niocSpotDropOut, niocSpotErrIn, niocSpotErrOut)
if not initOnly:
logging.critical(l)
self.save(nioc=nioc)
def log_iostats(self, initOnly=False):
if ((self.snapshotTime - self.pollStats['lastJstackTime']) < self.IOSTATSINTERVAL):
return
DO_IO_RW = True
DO_IOP = False
node = h2o.nodes[0]
stats = node.iostatus()
### h2o.verboseprint("log_iostats:", h2o.dump_json(stats))
histogram = stats['histogram']
def log_window(w):
if k['window'] == w:
i_o = k['i_o']
node = k['cloud_node_idx']
if k['r_w'] == 'read':
r_w = 'rd'
elif k['r_w'] == 'write':
r_w = 'wr'
else:
r_w = k['r_w']
for l,v in k.iteritems():
fmt = "iostats: window{:<2d} node {:d} {:s} {:s} {:s} MB/sec: {:6.2f}"
if 'peak' in l:
## logging.critical(fmt.format(w, node, i_o, r_w, "peak", (v/1e6)))
pass
if 'effective' in l:
logging.critical(fmt.format(w, node, i_o, r_w, "eff.", (v/1e6)))
if DO_IO_RW:
print "\nlog_iotstats probing node:", str(node.addr) + ":" + str(node.port)
for k in histogram:
### print k
log_window(10)
### log_window(30)
# we want to sort the results before we print them, so grouped by node
if DO_IOP:
iopList = []
raw_iops = stats['raw_iops']
### print
for k in raw_iops:
### print k
node = k['node']
i_o = k['i_o']
r_w = k['r_w']
size = k['size_bytes']
blocked = k['blocked_ns']
duration = k['duration_ms'] * 1e6 # convert to ns
if duration != 0:
blockedPct = "%.2f" % (100 * blocked/duration) + "%"
else:
blockedPct = "no duration"
iopMsg = "node: %s %s %s %d bytes. blocked: %s" % (node, i_o, r_w, size, blockedPct)
iopList.append([node, iopMsg])
iopList.sort(key=lambda iop: iop[0]) # sort by node
totalSockets = len(iopList)
# something wrong if 0?
if totalSockets == 0:
print "WARNING: is something wrong with this io stats response?"
print h2o.dump_json(stats)
logging.critical("iostats: " + "Total sockets: " + str(totalSockets))
for i in iopList:
logging.critical("iostats:" + i[1])
# don't save anything
self.save(iostats=True)
# call with init?
def get_log_save(self, benchmarkLogging=None, initOnly=False):
if not benchmarkLogging:
return
self.snapshotTime = time.time()
self.elapsedTime = self.snapshotTime - self.pollStats['time']
logEnable = {
'cpu': False,
'disk': False,
'network': False,
'jstack': False,
'iostats': False,
}
for e in benchmarkLogging:
logEnable[e] = True
if logEnable['jstack']:
self.log_jstack(initOnly=initOnly)
if logEnable['cpu']:
self.log_cpu(initOnly)
if logEnable['iostats']:
self.log_iostats(initOnly=initOnly)
# these do delta stats. force init if no delta possible
forceInit = self.pollStats['count'] == 0
if logEnable['disk']:
self.log_disk(initOnly=initOnly or forceInit)
if logEnable['network']:
self.log_network(initOnly=initOnly or forceInit)
# done!
self.save(snapshotTime=True)
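# Rough usage sketch (hypothetical test name; assumes an H2O cloud is already built
# and reachable through h2o.nodes):
#   perf = PerfH2O('test_parse_benchmark')
#   perf.get_log_save(benchmarkLogging=['cpu', 'disk', 'network'], initOnly=True)
#   ... run the benchmarked work ...
#   perf.get_log_save(benchmarkLogging=['cpu', 'disk', 'network'])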
| [
"[email protected]"
] | |
ab39ec8dc7ed3dc0a971ff1d720fcf1da8835483 | 5a01497e7c29e2488b6a4cb0478405239375eb66 | /apetools/commons/broadcaster.py | c2cb2070a7ee15ecdd67b7b8e8a1da9bc821e7bf | [
"Apache-2.0"
] | permissive | russell-n/oldape | 8b4d9e996181dc1c7175f72d75c6193443da591b | b4d1c77e1d611fe2b30768b42bdc7493afb0ea95 | refs/heads/master | 2021-05-30T20:02:18.895922 | 2016-03-27T04:38:18 | 2016-03-27T04:38:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,651 | py |
from apetools.baseclass import BaseClass
class Broadcaster(BaseClass):
"""
A broadcaster sends a single datum to multiple targets
"""
def __init__(self, receivers):
"""
:param:
- `receivers`: an iterable of callable receivers
"""
super(Broadcaster, self).__init__()
self._receivers = None
self.receivers = receivers
self._temp_receivers = None
return
@property
def receivers(self):
"""
:return: receivers of broadcast
"""
return self._receivers
@receivers.setter
def receivers(self, new_receivers):
"""
:param:
- `new_receivers`: iterable of callable receivers (or single receiver)
"""
try:
self._receivers = [receiver for receiver in new_receivers]
except TypeError as error:
self._receivers = [new_receivers]
self.logger.debug(error)
return
@property
def temp_receivers(self):
"""
:return: iterable of receivers to remove at next set-up
"""
if self._temp_receivers is None:
self._temp_receivers = []
return self._temp_receivers
@temp_receivers.setter
def temp_receivers(self, new_receivers):
"""
:param:
- `new_receivers`: iterable of callable receivers (or single receiver)
"""
try:
self._temp_receivers = [receiver for receiver in new_receivers]
except TypeError as error:
self._temp_receivers = [new_receivers]
self.logger.debug(error)
return
def subscribe(self, receiver):
"""
Adds a new receiver to the receivers (if it isn't already there)
"""
if receiver not in self.receivers:
self.logger.debug("subscribing {0}".format(receiver))
self.receivers.append(receiver)
return
def unsubscribe(self, receiver):
"""
:param:
- `receiver`: a receiver object to remove
"""
self._receivers = [r for r in self._receivers if r is not receiver]
return
def set_up(self, targets=None):
"""
The targets are removed the next time this is called.
:param:
- `targets`: a set of temporary targets
:postcondition: reset method for each permanent receiver called
"""
self._temp_receivers = None
if targets is not None:
self.temp_receivers = targets
for receiver in self.receivers:
try:
receiver.reset()
except AttributeError as error:
self.logger.debug(error)
self.logger.debug("Unable to reset {0}".format(receiver))
return
def reset(self):
"""
:postcondition: self.receivers is None
"""
self._receivers = None
return
def __contains__(self, receiver):
"""
:param:
- `receiver`: an object
:rtype: Boolean
:return: True if item in receivers
"""
return receiver in self.receivers
def __iter__(self):
"""
:return: iterator over self.receivers
"""
return iter(self.receivers)
def __call__(self, datum):
"""
Calls each receiver with the `datum`
:param:
- `datum`: A single data item
"""
for receiver in self.receivers:
receiver(datum)
return
# end class Broadcaster
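# A small usage sketch (hypothetical receivers; any callable can subscribe, and the
# package's BaseClass logger is assumed to be available):
if __name__ == '__main__':
    received = []
    caster = Broadcaster(received.append)
    caster.subscribe(lambda datum: None)
    caster("sample datum")   # every subscribed receiver is called with the datum
    print(received)          # -> ['sample datum']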
| [
"[email protected]"
] | |
30119e16f12f09d9fa55d967a0bb62f049303183 | 2f5ab43956b947b836e8377370d786e5ee16e4b0 | /sklearn2code/sym/test/test_function.py | c0ae2740f50a0419fbe09bbe835b40e8516be96a | [
"MIT"
] | permissive | modusdatascience/sklearn2code | b175fb268fa2871c95f0e319f3cd35dd54561de9 | 3ab82d82aa89b18b18ff77a49d0a524f069d24b9 | refs/heads/master | 2022-09-11T06:16:37.604407 | 2022-08-24T04:43:59 | 2022-08-24T04:43:59 | 115,747,326 | 4 | 2 | MIT | 2018-05-01T00:11:51 | 2017-12-29T19:05:03 | Python | UTF-8 | Python | false | false | 3,495 | py | from sklearn2code.sym.function import Function
from nose.tools import assert_list_equal, assert_equal
from operator import __add__, __mul__, __sub__
from six import PY3
from sklearn2code.sym.expression import RealVariable, RealNumber
def test_map_symbols():
fun0 = Function(('x', 'y'), tuple(), (RealVariable('x') + RealVariable('y'),))
fun = Function(('x', 'y'), (((('z',), (fun0, ('x','y')))),), (RealVariable('x') / RealVariable('z'),))
mapped_fun = fun.map_symbols({'x': 'q'})
assert_list_equal(list(mapped_fun.inputs), list(map(RealVariable, ('q', 'y'))))
assert_equal(set(mapped_fun.calls[0][1][1]), set(map(RealVariable, ('q', 'y'))))
assert_equal(mapped_fun.outputs[0], RealVariable('q') / RealVariable('z'))
def test_compose():
fun0 = Function('x', tuple(), (RealVariable('x'), RealNumber(1) - RealVariable('x')))
fun = Function(('x', 'y'), tuple(), (RealVariable('x') / RealVariable('y'),))
composed_fun = fun.compose(fun0)
assert_equal(composed_fun.calls[0][1][0], fun0)
assert_equal(composed_fun.inputs, fun0.inputs)
assert_equal(fun.outputs, composed_fun.map_output_symbols(dict(zip(composed_fun.calls[0][0], fun.inputs))))
def test_from_expressions():
fun = Function.from_expressions((RealVariable('x'), RealVariable('x') + RealVariable('y')))
assert_equal(fun, Function(('x', 'y'), tuple(), (RealVariable('x'), RealVariable('x') + RealVariable('y'))))
def test_trim():
fun0 = Function('x', ((('u',), (Function.from_expression(RealVariable('x0') + RealVariable('x1')), ('x', 'x'))),),
(RealVariable('u'), RealNumber(1) - RealVariable('x')))
fun = Function(('x', 'y'), ((('z','w'), (fun0, ('y',))),), (RealVariable('x') / RealVariable('w'),)).trim()
assert_equal(fun.inputs, (RealVariable('x'), RealVariable('y')))
assert_equal(fun.outputs, (RealVariable('x') / RealVariable('w'),))
assert_equal(fun.calls, (((RealVariable('w'),), (Function(('x', ), tuple(), (RealNumber(1)-RealVariable('x'),)), (RealVariable('y'),))),))
class TestOps(object):
pass
def add_op(op):
def test_op(self):
fun0 = Function(('x', 'y'), tuple(), (RealVariable('x') + RealVariable('y'),))
fun = Function(('x', 'y'), (((('z',), (fun0, ('x','y')))),), (RealVariable('x') / RealVariable('z'),))
fun_op_two = op(fun, RealNumber(2))
assert_equal(fun_op_two.outputs[0], op(RealVariable('x') / RealVariable('z'), RealNumber(2)))
two_op_fun = op(RealNumber(2), fun)
assert_equal(two_op_fun.outputs[0], op(RealNumber(2), RealVariable('x') / RealVariable('z')))
fun_op_fun = op(fun, fun)
assert_equal(fun_op_fun.outputs[0], op(RealVariable('x') / RealVariable('z'), RealVariable('x') / RealVariable('z')))
assert_equal(fun_op_fun.inputs, fun.inputs)
assert_equal(fun_op_fun.calls, fun.calls)
test_name = 'test_%s' % op.__name__.strip('__')
test_op.__name__ = test_name
setattr(TestOps, test_name, test_op)
add_op(__add__)
add_op(__mul__)
add_op(__sub__)
if PY3:
from operator import __truediv__ # @UnresolvedImport
add_op(__truediv__)
else:
from operator import __div__ # @UnresolvedImport
add_op(__div__)
if __name__ == '__main__':
# This code will run the test in this file.'
import sys
import nose
module_name = sys.modules[__name__].__file__
result = nose.run(argv=[sys.argv[0],
module_name,
'-s', '-v'])
| [
"[email protected]"
] | |
f0ddcfc1386615bfe664efdcc8da103a73ee296d | 05cde6f12d23eb67258b5a21d4fb0c783bcafbe5 | /almebic/models/engine/db_engine.py | 1078b54006ced768772dd3efb6b54c9b7762b300 | [] | no_license | alejolo311/DataInMotion | f5aff692bcaf9a795969951146f6ab7dc6557b08 | 75014600785f9d7f8a4771a9bb24e322e812d08f | refs/heads/master | 2023-05-13T00:57:41.407175 | 2020-07-26T00:51:49 | 2020-07-26T00:51:49 | 267,895,607 | 3 | 2 | null | 2023-05-01T21:26:16 | 2020-05-29T15:46:04 | CSS | UTF-8 | Python | false | false | 2,083 | py | #!/usr/bin/python3
"""
Controls the ORM transactions using postgres db
"""
from models.base import BaseNode, Base
from models.user import User
from models.board import Board
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
class DBEngine:
__engine = None
__session = None
def __init__(self):
"""
        Creates the engine object (the connection settings below are currently hard-coded)
"""
user = 'data_im_dev'
password = 'dim_passwd'
host = '172.21.0.2'
db = 'data_im_dev_db'
self.__engine = create_engine('postgres://{}:{}@{}:5432/{}'.format(
user, password, host, db
))
def reload(self):
"""
Creates the Models based on metadata
"""
try:
Base.metadata.create_all(self.__engine)
sess_factory = sessionmaker(bind=self.__engine,
expire_on_commit=False)
Session = scoped_session(sess_factory)
self.__session = Session
except Exception as e:
print(e)
def all(self, cls=None):
"""
Returns all record, or all by class
"""
newdict = {}
objs = self.__session.query(cls).all()
for obj in objs:
key = obj.__class__.__name__ + '.' + obj.id
newdict[key] = obj
return (newdict)
def new(self, obj):
"""
Creates a new object
"""
self.__session.add(obj)
def save(self):
"""
Saves changes in session
"""
self.__session.commit()
def close(self):
"""
Remove the private session
"""
self.__session.remove()
def get(self, cls, id):
"""
Resturn a record by class and id
"""
objs = self.all(cls)
for obj in objs.values():
if obj.id == id:
return obj
return None
def delete(self, obj):
"""
Deletes a record
"""
self.__session.delete(obj)
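# Rough usage sketch (hypothetical; assumes the hard-coded Postgres instance above is
# reachable and that the User model accepts these keyword fields):
#   storage = DBEngine()
#   storage.reload()
#   storage.new(User(id="u1", name="Ada"))
#   storage.save()
#   print(storage.all(User))
#   storage.close()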
| [
"[email protected]"
] | |
35fe9e8d12cff46a0e0ea7b51843e2426507bb4a | 59e87634c67508bf7eba8c8b9845354aefa57bc7 | /ML/naiveBayes/bayes-titanic.py | b9caec4be2a8acf3fb164902e7017e85f90efa1c | [] | no_license | Caohengrui/MLAndDL | 48729b94b2232e628b699cf8d0d4a6c6e81a36f5 | d0637f58f45e9c091cd90bbfe9c207223d0994f3 | refs/heads/master | 2023-03-16T01:06:03.316463 | 2020-04-14T07:44:15 | 2020-04-14T07:44:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,446 | py | """
Author:wucng
Time: 20200110
Summary: Naive Bayes classification on the Titanic dataset
Source code: https://github.com/wucng/MLAndDL
Reference: https://cuijiahua.com/blog/2017/11/ml_4_bayes_1.html
"""
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler,MinMaxScaler
# from sklearn.neighbors import KNeighborsRegressor,KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score,auc
import pandas as pd
import numpy as np
from functools import reduce
from collections import Counter
import pickle,os,time
# 1. Load the dataset (and preprocess it)
def loadData(dataPath: str) -> tuple:
    # If the file already has a header row, header/names can be omitted; sep is the field delimiter
df = pd.read_csv(dataPath, sep=",")
    # Fill in missing values
df["Age"] = df["Age"].fillna(df["Age"].median())
df['Embarked'] = df['Embarked'].fillna('S')
# df = df.fillna(0)
    # Encode the data numerically
    # Map text values to numbers
df.replace("male", 0, inplace=True)
df.replace("female", 1, inplace=True)
df.loc[df["Embarked"] == "S", "Embarked"] = 0
df.loc[df["Embarked"] == "C", "Embarked"] = 1
df.loc[df["Embarked"] == "Q", "Embarked"] = 2
    # Split into feature data and label data
    X = df.drop(["PassengerId","Survived","Name","Ticket","Cabin"], axis=1) # feature data
    y = df.Survived # or df["Survived"] # label data
    # Normalize the data to the [0, 1] range
X = (X - np.min(X, axis=0)) / (np.max(X, axis=0) - np.min(X, axis=0))
    # Alternatively, using sklearn:
# X = MinMaxScaler().transform(X)
    # Inspect the DataFrame if needed
# df.info()
# df.describe()
return (X.to_numpy(), y.to_numpy())
class NaiveBayesClassifier(object):
def __init__(self,save_file="model.ckpt"):
self.save_file = save_file
def fit(self,X:np.array,y:np.array):
if not os.path.exists(self.save_file):
            # Compute the prior probability of each class
dict_y = dict(Counter(y))
dict_y = {k:v/len(y) for k,v in dict_y.items()}
            # Compute the occurrence probability of every value of every feature
unique_label = list(set(y))
            dict_feature_value={} # probability of each value of each feature
for col in range(len(X[0])):
                data = X[...,col] # one feature column
unique_val = list(set(data))
for val in unique_val:
dict_feature_value[str(col)+"_"+str(val)] = np.sum(data==val)/len(data)
            dict_feature_value_label = {} # probability of each feature value conditioned on each class
for label in unique_label:
datas = X[y==label]
for col in range(len(datas[0])):
                    data = datas[..., col] # one feature column
unique_val = list(set(data))
for val in unique_val:
dict_feature_value_label[str(label)+"_"+str(col)+"_"+str(val)]=np.sum(data==val)/len(data)
# save
result={"dict_y":dict_y,"dict_feature_value":dict_feature_value,
"dict_feature_value_label":dict_feature_value_label}
pickle.dump(result,open(self.save_file,"wb"))
# return dict_y,dict_feature_value,dict_feature_value_label
def __predict(self,X:np.array):
data = pickle.load(open(self.save_file,"rb"))
dict_y, dict_feature_value, dict_feature_value_label = data["dict_y"],data["dict_feature_value"],\
data["dict_feature_value_label"]
labels = sorted(list(dict_y.keys()))
        # Compute the per-class probability of every sample
preds = np.zeros([len(X),len(labels)])
for i,x in enumerate(X):
for j,label in enumerate(labels):
p1 = 1
p2 = 1
for col,val in enumerate(x):
p1*= dict_feature_value_label[str(label)+"_"+str(col)+"_"+str(val)] if str(label)+"_"+str(col)+"_"+str(val) \
in dict_feature_value_label else self.__weighted_average(str(label)+"_"+str(col)+"_"+str(val),dict_feature_value_label) # self.__fixed_value()
p2*= dict_feature_value[str(col)+"_"+str(val)] if str(col)+"_"+str(val) in dict_feature_value else \
self.__weighted_average(str(col)+"_"+str(val),dict_feature_value) # self.__fixed_value()
preds[i,j] = p1*dict_y[label]/p2
return preds
def __fixed_value(self):
return 1e-3
def __weighted_average(self,key:str,data_dict:dict):
"""插值方式找到离该key对应的最近的data_dict中的key做距离加权平均"""
tmp = key.split("_")
value = float(tmp[-1])
if len(tmp)==3:
tmp_key = tmp[0]+"_"+tmp[1]+"_"
else:
tmp_key = tmp[0] + "_"
        # Find the related keys
# related_keys = []
values = [value]
for k in list(data_dict.keys()):
if tmp_key in k:
# related_keys.append(k)
values.append(float(k.split("_")[-1]))
        # Apply distance weighting
values = sorted(values)
index = values.index(value)
        # Interpolate between the previous and the next value
last = max(0,index-1)
next = min(index+1,len(values)-1)
if index==last or index==next:
return self.__fixed_value()
else:
d1=abs(values[last] - value)
d2=abs(values[next] - value)
v1 = data_dict[tmp_key+str(values[last])]
v2 = data_dict[tmp_key+str(values[next])]
            # Distance weighting (original note: y=e^(-x); the line below actually uses log-based weights)
return (np.log(d1)*v1+np.log(d2)*v2)/(np.log(d1)+np.log(d2))
def predict_proba(self,X:np.array):
return self.__predict(X)
def predict(self,X:np.array):
return np.argmax(self.__predict(X),-1)
def accuracy(self,y_true:np.array,y_pred:np.array)->float:
return round(np.sum(y_pred==y_true)/len(y_pred),5)
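# Note: each row of `preds` computed in __predict follows the naive-Bayes factorisation
#   P(y | x)  is proportional to  P(y) * prod_i P(x_i | y) / prod_i P(x_i)
# where the per-feature terms come from the dictionaries built in fit() and the
# interpolation helpers above fill in feature values that were never seen in training.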
if __name__=="__main__":
dataPath = "../../dataset/titannic/train.csv"
X, y = loadData(dataPath)
    # Split into training and test sets
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=40)
start = time.time()
clf = NaiveBayesClassifier()
clf.fit(X_train,y_train)
y_pred = clf.predict(X_test)
print("cost time:%.6f(s) acc:%.3f"%(time.time()-start,clf.accuracy(y_test,y_pred)))
# cost time:0.089734(s) acc:0.771
    # Using sklearn's GaussianNB
start = time.time()
clf = GaussianNB()
clf.fit(X_train,y_train)
y_pred = clf.predict(X_test)
print("cost time:%.6f(s) acc:%.3f" % (time.time() - start, accuracy_score(y_test, y_pred)))
# cost time:0.001023(s) acc:0.810
    # Using sklearn's DecisionTreeClassifier
start = time.time()
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print("cost time:%.6f(s) acc:%.3f" % (time.time() - start, accuracy_score(y_test, y_pred)))
# cost time:0.008215(s) acc:0.816
    # Using sklearn's RandomForestClassifier
start = time.time()
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print("cost time:%.6f(s) acc:%.3f" % (time.time() - start, accuracy_score(y_test, y_pred)))
# cost time:0.018951(s) acc:0.782 | [
"[email protected]"
] | |
38a90854558605e5a014f7e6272b4f3c11060c65 | 265a07a2becd232b292872d1d7136789463874be | /lianxi代码/erchashu.py | 5543da1004e52bdcd18148677402156b24dcc306 | [] | no_license | Lz0224/Python-exercise | f4918b8cd5f7911f0c35c0458c2269959937d07d | 3d09f54aebc653f4a5b36765b25c7241e3960764 | refs/heads/master | 2020-12-24T22:20:55.573019 | 2017-08-11T07:18:16 | 2017-08-11T07:18:16 | 100,005,776 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,647 | py | #!/usr/bin/python
#coding=utf-8
'''
created by zwg on 2017-7-8
'''
import copy
class node(object):
def __init__(self, name, data):
self.data = data
self.name = name
self.Rchild = None
self.Lchild = None
self.child_number = 0
self.parent = None
def add_Rchild(self, node):
if self.Rchild is not None:
self.Rchild = node
else:
self.Rchild = node
self.child_number += 1
node.set_parent(self)
def drop_Rchild(self):
self.Rchild = None
self.child_number -= 1
def set_parent(self, node):
self.parent = node
def add_Lchild(self, node):
if self.Lchild is not None:
self.Lchild = node
else:
self.Lchild = node
self.child_number += 1
node.set_parent(self)
def drop_Lchild(self):
self.Lchild = None
self.child_number -= 1
class tree(object):
def __init__(self, node):
self.parent = node
self.depth = 1
        self.all_node = {node.name: node}
self.enable_node = {node.name:node}
c1 = node.Rchild
c2 = node.Lchild
C = [c1, c2]
B = [i for i in C if i is not None]
if len(B) == 2:
del self.enable_node[node.name]
while len(B) != 0:
self.depth += 1
C = copy.copy(B)
for i in B:
C.remove(i)
if i.Rchild is not None:
C.append(i.Rchild)
if i.Lchild is not None:
                    C.append(i.Lchild)
            # Move down to the next level; without this the while-loop above never terminates.
            B = C
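# A small sanity check of the classes above (hypothetical node names/data):
if __name__ == '__main__':
    root = node('root', 0)
    left, right = node('L', 1), node('R', 2)
    root.add_Lchild(left)
    root.add_Rchild(right)
    t = tree(root)
    print(t.depth)  # expected: 2 for this three-node tree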
| [
"[email protected]"
] | |
b8e02a80dd4ae30959b434085ed27933a2f54964 | ae3d0e3c2fb614d96f6c787583c6e2e4cb654ad4 | /leetcode/89. 格雷编码.py | 6efb740a93fba7e0c11adf3290a8e415330f35cf | [] | no_license | Cjz-Y/shuati | 877c3f162ff75f764aa514076caccad1b6b43638 | 9ab35dbffed7865e41b437b026f2268d133357be | refs/heads/master | 2023-02-02T10:34:05.705945 | 2020-12-14T01:41:39 | 2020-12-14T01:41:39 | 276,884,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 921 | py | from typing import List
class Solution:
def grayCode(self, n: int) -> List[int]:
if n == 0:
return [0]
current = '0' * n
ans = [current]
use = set()
use.add(current)
while current:
next = None
sl = list(current)
for i in range(len(current)):
if sl[i] == '0':
sl[i] = '1'
else:
sl[i] = '0'
temp = ''.join(sl)
if temp not in use:
use.add(temp)
next = temp
ans.append(temp)
break
else:
if sl[i] == '0':
sl[i] = '1'
else:
sl[i] = '0'
current = next
ans = [int(item, 2) for item in ans]
return ans
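# Quick check (worked by hand): Solution().grayCode(2) returns [0, 2, 3, 1];
# every neighbouring pair differs in exactly one bit.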
| [
"[email protected]"
] | |
0b573c7d0218cd57688f0d50721997333fe6315d | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-cbn/aliyunsdkcbn/request/v20170912/CreateTransitRouteTableAggregationRequest.py | 33fc8ff3c8ce5418023c4de179e733b508294cf5 | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 4,095 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcbn.endpoint import endpoint_data
class CreateTransitRouteTableAggregationRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cbn', '2017-09-12', 'CreateTransitRouteTableAggregation')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_TransitRouteTableAggregationDescription(self): # String
return self.get_query_params().get('TransitRouteTableAggregationDescription')
def set_TransitRouteTableAggregationDescription(self, TransitRouteTableAggregationDescription): # String
self.add_query_param('TransitRouteTableAggregationDescription', TransitRouteTableAggregationDescription)
def get_TransitRouteTableAggregationName(self): # String
return self.get_query_params().get('TransitRouteTableAggregationName')
def set_TransitRouteTableAggregationName(self, TransitRouteTableAggregationName): # String
self.add_query_param('TransitRouteTableAggregationName', TransitRouteTableAggregationName)
def get_DryRun(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_TransitRouteTableAggregationScope(self): # String
return self.get_query_params().get('TransitRouteTableAggregationScope')
def set_TransitRouteTableAggregationScope(self, TransitRouteTableAggregationScope): # String
self.add_query_param('TransitRouteTableAggregationScope', TransitRouteTableAggregationScope)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_TransitRouteTableId(self): # String
return self.get_query_params().get('TransitRouteTableId')
def set_TransitRouteTableId(self, TransitRouteTableId): # String
self.add_query_param('TransitRouteTableId', TransitRouteTableId)
def get_TransitRouteTableAggregationCidr(self): # String
return self.get_query_params().get('TransitRouteTableAggregationCidr')
def set_TransitRouteTableAggregationCidr(self, TransitRouteTableAggregationCidr): # String
self.add_query_param('TransitRouteTableAggregationCidr', TransitRouteTableAggregationCidr)
| [
"[email protected]"
] | |
38038a954baf0435cc4b0471fb337429e94d0cc5 | 50a39c462fac7e889f6257cc2c3e3c84986e4bb2 | /RANSAC_example.py | b25e7be454f49d1f4943e0a29be496fb6270413e | [] | no_license | chickenbestlover/MSDN2 | 2b16f70eb58bcc67893ec65ed1a58db3f0dd79a9 | 58a0c6aa8e8e8953572567145ffecd5b10bdfb5a | refs/heads/master | 2020-03-25T07:37:54.873304 | 2018-11-05T05:46:02 | 2018-11-05T05:46:02 | 143,572,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,701 | py | import numpy as np
import pcl
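# The script stops short of the actual model fit; once the `points` array below has
# been built, a plane could be segmented with RANSAC via the strawlab python-pcl
# bindings roughly like this (the distance threshold is a hypothetical value):
#   cloud = pcl.PointCloud(points)
#   seg = cloud.make_segmenter()
#   seg.set_model_type(pcl.SACMODEL_PLANE)
#   seg.set_method_type(pcl.SAC_RANSAC)
#   seg.set_distance_threshold(1.0)
#   inlier_indices, plane_coefficients = seg.segment()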
points = np.array([[ 594.6663 , -1617.9456 , -797.36224],
[ 600.5656 , -1638.1005 , -806.5441 ],
[ 599.16583, -1638.3135 , -805.9235 ],
[ 597.76605, -1638.5264 , -805.3029 ],
[ 596.36633, -1638.7394 , -804.6823 ],
[ 594.96655, -1638.9523 , -804.06165],
[ 593.5668 , -1639.1652 , -803.44104],
[ 592.16705, -1639.3782 , -802.82043],
[ 590.7673 , -1639.5911 , -802.1998 ],
[ 589.36755, -1639.804 , -801.5792 ],
[ 587.9678 , -1640.017 , -800.9586 ],
[ 586.568 , -1640.2299 , -800.338 ],
[ 595.3116 , -1618.2574 , -796.01373],
[ 601.2189 , -1638.4161 , -805.1791 ],
[ 599.8191 , -1638.6292 , -804.5585 ],
[ 598.4193 , -1638.842 , -803.9378 ],
[ 597.0196 , -1639.055 , -803.3172 ],
[ 595.6198 , -1639.268 , -802.6966 ],
[ 594.22003, -1639.4808 , -802.076 ],
[ 592.8203 , -1639.6938 , -801.4554 ],
[ 591.42053, -1639.9067 , -800.8348 ],
[ 590.0208 , -1640.1196 , -800.2142 ],
[ 588.62103, -1640.3326 , -799.5935 ],
[ 587.22125, -1640.5455 , -798.9729 ],
[ 595.9569 , -1618.5692 , -794.6653 ],
[ 598.22314, -1628.7557 , -798.9331 ],
[ 596.8319 , -1628.9674 , -798.3162 ],
[ 599.0726 , -1639.1577 , -802.57275],
[ 597.67285, -1639.3707 , -801.95215],
[ 596.2731 , -1639.5836 , -801.33154],
[ 594.8733 , -1639.7965 , -800.71094],
[ 593.4736 , -1640.0095 , -800.0903 ],
[ 592.0738 , -1640.2224 , -799.46967],
[ 590.674 , -1640.4353 , -798.84906],
[ 589.2743 , -1640.6483 , -798.22845],
[ 584.31067, -1630.8721 , -792.7647 ],
[ 600.2637 , -1628.8579 , -798.1932 ],
[ 598.87244, -1629.0695 , -797.5763 ],
[ 597.4812 , -1629.2811 , -796.9595 ],
[ 599.7258 , -1639.4734 , -801.2077 ],
[ 598.3261 , -1639.6864 , -800.5871 ],
[ 596.92633, -1639.8993 , -799.96643],
[ 595.52655, -1640.1122 , -799.3458 ],
[ 594.12683, -1640.3252 , -798.7252 ],
[ 592.72705, -1640.5381 , -798.1046 ],
[ 587.74243, -1630.7626 , -792.6416 ],
[ 586.3512 , -1630.9742 , -792.0248 ],
[ 584.95996, -1631.1859 , -791.4079 ],
[ 600.91296, -1629.1716 , -796.83636],
[ 599.5217 , -1629.3833 , -796.21954],
[ 598.1305 , -1629.5948 , -795.6027 ],
[ 596.7392 , -1629.8065 , -794.98584],
[ 595.34796, -1630.0182 , -794.369 ],
[ 593.9567 , -1630.2299 , -793.7522 ],
[ 592.5655 , -1630.4414 , -793.1353 ],
[ 591.17426, -1630.6531 , -792.5185 ],
[ 589.78296, -1630.8647 , -791.9017 ],
[ 588.3917 , -1631.0764 , -791.28485],
[ 587.0005 , -1631.288 , -790.66797],
[ 585.60925, -1631.4996 , -790.05115],
[ 601.56226, -1629.4854 , -795.4796 ],
[ 600.171 , -1629.697 , -794.8628 ],
[ 598.7797 , -1629.9087 , -794.2459 ],
[ 597.3885 , -1630.1202 , -793.6291 ],
[ 595.99725, -1630.3319 , -793.01227],
[ 594.606 , -1630.5436 , -792.3954 ],
[ 593.2148 , -1630.7552 , -791.77856],
[ 591.82355, -1630.9668 , -791.16174],
[ 590.43225, -1631.1785 , -790.54486],
[ 589.041 , -1631.3901 , -789.92804],
[ 587.6498 , -1631.6018 , -789.3112 ],
[ 586.25854, -1631.8134 , -788.6944 ],
[ 598.53815, -1619.8167 , -789.27136],
[ 597.15546, -1620.027 , -788.6583 ],
[ 595.7727 , -1620.2373 , -788.0452 ],
[ 598.0378 , -1630.4341 , -792.27234],
[ 596.64655, -1630.6456 , -791.65546],
[ 595.2553 , -1630.8573 , -791.03864],
[ 593.8641 , -1631.069 , -790.4218 ],
[ 592.4728 , -1631.2806 , -789.80493],
[ 591.08154, -1631.4922 , -789.1881 ],
[ 589.6903 , -1631.7039 , -788.5713 ],
[ 588.2991 , -1631.9155 , -787.9544 ],
[ 583.32806, -1622.1304 , -782.52765],
[ 599.1835 , -1620.1284 , -787.9229 ],
[ 597.8007 , -1620.3387 , -787.3098 ],
[ 596.418 , -1620.5491 , -786.6968 ],
[ 598.6871 , -1630.7478 , -790.9155 ],
[ 597.29584, -1630.9595 , -790.2987 ],
[ 595.9046 , -1631.171 , -789.6819 ],
[ 594.5133 , -1631.3827 , -789.065 ],
[ 593.1221 , -1631.5944 , -788.4482 ],
[ 588.1216 , -1621.8112 , -783.0184 ],
[ 586.7389 , -1622.0215 , -782.40533],
[ 585.35614, -1622.2319 , -781.79224],
[ 583.9734 , -1622.4423 , -781.1792 ],
[ 599.8288 , -1620.4403 , -786.57446],
[ 598.44604, -1620.6506 , -785.96136],
[ 597.0633 , -1620.861 , -785.3483 ],
[ 595.6806 , -1621.0713 , -784.7352 ],
[ 594.29785, -1621.2816 , -784.1222 ],
[ 592.9151 , -1621.492 , -783.5091 ],
[ 591.5324 , -1621.7024 , -782.89606],
[ 590.14966, -1621.9127 , -782.28296],
[ 588.7669 , -1622.123 , -781.6699 ],
[ 587.38416, -1622.3334 , -781.0568 ],
[ 586.00146, -1622.5437 , -780.4438 ],
[ 584.6187 , -1622.754 , -779.8307 ],
[ 600.4741 , -1620.7521 , -785.22595],
[ 599.0914 , -1620.9624 , -784.6129 ],
[ 597.7086 , -1621.1729 , -783.9998 ],
[ 596.3259 , -1621.3832 , -783.3868 ],
[ 594.9432 , -1621.5935 , -782.7737 ],
[ 593.5604 , -1621.8038 , -782.16064],
[ 592.1777 , -1622.0142 , -781.54755],
[ 590.795 , -1622.2245 , -780.9345 ],
[ 589.41223, -1622.4348 , -780.3214 ],
[ 588.0295 , -1622.6453 , -779.7084 ],
[ 586.6467 , -1622.8556 , -779.0953 ],
[ 585.26404, -1623.0659 , -778.48224],
[ 601.11945, -1621.064 , -783.8775 ],
[ 599.7367 , -1621.2743 , -783.2644 ],
[ 598.35394, -1621.4846 , -782.65137],
[ 596.9712 , -1621.695 , -782.03827],
[ 595.5885 , -1621.9053 , -781.42523],
[ 594.20575, -1622.1157 , -780.81213],
[ 592.823 , -1622.326 , -780.1991 ],
[ 591.44025, -1622.5364 , -779.586 ],
[ 590.05756, -1622.7467 , -778.97296],
[ 588.6748 , -1622.957 , -778.35986],
[ 587.29205, -1623.1674 , -777.7468 ],
[ 582.3137 , -1613.3733 , -772.357 ],
[ 601.7647 , -1621.3757 , -782.529 ],
[ 600.382 , -1621.5862 , -781.91595],
[ 598.99927, -1621.7965 , -781.30286],
[ 597.6165 , -1622.0068 , -780.6898 ],
[ 596.23376, -1622.2172 , -780.0767 ],
[ 594.8511 , -1622.4275 , -779.4637 ],
[ 593.4683 , -1622.6378 , -778.8506 ],
[ 592.0856 , -1622.8481 , -778.23755],
[ 590.7028 , -1623.0586 , -777.6245 ],
[ 589.3201 , -1623.2689 , -777.0114 ],
[ 584.3293 , -1613.4741 , -771.6261 ],
[ 582.9551 , -1613.6832 , -771.01685],
[ 602.41003, -1621.6876 , -781.18054],
[ 601.0273 , -1621.898 , -780.5675 ],
[ 599.6446 , -1622.1083 , -779.9544 ],
[ 598.26184, -1622.3186 , -779.3414 ],
[ 596.8791 , -1622.529 , -778.7283 ],
[ 595.49634, -1622.7394 , -778.11523],
[ 594.11365, -1622.9497 , -777.50214],
[ 592.7309 , -1623.16 , -776.8891 ],
[ 591.34814, -1623.3704 , -776.276 ],
[ 589.96545, -1623.5807 , -775.66296],
[ 584.9706 , -1613.784 , -770.28595],
[ 583.5964 , -1613.9932 , -769.67664],
[ 603.05536, -1621.9995 , -779.8321 ],
[ 601.6726 , -1622.2098 , -779.219 ],
[ 600.28986, -1622.4202 , -778.60596],
[ 598.90717, -1622.6305 , -777.99286],
[ 597.5244 , -1622.8408 , -777.3798 ],
[ 596.14166, -1623.0511 , -776.7667 ],
[ 591.1088 , -1613.2578 , -771.38293],
[ 589.7346 , -1613.4668 , -770.7737 ],
[ 588.3604 , -1613.6759 , -770.16437],
[ 586.98615, -1613.885 , -769.55505],
[ 585.61194, -1614.094 , -768.9458 ],
[ 584.23773, -1614.3031 , -768.3365 ],
[ 603.7007 , -1622.3113 , -778.4836 ],
[ 602.31793, -1622.5216 , -777.87054],
[ 600.9352 , -1622.7319 , -777.25745],
[ 599.5525 , -1622.9423 , -776.6444 ],
[ 598.16974, -1623.1527 , -776.0313 ],
[ 596.787 , -1623.363 , -775.4183 ],
[ 591.7502 , -1613.5677 , -770.0428 ],
[ 590.376 , -1613.7767 , -769.4335 ],
[ 589.0017 , -1613.9858 , -768.8242 ],
[ 587.6275 , -1614.1948 , -768.2149 ],
[ 586.2533 , -1614.4039 , -767.6056 ],
[ 584.8791 , -1614.6129 , -766.99634]], dtype=np.float32) | [
"[email protected]"
] | |
6c6dace090ac4698a71aa96258aa378ca9e059f0 | aec9a1f3d1d36f19724e745ca4d09a20f67208dc | /matching/migrations/0006_auto_20210114_2030.py | 799634392703d79e913d7c68e61a37828e2927c9 | [] | no_license | endlessor/open-united-backend | b1b1c3411d0d48bc79b35895c70f24d773ac7344 | 86f6905cce14b834b6bf059fd33157249978bd14 | refs/heads/main | 2023-04-29T13:35:28.529360 | 2021-05-17T14:16:39 | 2021-05-17T14:16:39 | 368,211,786 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | # Generated by Django 3.1 on 2021-01-15 20:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('matching', '0005_auto_20210113_1839'),
]
operations = [
migrations.RemoveField(
model_name='taskclaimrequest',
name='status',
),
migrations.AddField(
model_name='taskclaimrequest',
name='kind',
field=models.IntegerField(choices=[(0, 'New'), (1, 'Approved'), (2, 'Rejected')], default=0),
),
migrations.AlterField(
model_name='taskclaim',
name='kind',
field=models.IntegerField(choices=[(0, 'Done'), (1, 'Active'), (2, 'Failed')], default=0),
),
]
| [
"[email protected]"
] | |
22c03deb0d8157383f3c1246c029e58a5a3f8e90 | 6ab67facf12280fedf7cc47c61ae91da0bcf7339 | /service/yowsup/yowsup/layers/protocol_media/protocolentities/message_media_vcard.py | 2d57fbcbadbf58e9a66ad180420dd4e70030640f | [
"MIT",
"GPL-3.0-only",
"GPL-3.0-or-later"
] | permissive | PuneethReddyHC/whatsapp-rest-webservice | 2f035a08a506431c40b9ff0f333953b855f9c461 | 822dfc46b80e7a26eb553e5a10e723dda5a9f77d | refs/heads/master | 2022-09-17T14:31:17.273339 | 2017-11-27T11:16:43 | 2017-11-27T11:16:43 | 278,612,537 | 0 | 1 | MIT | 2020-07-10T11:04:42 | 2020-07-10T11:04:41 | null | UTF-8 | Python | false | false | 5,305 | py | from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from .message_media import MediaMessageProtocolEntity
class VCardMediaMessageProtocolEntity(MediaMessageProtocolEntity):
'''
<message t="{{TIME_STAMP}}" from="{{CONTACT_JID}}"
offline="{{OFFLINE}}" type="text" id="{{MESSAGE_ID}}" notify="{{NOTIFY_NAME}}">
<media type="vcard">
<vcard name="Hany Yasser">
BEGIN:VCARD
VERSION:3.0
N:Yasser;Hany;;;
FN:Hany Yasser
PHOTO;BASE64:/9j/4AAQSkZJRgABAQEASABIAAD/4QBYRXhpZgAATU0AKgAAAAgAAgESAAMAAAABAAEAAIdpAAQAAAABAAAAJgAAAAAAA6ABAAMAAAABAAEAAKACAAQAAAABAAAAQKADAAQAAAABAAAAQAAAAAD/7QA4UGhvdG9zaG9wIDMuMAA4QklNBAQAAAAAAAA4QklNBCUAAAAAABDUHYzZjwCyBOmACZjs+EJ+/8AAEQgAQABAAwEiAAIRAQMRAf/EAB8AAAEFAQEBAQEBAAAAAAAAAAABAgMEBQYHCAkKC//EALUQAAIBAwMCBAMFBQQEAAABfQECAwAEEQUSITFBBhNRYQcicRQygZGhCCNCscEVUtHwJDNicoIJChYXGBkaJSYnKCkqNDU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6g4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2drh4uPk5ebn6Onq8fLz9PX29/j5+v/EAB8BAAMBAQEBAQEBAQEAAAAAAAABAgMEBQYHCAkKC//EALURAAIBAgQEAwQHBQQEAAECdwABAgMRBAUhMQYSQVEHYXETIjKBCBRCkaGxwQkjM1LwFWJy0QoWJDThJfEXGBkaJicoKSo1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoKDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uLj5OXm5+jp6vLz9PX29/j5+v/bAEMABgYGBgYGCgYGCg4KCgoOEg4ODg4SFxISEhISFxwXFxcXFxccHBwcHBwcHCIiIiIiIicnJycnLCwsLCwsLCwsLP/bAEMBBwcHCwoLEwoKEy4fGh8uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLv/dAAQABP/aAAwDAQACEQMRAD8A83lPGaqzn/iXgnqZB/WpWbKjNV7kgWC5/wCen9DXix3PoGtCreFJG3OcbVFZmx2XL8A9PoOa9u0b4TDVLH+0tavDZMyBkiUDKqRkGQsQBkc49O9ebeJ9Am8PXX2bzkuYJAWhmjOVcA4Pc4I7jNelCm1BOx5M6kXNpM5VnX77EEn17D6Vt6aVaNtnABxitnwn4DvPEUS3lxIIoH5HG5yPUL059zVTxLoUPhDUYGs7gzRO+yXOCB6A7eOlTUSa5U9SqbcXzNaGdenbYxhevymsPc0rGVyDg9O1a96d9uPT5RWK/C/d6ck0qK0HXd5H/9Dy2U9B2rpPCNgmp6xp9vKgeNJWmdSMgrGN3P44qponhvVvE9ybLSYw8iKXYs21VHTk+56V7B4T8F3nhSKS91gx/anHlxqjbgqty2TgcnA/KvNo0m2n0PYr1oxi431F8R3d7Jef6MbaZ964huDhSCBlsZ5OfXp2rwzxZdyS6rLC0C26xuRhCNrkHbvAHTpivUvEdrdiaWZ4DIXXarrwVJ/oQce9eZXfg3WLgNc22ySNSIzufawc9Bz6116uTucbUYwSRreFb23sLCG6v72RraFjGbVOQwOeo78HjvTtavfDdvpyRWNo4LyIx3sSTg5xz3Hfr9a4n7Bd6bfW9orxSSSyBAqncpYnGDxx161614T8JXet3/8AbXidRHZaVuxDu3FmXLMWPp+XtxQqTk9Be2UYnj94ymFB64zWSxDnJ5UenGas3bmaWRkG1Gdii+iknA/AVQKsoyRwO1ONJxVmTKrGTuj/0e3+D9iLfR5tRZcSXUu0H/ZjxjH4k165fQG4tXRADJ/Dnpn3ri/BVt9h8OaXCMf6lSw772BY/wDoVdm90qSCPHJ6VUI2gkE581RyPNdQQJKVkj3smCpYZYY6Ae+elcT43e78M+F43twI57u4+Y4B25BYgA8cYHNe3ytbtK7lFLttwcc8nHX8K8V+OF5EdK0+BOrXJP4BD/jQkrlTk7aHjPgmztp/E8FxetsgtUluZH7hYULZ+oOK7XQEsNN+G2ra/bNMLu8mNmC8hI2uwwMdCdpJJOTnPSvOdNuPI0/V5lOG+wOg/wC2ksSH9Ca7DXwNH8A6Fpak7rxpL6X6kAL+QJrVLTQwe5545Qc9u1Z104cbe1Pkl3fSqW4szj8qzbLSP//S+ghGIfJjAA2gDHpgY49qZIxN2T2Rf1NULK5XVL66u4+YLaQ20ZH8Tp/rD+BO38DUyzlndWHclT6r2rVkR3KV7eLb3cELIx8zI3DGAM/mcdT6DmvBPjZdfvNLj6bvMfHoOAP0r6JMqujxnoyH9P8A9dfK/wAZrozeILeFOTHbDA9NzMSfyAqLblyZ57arv0vUmzjbbZ/8ixj+ddd8QbxpW0W0PHk6ZASB0G8Fq86ecx2c8Y6SIqn6bg39K6TxS0pv7dpTnNjabfZREuBWqfumdtTmpG2rmqUT/vDnvU07YGKpx4EoySvuKyZZ/9k=
BDAY;value=date:1989-01-05
ORG:Vodafone Egypt;
item1.EMAIL;type=INTERNET:[email protected]
item1.X-ABLabel:INTERNET
item2.EMAIL;type=INTERNET:[email protected]
item2.X-ABLabel:INTERNET
item3.ADR;type=HOME:;;Heliopolis;Cairo;Al Qahirah;;Egypt
item4.ADR;type=HOME:;;;cairo;;;Egypt
item5.URL:http://www.facebook.com/profile.php?id=626850952
item5.X-ABLabel:_$!<HomePage>!$_
X-FACEBOOK:hany.yasser1
END:VCARD
</vcard>
</media>
</message>
'''
def __init__(self, name, card_data, _id = None, _from = None, to = None, notify = None, timestamp = None, participant = None,
preview = None, offline = None, retry = None):
super(VCardMediaMessageProtocolEntity, self).__init__("vcard", _id, _from, to, notify, timestamp, participant, preview, offline, retry)
self.setVcardMediaProps(name,card_data)
def __str__(self):
out = super(MediaMessageProtocolEntity, self).__str__()
out += "Name: %s\n" % self.name
out += "Card Data: %s\n" % self.card_data
return out
def getName(self):
return self.name
def getCardData(self):
return self.card_data
def setVcardMediaProps(self, name, card_data):
self.name = name
self.card_data = card_data
def toProtocolTreeNode(self):
node = super(VCardMediaMessageProtocolEntity, self).toProtocolTreeNode()
mediaNode = node.getChild("media")
mediaNode["type"] = "vcard"
vcardNode = ProtocolTreeNode("vcard", {"name":self.name}, None,self.card_data)
mediaNode.addChild(vcardNode)
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = MediaMessageProtocolEntity.fromProtocolTreeNode(node)
entity.__class__ = VCardMediaMessageProtocolEntity
mediaNode = node.getChild("media")
entity.setVcardMediaProps(
mediaNode.getAllChildren()[0].getAttributeValue('name'),
mediaNode.getChild("vcard").getData()
)
return entity | [
"[email protected]"
] | |
c737c2c9df7e4e431e045cdd97aecd4aa4483742 | dcbb4a526f6cf6f490063a6e4b5f1353fda48a1f | /tf_agents/drivers/tf_driver.py | de16194c74cf80ce5e092c4607046a47a02b73ac | [
"Apache-2.0"
] | permissive | Bhaney44/agents | 91baf121188f35024c09435276d108600ba6f07e | 792d7c6e769d708f8b08d71926ccb9e8a880efef | refs/heads/master | 2023-08-09T03:51:16.188708 | 2023-07-21T17:50:18 | 2023-07-21T17:50:18 | 177,231,436 | 0 | 0 | Apache-2.0 | 2019-03-23T01:46:03 | 2019-03-23T01:46:02 | null | UTF-8 | Python | false | false | 5,527 | py | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Driver that steps a TF environment using a TF policy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Callable, Optional, Sequence, Tuple
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.drivers import driver
from tf_agents.environments import tf_environment
from tf_agents.policies import tf_policy
from tf_agents.trajectories import time_step as ts
from tf_agents.trajectories import trajectory
from tf_agents.typing import types
from tf_agents.utils import common
class TFDriver(driver.Driver):
"""A driver that runs a TF policy in a TF environment."""
def __init__(
self,
env: tf_environment.TFEnvironment,
policy: tf_policy.TFPolicy,
observers: Sequence[Callable[[trajectory.Trajectory], Any]],
transition_observers: Optional[Sequence[Callable[[trajectory.Transition],
Any]]] = None,
max_steps: Optional[types.Int] = None,
max_episodes: Optional[types.Int] = None,
disable_tf_function: bool = False):
"""A driver that runs a TF policy in a TF environment.
**Note** about bias when using batched environments with `max_episodes`:
When using `max_episodes != None`, a `run` step "finishes" when
`max_episodes` have been completely collected (hit a boundary).
When used in conjunction with environments that have variable-length
episodes, this skews the distribution of collected episodes' lengths:
short episodes are seen more frequently than long ones.
As a result, running an `env` of `N > 1` batched environments
with `max_episodes >= 1` is not the same as running an env with `1`
environment with `max_episodes >= 1`.
Args:
env: A tf_environment.Base environment.
policy: A tf_policy.TFPolicy policy.
observers: A list of observers that are notified after every step
in the environment. Each observer is a callable(trajectory.Trajectory).
transition_observers: A list of observers that are updated after every
step in the environment. Each observer is a callable((TimeStep,
PolicyStep, NextTimeStep)). The transition is shaped just as
trajectories are for regular observers.
max_steps: Optional maximum number of steps for each run() call. For
batched or parallel environments, this is the maximum total number of
steps summed across all environments. Also see below. Default: 0.
max_episodes: Optional maximum number of episodes for each run() call. For
batched or parallel environments, this is the maximum total number of
episodes summed across all environments. At least one of max_steps or
max_episodes must be provided. If both are set, run() terminates when at
least one of the conditions is
satisfied. Default: 0.
disable_tf_function: If True the use of tf.function for the run method is
disabled.
Raises:
ValueError: If both max_steps and max_episodes are None.
"""
common.check_tf1_allowed()
max_steps = max_steps or 0
max_episodes = max_episodes or 0
if max_steps < 1 and max_episodes < 1:
raise ValueError(
'Either `max_steps` or `max_episodes` should be greater than 0.')
super(TFDriver, self).__init__(env, policy, observers, transition_observers)
self._max_steps = max_steps or np.inf
self._max_episodes = max_episodes or np.inf
if not disable_tf_function:
self.run = common.function(self.run, autograph=True)
def run( # pytype: disable=signature-mismatch # overriding-parameter-count-checks
self, time_step: ts.TimeStep,
policy_state: types.NestedTensor = ()
) -> Tuple[ts.TimeStep, types.NestedTensor]:
"""Run policy in environment given initial time_step and policy_state.
Args:
time_step: The initial time_step.
policy_state: The initial policy_state.
Returns:
A tuple (final time_step, final policy_state).
"""
num_steps = tf.constant(0.0)
num_episodes = tf.constant(0.0)
while num_steps < self._max_steps and num_episodes < self._max_episodes:
action_step = self.policy.action(time_step, policy_state)
next_time_step = self.env.step(action_step.action)
traj = trajectory.from_transition(time_step, action_step, next_time_step)
for observer in self._transition_observers:
observer((time_step, action_step, next_time_step))
for observer in self.observers:
observer(traj)
num_episodes += tf.math.reduce_sum(
tf.cast(traj.is_boundary(), tf.float32))
num_steps += tf.math.reduce_sum(tf.cast(~traj.is_boundary(), tf.float32))
time_step = next_time_step
policy_state = action_step.state
return time_step, policy_state
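# Rough usage sketch (hypothetical environment/policy; assumes gym and the
# tf_agents suites are installed):
#   from tf_agents.environments import suite_gym, tf_py_environment
#   from tf_agents.policies import random_tf_policy
#
#   env = tf_py_environment.TFPyEnvironment(suite_gym.load('CartPole-v0'))
#   policy = random_tf_policy.RandomTFPolicy(env.time_step_spec(), env.action_spec())
#   trajectories = []
#   driver = TFDriver(env, policy, observers=[trajectories.append],
#                     max_steps=10, disable_tf_function=True)
#   final_time_step, final_policy_state = driver.run(env.reset())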
| [
"[email protected]"
] | |
9e25a76b082548ee94432dc821353a29a8e5f423 | 107973063f26b791ccd6deca0026acb338eb4d6b | /harvest.py | 8631b158987a039be018791b790f53b2a123623b | [] | no_license | sonya-sa/melon-objects | 322b46138ee9287b74cf8eb50bae64f56eb50e23 | a035db0be16e749a0654cc8518315f408efc72bc | refs/heads/master | 2020-03-10T10:15:07.606336 | 2018-04-13T01:09:39 | 2018-04-13T01:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,986 | py | ############
# Part 1 #
############
class MelonType(object):
"""A species of melon at a melon farm."""
def __init__(self, code, first_harvest, color, is_seedless, is_bestseller, name):
"""Initialize a melon."""
self.code = code
self.first_harvest = first_harvest
self.color = color
self.is_seedless = is_seedless
self.is_bestseller = is_bestseller
self.name = name
self.pairings = []
# Fill in the rest
def add_pairing(self, pairing):
"""Add a food pairing to the instance's pairings list."""
self.pairings.extend(pairing)
# Fill in the rest
def update_code(self, new_code):
"""Replace the reporting code with the new_code."""
self.code = new_code
# Fill in the rest
def make_melon_types():
"""Returns a listmy of current melon types."""
all_melon_types = []
musk = MelonType('musk', 1998, 'green',
True, True, 'Muskmelon')
musk.add_pairing(['mint'])
all_melon_types.append(musk)
casaba = MelonType('cas', 2003, 'orange',
True, False, 'Casaba')
casaba.add_pairing(['mint', 'strawberries'])
all_melon_types.append(casaba)
crenshaw = MelonType('cren', 1996, 'green', True, False, 'Crenshaw')
crenshaw.add_pairing(['proscuitto'])
all_melon_types.append(crenshaw)
yellow_watermelon = MelonType('yw', 2013, 'yellow', True, True, 'Yellow Watermelon')
yellow_watermelon.add_pairing(['ice cream'])
all_melon_types.append(yellow_watermelon)
return all_melon_types
def print_pairing_info(melon_types):
"""Prints information about each melon type's pairings."""
# Fill in the rest
for melon_type in melon_types:
print "{} pairs well with".format(melon_type.name)
pairings = melon_type.pairings
for pairing in pairings:
print "- {}".format(pairing)
print ""
def make_melon_type_lookup(melon_types):
"""Takes a list of MelonTypes and returns a dictionary of melon type by code."""
codes = {}
for melon_type in melon_types:
codes[melon_type.code] = melon_type
# Fill in the rest
return codes
############
# Part 2 #
############
# all_melon_types = make_melon_types()
# make_melon
class Melon(object):
"""A melon in a melon harvest."""
    # Class-level lookup shared by every Melon instance ("self" is not available here).
    all_melon_types = make_melon_type_lookup(make_melon_types())
def __init__ (self, melon_code, shape_rating, color_rating, from_field, harvested_by):
self.melon_type = self.all_melon_types[melon_code]
self.shape_rating = shape_rating
self.color_rating = color_rating
self.from_field = from_field
self.harvested_by = harvested_by
    def is_sellable(self):
if (self.from_field != 3) and (self.shape_rating >= 5) and (self.color_rating >= 5):
return True
return False
# Fill in the rest
# Needs __init__ and is_sellable methods
def make_melons(melon_types):
"""Returns a list of Melon objects."""
# Fill in the rest
melon_objects = []
melon1 = Melon('yw', 8, 7, 2, 'Sheila')
melon_objects.append(melon1)
    melon2 = Melon('yw', 3, 4, 2, 'Sheila')
melon_objects.append(melon2)
melon3 = Melon('yw', 9, 8, 3, 'Sheila')
melon_objects.append(melon3)
melon4 = Melon('cas', 10, 6, 35, 'Sheila')
melon_objects.append(melon4)
melon5 = Melon('cren',8,9,35,'Michael')
melon_objects.append(melon5)
melon6 = Melon('cren', 8, 2, 35, 'Michael')
melon_objects.append(melon6)
melon7 = Melon('cren', 6,7,4, 'Michael')
melon_objects.append(melon7)
melon8 = Melon('musk', 6,7,4, 'Michael')
melon_objects.append(melon8)
melon9 = Melon('yw',7,10,3,'Sheila')
melon_objects.append(melon9)
return melon_objects
def get_sellability_report(melons):
    """Given a list of melon objects, prints whether each one is sellable."""

    # Minimal completion of the exercise stub; the report wording is illustrative.
    for melon in melons:
        status = "(CAN BE SOLD)" if melon.is_sellable() else "(NOT SELLABLE)"
        print "Harvested by {} from Field {} {}".format(melon.harvested_by, melon.from_field, status)
| [
"[email protected]"
] | |
5c8494e379adb3963beead9dc40e803a8116cb46 | 3de3dae722829727edfdd6cc3b67443a69043475 | /cave/com.raytheon.viz.gfe/localization/gfe/userPython/procedures/SnowAmtQPFPoPWxCheck.py | 9d7c3a3c8577a8c463d088e2adc2ec0c09f4fe61 | [
"LicenseRef-scancode-public-domain",
"Apache-2.0"
] | permissive | Unidata/awips2 | 9aee5b7ec42c2c0a2fa4d877cb7e0b399db74acb | d76c9f96e6bb06f7239c563203f226e6a6fffeef | refs/heads/unidata_18.2.1 | 2023-08-18T13:00:15.110785 | 2023-08-09T06:06:06 | 2023-08-09T06:06:06 | 19,332,079 | 161 | 75 | NOASSERTION | 2023-09-13T19:06:40 | 2014-05-01T00:59:04 | Java | UTF-8 | Python | false | false | 61,851 | py | ##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
#----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# SnowAmtQPFPoPWxCheck
#
# Author: Jay Smith, WFO Fairbanks, [email protected], 907-458-3721
# Version: 1.0.0, 09/14/2006 - Initial version
# 1.0.1, 10/12/2006 - Added PoP/QPF check at request of DSPAC
# 1.0.2, 10/18/2006 - Changed PoP/QPF check to treat the PoP as
# floating. Instead of checking each individual PoP grid
# against its corresponding QPF grid, the max of all the
# PoP grids overlapping a QPF grid will be checked.
# 1.1.0, 01/25/2007 - Added options to choose which checks to run.
# Reorganized code so that each check is its own method.
# Added a check for QPF and Wx. Added highlighting for the
# created temporary grids.
# 1.1.1, 02/01/2007 - Changed the SnowAmt/Wx check to return
# consistent results for SnowAmt > 0 and Wx grid containing
# S, SW, or IP regardless of whether the frozen precip is
# mixed with freezing and/or liquid precip.
# 1.2.0, 02/13/2007 - Added a configuration option to provide a CWA
# edit area to run the procedure over. A bad edit area or no
# edit area will result in running over the whole domain.
# Modified the SnowAmt/Wx and QPF/Wx checks to handle two
# cases. Case 1: The SnowAmt/QPF grid is 6-hr long and starts
# at 00, 06, 12, or 18 UTC. Then only one of the corresponding
# Wx grids has to meet the consistency rule. Case 2: The
# SnowAmt/QPF grid does not meet the case 1 definition. Then
# all of the corresponding Wx grids must meet the consistency
# rule.
# The procedure performs the following checks:
# 1. If SnowAmt present and >= 0.5 inches, then corresponding QPF grids
# must add up to 0.01 inches.
# 2. If SnowAmt >= 0.1 inches, then there are two cases:
# a. If the SnowAmt grid is exactly 6 hours long and starts at 00, 06, 12,
# or 18 UTC, then at least one of the corresponding Wx grids must have
# S, SW, or IP.
# b. If the SnowAmt grid does not adhere to the time constraints listed in
# in the previous paragraph, then all of the corresponding Wx grids
# must have S, SW, or IP. This more stringent test is required because
# with grids offset from the NDFD time constraints, it's possible for
# the GFE to evaluate the grids as consistent using an "any"
# criteria but have the NDFD flag those same grids as inconsistent.
# 3. If QPF > 0, then at least one of the corresponding PoP grids must be > 0
# 4. If QPF > 0, then there are two cases:
# a. If the QPF grid is exactly 6 hours long and starts at 00, 06, 12, or 18
# UTC, then at least one of the corresponding Wx grids must have R, RW,
# S, SW, RS, IP, L, ZR, ZL.
# b. If the QPF grid does not adhere to the time constraints listed in the
# previous paragraph, then all corresponding Wx grids must contain a
# precipitating weather type. This more stringent test is required
# because with grids offset from the NDFD time constraints, it's
# possible for the GFE to evaluate grids as consistent using an "any"
# criteria but have the NDFD flag those same grids as inconsistent.
# For all of the checks above, if the initial threshold is not exceeded, then
# the two grids are consistent by definition. In other words:
# 1. If SnowAmt < 0.5, then SnowAmt and QPF are always consistent.
# 2. If SnowAmt < 0.1, then SnowAmt and Wx are always consistent.
# 3. If QPF = 0, then QPF and PoP are always consistent.
# 4. If QPF = 0, then QPF and Wx are always consistent.
# For the Wx checks above, only the Wx type is considered.
#
# ****** NOTE NOTE NOTE NOTE ******
# At this time, the check for two 6-hour QPF grids vs. one 12-hr PoP grid
# is not implemented because neither of those grid definitions is implemented
# in the GFE baseline. I don't know how to do a check on grids that don't
# exist.
# ****** NOTE NOTE NOTE NOTE ******
#
# If discrepancies are found, then the "bad" grids will be highlighted.
# Temporary grids showing where the discrepancies occur will be created and
# also highlighted.
#
# Dealing with QPF and SnowAmt is always a pain, because they are "cumulative"
# elements. This procedure will account for the possibility that the SnowAmt and
# QPF grids are not the same duration. It will also account for the possibilty
# that the SnowAmt and QPF grids are not aligned on either or both ends.
# The only sane way to handle either situation is to believe that the QPF
# accumulation happens uniformally across the grid's duration and to use
# the proportional amount of the QPF that corresponds the SnowAmt grid's
# duration. Some examples:
# 1. The QPF grid is 3 hours long and there are 3, 1-hour, SnowAmt grids.
# Each SnowAmt grid will be compared to 1/3 the value of the QPF grid.
# 2. The last two hours of a 3-hour QPF grid overlaps a 2-hour SnowAmt grid.
# The SnowAmt grid will be compared to 2/3 the value of the QPF grid.
# 3. Two 3-hour QPF grids align with one 6-hour SnowAmt grid. The first QPF
# grid will be compared to the SnowAmt grid. If the consistency check passes
# on that comparison, the program will continue. If the consistency check
# fails, then the sum of the two QPF grids will be compared to the SnowAmt
# grid.
# 4. The last four hours of a 6-hour QPF grid and the first two hours of a
# 3-hour QPF grid overlap a 6-hour SnowAmt grid. The SnowAmt grid will be
# compared to 2/3 of the first QPF grid. If the consistency check passes,
# the program will continue. If the consistency check fails, then 2/3 of the
# first QPF grid will be added to 2/3 of the second QPF grid and that QPF
# sum will be compared against the SnowAmt grid.
#
# Confused yet? Of course, all of these gyrations can be avoided if the
# QPF and SnowAmt grids are aligned and of the same duration.
#
# Unfortunately, the GFE does not provide a way to deal with proportional
# amounts of the accumulative grids, so I have done this.
#
# I've written this code such that it's optimized to minimize memory usage
# (at least I think I've done that). As a result, it's not particularly
# optimized for ifpServer database access. In fact, I retrieve the various
# grids from the ifpServer database many times during the procedure's run.
# This will have an impact on how fast the procedure runs (it'll run slower
# than if I had optimized for ifpServer database access). The choice to favor
# memory optimization comes from my belief that there are still "memory leak"
# problems in the GFE and that the consequences of those problems will be most
# manifest when this procedure is most likely to be run (near the end of the
# shift). Funky memory problems are a prime cause of funky application
# behavior like application crashes or spontaneous logouts. So, this procedure
# basically reads a grid into memory, keeps it as long as it's needed, and
# then discards it.
#
# Finally, this procedure is also intended to provide an example to other
# developers of how to write and document code. I have reservations as to how
# well I've succeeded at that task. The code is heavily documented, probably
# excessively so. Also, it's not as well as organized as it could be. As you
# look through the various methods, it should become quickly apparent that
# there is a lot of repeated code. I've consciously left the code this way in
# the hopes that it will be easier to understand by more novice programmers
# and because the code hasn't quite grown to the point where updating the
# repeating code is onerous or overly error-prone. It would be better to
# capture the repeating code in separate methods, but keeping track of the
# where you are in the code becomes harder the more you have to jump around
# from method to method. As with all things, there are trade-offs involved.
# ----------------------------------------------------------------------------
##
# This is an absolute override file, indicating that a higher priority version
# of the file will completely replace a lower priority version of the file.
##
MenuItems = ["Consistency"]
VariableList = []
VariableList.append(('Check_Cleanup', 'Check', 'radio', ['Check', 'Cleanup']))
VariableList.append(('Run SnowAmt/QPF Check?', ['Yes'], 'check', ['Yes']))
VariableList.append(('Run SnowAmt/Wx Check?', ['Yes'], 'check', ['Yes']))
VariableList.append(('Run QPF/PoP Check?', ['Yes'], 'check', ['Yes']))
VariableList.append(('Run QPF/Wx Check?', ['Yes'], 'check', ['Yes']))
VariableList.append(('If "Cleanup" is selected, then only cleanup actions will run.\nNo checks will be made, regardless of the above settings.', '', 'label'))
#### Config section
# Both the QPF and SnowAmt grids have values which are floating point
# numbers. This means comparisons must use a tolerance value. In other
# words, 0.5 may be represented in machine numbers as 0.49999999999 or
# 0.500000000001. By specifying a tolerance value, we account for the
# vagaries of machine representation of floating point numbers while
# keeping the precision of the comparisons to acceptable levels. Depending
# on the comparison being done, the tolerance value will be added to or
# subtracted from the comparison value to allow for machine error in the
# floating point number representation.
# By default in the GFE, QPF precision is to the nearest one-hundredth while
# SnowAmt precision is to the nearest tenth.
qpfTol = 0.00001 # 1/100,000 tolerance vs 1/100 precision
snowAmtTol = 0.0001 # 1/10,000 tolerance vs 1/10 precision
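# For example, a SnowAmt of 0.5 that comes back from the database as
# 0.49999999999 still satisfies greater_equal(snowAmtGrid, 0.5 - snowAmtTol)
# in the SnowAmt/QPF check below.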
# Inconsistent grid highlight color. One size fits all. To turn off
# highlighting, set the variable to the empty string, ''.
inconGridColor = 'red'
# Temporary grid highlight color. One size fits all. To turn off highlighting,
# set the variable to the empty string, ''.
tempGridColor = 'orange'
# Name of CWA edit area to use instead of running the procedure over the
# whole domain. Set to the empty string, '', if you want the procedure to
# always run over the whole domain. If the procedure has a problem with the
# edit area you provide, it will run over the whole domain. You should probably
# choose an edit area that is slightly larger than your entire CWA. It's
# possible that when mapping your GFE grids to NDFD grids that the NDFD thinks
# some GFE grid cells are in your CWA that the GFE does not think are in your
# CWA. Using an edit area slightly larger than the CWA, like the ISC_Send_Area
# which is the mask used when sending grids to the NDFD, should eliminate the
# possibibilty of the NDFD intermittently flagging CWA border "points" as
# inconsistent. Note: running the procedure over a subset of the entire GFE
# domain does not really provide any performance gains. Given the way the
# underlying array data structure works, calculations are almost always made
# at every single grid point first and then a mask is applied to limit the
# meaningful results to the edit area. For the purposes of this procedure, the
# values outside the edit area are set to the appropriate "consistent" result.
# The real benefit of this option is it limits the inconsistent results to the
# areas the forecaster really cares about, which should lessen the workload of
# using this procedure. Marine Offices: Make sure the edit area provided
# includes your marine zones.
cwaEditArea = 'ISC_Send_Area'
#### Config section end
import SmartScript
from numpy import *
class Procedure (SmartScript.SmartScript):
def __init__(self, dbss):
SmartScript.SmartScript.__init__(self, dbss)
def __cleanup(self, timeRange):
# Remove any temporary grids created previously.
for element in (
'SnowAmtQPFInconsistent', 'SnowAmtWxInconsistent',
'QPFPoPInconsistent', 'QPFWxInconsistent'):
try:
# From SmartScript
self.unloadWE('Fcst', element, 'SFC')
except:
# A failure is almost certainly no grids to unload.
pass
# Turn off any highlights. From SmartScript
self.highlightGrids('Fcst', 'SnowAmt', 'SFC', timeRange, inconGridColor, on=0)
self.highlightGrids('Fcst', 'QPF', 'SFC', timeRange, inconGridColor, on=0)
self.highlightGrids('Fcst', 'Wx', 'SFC', timeRange, inconGridColor, on=0)
self.highlightGrids('Fcst', 'PoP', 'SFC', timeRange, inconGridColor, on=0)
return
def __checkConfigValueTypes(self):
import types
message = ''
badValues = False
if not type(inconGridColor) is types.StringType:
message = '%sThe "inconGridColor" variable is not defined as a string value. Please contact your IFPS focal point to fix this problem.\n' % message
badValues = True
if not type(tempGridColor) is types.StringType:
message = '%sThe "tempGridColor" variable is not defined as a string value. Please contact your IFPS focal point to fix this problem.\n' % message
badValues = True
if not type(cwaEditArea) is types.StringType:
message = '%sThe "cwaEditArea" variable is not defined as a string value. Please contact your IFPS focal point to fix this problem.\n' % message
badValues = True
if badValues:
message = '%sYou will not be able to run the procedure until the problem is corrected.' % message
# The next two commands are from SmartScript
self.statusBarMsg(message, 'U')
self.cancel()
return
def _runSnowAmtQPFCheck(self, timeRange):
# This method implements the check that if SnowAmt >= 0.5, then
# QPF must be >= 0.01.
# There can be a significant difference between the values stored
# in memory and the values returned from the database. This is because
# when values are saved, the element's precision (as defined in
# serverConfig.py/localConfig.py) is enforced. Values in memory do not
# have the element's precision enforced; in fact, they have the
# machine precision of the underlying data type.
# If there are locks, post an urgent message and return from the method.
message = ''
# lockedByMe is from SmartScript
if self.lockedByMe('QPF', 'SFC'):
message = '%sYou have the QPF grid locked. Please save the QPF grid.\n' % message
if self.lockedByMe('SnowAmt', 'SFC'):
message = '%sYou have the SnowAmt grid locked. Please save the SnowAmt grid.\n' % message
# lockedByOther is from SmartScript
if self.lockedByOther('QPF', 'SFC'):
message = '%sThe QPF grid is locked by someone else. Please have that person save the QPF grid.\n' % message
if self.lockedByOther('SnowAmt', 'SFC'):
message = '%sThe SnowAmt grid is locked by someone else. Please have that person save the SnowAmt grid.\n' % message
if message:
message = '%sThe SnowAmt/QPF Check was not run.' % message
self.statusBarMsg(message, 'U')
# I return instead of aborting because the user may have asked for
# other tests that do not have locked grid problems.
return
# Make sure there are actually SnowAmt grids in the time range.
# The self.getGrids command will return None if there are no grids
# in the time range for mode='First' and noDataError=0. The None
# variable cannot be iterated over. Rather than trap in a try/except,
        # I'll just check for the condition. This may not be the most
# Pythonic way of doing things, but it allows me to avoid having
# a bunch of code indented beneath a try statement. If no SnowAmt
# grids are found, post an urgent message and return from the method.
# getGrids is from SmartScript
snowAmtInfoList = self.getGridInfo('Fcst', 'SnowAmt', 'SFC', timeRange)
if [] == snowAmtInfoList:
message = 'There are no SnowAmt grids in the time range you selected.\nThe SnowAmt/QPF Check did not run.'
self.statusBarMsg(message, 'U')
# I return instead of aborting because the user may have asked for
# other tests that do not have missing grid problems.
return
# getGridInfo is from SmartScript
# One might ask why I don't just return the result of self.getGrids
# to a variable and iterate over that. I'm trying to minimize the
# memory footprint of the procedure. Reading all the grids into a
# variable could be a fairly large memory hit. The construct below
# only reads one SnowAmt grid at a time into memory, the one that's
# being checked. By using the cache=0 switch on all the self.getGrids
# command, I prevent the GFE from saving the grids into memory for me.
# The Python builtin command enumerate loops over an iterable object
# and returns a 2-tuple containing the current index of the
# iteration and the object at that index. In cases where I need
# both the index and the object, I think this construct is more
# elegant than:
# for i in xrange(len(iterableObject)):
# object = iterableObject[i]
snowAmtGrids = self.getGrids('Fcst', 'SnowAmt', 'SFC',
timeRange, mode='List', noDataError=0,cache=0)
for snowAmtIndex, snowAmtGrid in enumerate(snowAmtGrids):
# greater_equal is from Numeric. For the given array and
# threshold, a new array of the same dimensions as the input
# array is returned. The new array has the value 1 where the
# input array was greater than or equal to the threshold and
# has the value 0 elsewhere.
halfInchMask = greater_equal(snowAmtGrid, 0.5 - snowAmtTol)
gridTR = snowAmtInfoList[snowAmtIndex].gridTime()
            # self.empty() (from SmartScript) returns a grid of zeros with the
            # same dimensions as the Fcst grids; it accumulates the QPF sum.
            qpfSum = self.empty()
qpfGrids = self.getGrids(
'Fcst', 'QPF', 'SFC', gridTR, mode='List', noDataError=0,
cache=0)
if qpfGrids is None:
message = '''There are no QPF grids in time range %s.
The SnowAmt/QPF Check skipped the time range.''' % gridTR
self.statusBarMsg(message, 'U')
continue
qpfInfoList = self.getGridInfo('Fcst', 'QPF', 'SFC', gridTR)
for qpfIndex, qpfGrid in enumerate(qpfGrids):
snowAmtGridStartTime = gridTR.startTime().unixTime()
qpfGridTR = qpfInfoList[qpfIndex].gridTime()
qpfGridStartTime = qpfGridTR.startTime().unixTime()
fraction = 1.0
if qpfGridStartTime < snowAmtGridStartTime:
diff = snowAmtGridStartTime - qpfGridStartTime
fraction -= (float(diff) / qpfGridTR.duration())
snowAmtGridEndTime = gridTR.endTime().unixTime()
qpfGridEndTime = qpfGridTR.endTime().unixTime()
if qpfGridEndTime > snowAmtGridEndTime:
diff = qpfGridEndTime - snowAmtGridEndTime
fraction -= (float(diff) / qpfGridTR.duration())
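                # Worked example (matches example 2 in the header comments): a
                # 3-hour QPF grid whose last 2 hours overlap this SnowAmt grid
                # gives fraction = 1.0 - 1/3 = 2/3, so only 2/3 of that grid's
                # QPF is added to qpfSum.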
# For some reason, the construct:
# qpfSum = qpfSum + (qpfGrid * fraction)
# doesn't assign the expression evaluation back to qpfSum.
# Thus, I use a temporary variable.
qpfTemp = qpfSum + (qpfGrid * fraction)
qpfSum = qpfTemp
del qpfTemp
# less is from Numeric. It behaves analogously to greater_equal,
# described above.
qpfMask = less(qpfSum, 0.01 + qpfTol)
# The following is the "truth" table for the logical
# comparison.
# SnowAmt >= 0.5, 1; SnowAmt < 0.5, 0
# QPF < 0.01, 1; QPF >= 0.01, 0
# SnowAmt >= 0.5 (1) and QPF < 0.01 (1) = 1 (Bad result)
# SnowAmt >= 0.5 (1) and QPF >= 0.01 (0) = 0 (Good result)
# SnowAmt < 0.5 (0) and QPF < 0.01 (1) = 0 (Good result)
# SnowAmt < 0.5 (0) and QPF >= 0.01 (0) = 0 (Good result)
# logical_and is from Numeric
consistMask = logical_and(halfInchMask, qpfMask)
# Now, apply the CWA mask. There's an assumption here that
# all offices will use a mask and provide a valid one, which
# means this step does something meaningful. If that assumption
# does not hold, then the next statement doesn't actually
# change anything, even though each and every grid point has a
# comparison check made.
                # The indexed assignment below sets consistMask to the "good
                # result" (zero) at every point outside the CWA mask, so only
                # points inside the edit area can be flagged as inconsistent.
consistMask[logical_not(self.cwaMask)] = 0
# ravel and sometrue are from Numeric.
if not sometrue(ravel(consistMask)):
# This is the good result, even though it may not be
# intuitive. The ravel function reduces the rank of the
# array by one. Since we had a 2-d array, the ravel
# function creates a 1-d array (a vector) such that
# reading the 2-d array from left-to-right, top-to-
# bottom returns the same values as reading the 1-d
# array from left-to-right. The sometrue function
# performs a logical or on subsequent element pairs
# in the 1-d array and returns the final result. If
# there's no inconsistency, the result will be 0.
# Thus, negating the sometrue result gives us the
# positive outcome. Phew.
# Since QPF is an accumulative element, we don't need
# to continue the loop once the QPF sum meets the
# threshold.
break
else:
# This block will only execute if the for loop runs to
# completion, i.e., the break statement is not executed.
# So, if we get here, we have an inconsistency and need to
# highlight the appropriate grids.
if inconGridColor:
self.highlightGrids(
'Fcst', 'SnowAmt', 'SFC', gridTR, inconGridColor)
self.highlightGrids(
'Fcst', 'QPF', 'SFC', gridTR, inconGridColor)
# createGrid is from SmartScript
# Since this block of code only executes if the for loop
# runs to completion, then the value of consistMask from
# the for loop will contain all of the inconsistencies.
self.createGrid(
'Fcst', 'SnowAmtQPFInconsistent', 'SCALAR', consistMask,
gridTR, descriptiveName='SnowAmtQPFInconsistent',
minAllowedValue=0, maxAllowedValue=1, units='Good/Bad')
if tempGridColor:
self.highlightGrids(
'Fcst', 'SnowAmtQPFInconsistent', 'SFC', gridTR,
tempGridColor)
self.inconsistent = True
# While not required, I like to terminate my methods with a return
# statement to make it clear this is where the method ends.
return
def _runSnowAmtWxCheck(self, timeRange):
# This implements the check that if SnowAmt >= 0.1, then the Wx grid
# must contain S, SW, or IP, regardless of whether or not there is
# any freezing or liquid types. Finally, the check does not look at
# anything other than the Wx type. In other words, the check will be
# okay if SnowAmt != 0 and Wx has Chc:S:- or Def:SW:-- or Lkly:S:+.
# There can be a significant difference between the values stored
# in memory and the values returned from the database. This is because
# when values are saved, the element's precision (as defined in
# serverConfig.py/localConfig.py) is enforced. Values in memory do not
# have the element's precision enforced; in fact, they have the
# machine precision of the underlying data type.
# If there are locks, post an urgent message and return from the method.
message = ''
# lockedByMe is from SmartScript
if self.lockedByMe('Wx', 'SFC'):
message = '%sYou have the Wx grid locked. Please save the Wx grid.\n' % message
if self.lockedByMe('SnowAmt', 'SFC'):
message = '%sYou have the SnowAmt grid locked. Please save the SnowAmt grid.\n' % message
# lockedByOther is from SmartScript
if self.lockedByOther('Wx', 'SFC'):
message = '%sThe Wx grid is locked by someone else. Please have that person save the Wx grid.\n' % message
if self.lockedByOther('SnowAmt', 'SFC'):
message = '%sThe SnowAmt grid is locked by someone else. Please have that person save the SnowAmt grid.\n' % message
if message:
message = '%sThe SnowAmt/Wx Check was not run.' % message
self.statusBarMsg(message, 'U')
# I return instead of aborting because the user may have asked for
# other tests that do not have locked grid problems.
return
# Make sure there are actually SnowAmt grids in the time range.
# The self.getGrids command will return None if there are no grids
# in the time range for noDataError=0. The None
# variable cannot be iterated over. Rather than trap in a try/except,
        # I'll just check for the condition. This may not be the most
# Pythonic way of doing things, but it allows me to avoid having
# a bunch of code indented beneath a try statement. If no SnowAmt
# grids are found, post an urgent message and return from the method.
# getGrids is from SmartScript
snowAmtInfoList = self.getGridInfo('Fcst', 'SnowAmt', 'SFC', timeRange)
if [] == snowAmtInfoList:
message = 'There are no SnowAmt grids in the time range you selected.\nThe SnowAmt/Wx Check did not run.'
self.statusBarMsg(message, 'U')
# I return instead of aborting because the user may have asked for
# other tests that do not have missing grid problems.
return
snowAmtGrids = self.getGrids(
'Fcst', 'SnowAmt', 'SFC', timeRange, mode='List', noDataError=0,
cache=0)
for snowAmtIndex, snowAmtGrid in enumerate(snowAmtGrids):
nonZeroMask = greater_equal(snowAmtGrid, 0.1 - snowAmtTol)
gridTR = snowAmtInfoList[snowAmtIndex].gridTime()
wxInfoList = self.getGridInfo('Fcst', 'Wx', 'SFC', gridTR)
if [] == wxInfoList:
message = '''There are no Wx grids in time range %s.
The SnowAmt/Wx Check skipped the time range.''' % gridTR
self.statusBarMsg(message, 'U')
continue
# There are two cases, which I'll capture in individual methods
# If the SnowAmt grid is exactly 6 hours long and starts at
# 00, 06, 12, or 18 UTC, then only one overlapping Wx grid needs
# to match. Otherwise, all overlapping Wx grids need to match.
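            # For example, a 12Z-18Z SnowAmt grid takes the "locked" path (any
            # one overlapping Wx grid with S, SW, or IP is enough), while a
            # 09Z-15Z grid takes the "unlocked" path (every overlapping Wx grid
            # must have one of those types).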
if gridTR.duration() / 3600 == 6 and \
gridTR.startTime().hour in (0, 6, 12, 18):
self._snowAmtWxCheckLocked(nonZeroMask, gridTR, wxInfoList)
else:
self._snowAmtWxCheckUnlocked(nonZeroMask, gridTR, wxInfoList)
return
def _snowAmtWxCheckLocked(self, nonZeroMask, gridTR, wxInfoList):
# The "Locked" comes from the idea that if the SnowAmt grid meets
# the duration and start time constraints, then it's been "locked".
# I need to capture the consistency masks for each individual Wx grid
# just in case I end up with inconsistencies.
consistMaskList = []
for wxIndex, wxGrid in enumerate(self.getGrids(
'Fcst', 'Wx', 'SFC', gridTR, mode='List', noDataError=0,
cache=0)):
# wxMask is from SmartScript
sMask = self.wxMask(wxGrid, ':S:')
swMask = self.wxMask(wxGrid, ':SW:')
ipMask = self.wxMask(wxGrid, ':IP:')
snowMask = logical_or(logical_or(sMask, swMask), ipMask)
del (sMask, swMask, ipMask)
wxMask = logical_not(snowMask)
# "Truth" table for the logical comparison follows
# SnowAmt >= 0.1, 1; SnowAmt < 0.1, 0
# Wx has S, SW, or IP, 0; Wx doesn't have S, SW, or IP, 1
# SnowAmt >= 0.1 (1) and Wx has (0) = 0 (Good result)
# SnowAmt >= 0.1 (1) and Wx doesn't have (1) = 1 (Bad result)
# SnowAmt < 0.1 (0) and Wx has (0) = 0 (Good result)
# SnowAmt < 0.1 (0) and Wx doesn't have (1) = 0 (Good result)
#
consistMask = logical_and(nonZeroMask, wxMask)
consistMask[logical_not(self.cwaMask)] = 0
consistMaskList.append(consistMask)
if not sometrue(ravel(consistMask)):
# There were no inconsistencies with this Wx grid. Since only
# one needs to be consistent, we don't need to do any more
# checks.
break
else:
# This block will only execute if the for loop runs to
# completion, i.e., the break statement is not executed.
# So, if we get here, we have an inconsistency and need to
# highlight the appropriate grids.
if inconGridColor:
self.highlightGrids(
'Fcst', 'SnowAmt', 'SFC', gridTR, inconGridColor)
self.highlightGrids(
'Fcst', 'Wx', 'SFC', gridTR, inconGridColor)
# createGrid is from SmartScript
for index in xrange(len(wxInfoList)):
# Create temporary grids for each Wx grid. Limit the start and
# end times of the temporary grids so that they don't extend
# beyond the start and end times of the corresponding SnowAmt
# grid.
wxGridTR = wxInfoList[index].gridTime()
tempGridStartTime = wxGridTR.startTime().unixTime()
if tempGridStartTime < gridTR.startTime().unixTime():
tempGridStartTime = gridTR.startTime().unixTime()
tempGridEndTime = wxGridTR.endTime().unixTime()
if tempGridEndTime > gridTR.endTime().unixTime():
tempGridEndTime = gridTR.endTime().unixTime()
tempGridDur = (tempGridEndTime - tempGridStartTime) / 3600
offset = (tempGridStartTime - \
self.timeRange0_1.startTime().unixTime()) / 3600
# Because the time range may be different for the temporary
# grid, I need to create and use that time range when
# creating the temporary grid.
tempGridTR = self.createTimeRange(
offset, offset+tempGridDur, 'Zulu')
self.createGrid(
'Fcst', 'SnowAmtWxInconsistent', 'SCALAR',
consistMaskList[index], tempGridTR,
descriptiveName='SnowAmtWxInconsistent',
minAllowedValue=0, maxAllowedValue=1, units='Good/Bad')
if tempGridColor:
self.highlightGrids(
'Fcst', 'SnowAmtWxInconsistent', 'SFC', gridTR,
tempGridColor)
self.inconsistent = True
return
def _snowAmtWxCheckUnlocked(self, nonZeroMask, gridTR, wxInfoList):
# The "Unlocked" comes from the idea that if the SnowAmt grid does
# not meet the duration and start time constraints, then it's been
# left "unlocked".
for wxIndex, wxGrid in enumerate(self.getGrids(
'Fcst', 'Wx', 'SFC', gridTR, mode='List', noDataError=0,
cache=0)):
# wxMask is from SmartScript
sMask = self.wxMask(wxGrid, ':S:')
swMask = self.wxMask(wxGrid, ':SW:')
ipMask = self.wxMask(wxGrid, ':IP:')
snowMask = logical_or(logical_or(sMask, swMask), ipMask)
del (sMask, swMask, ipMask)
wxMask = logical_not(snowMask)
# "Truth" table for the logical comparison follows
# SnowAmt >= 0.1, 1; SnowAmt < 0.1, 0
# Wx has S, SW, or IP, 0; Wx doesn't have S, SW, or IP, 1
# SnowAmt >= 0.1 (1) and Wx has (0) = 0 (Good result)
# SnowAmt >= 0.1 (1) and Wx doesn't have (1) = 1 (Bad result)
# SnowAmt < 0.1 (0) and Wx has (0) = 0 (Good result)
# SnowAmt < 0.1 (0) and Wx doesn't have (1) = 0 (Good result)
#
# All Wx grids overlapping the SnowAmt grid must be consistent.
consistMask = logical_and(nonZeroMask, wxMask)
consistMask[logical_not(self.cwaMask)] = 0
if sometrue(ravel(consistMask)):
# I'll highlight the SnowAmt grids and Wx grids in
# gridTR as I did with QPF. However, I'll make
# temporary grids here using the Wx grid's time
# range but, the temporary grid cannot start before
# the start of the corresponding SnowAmt grid nor can
# it end after the end of the corresponding SnowAmt grid.
wxGridTR = wxInfoList[wxIndex].gridTime()
tempGridStartTime = wxGridTR.startTime().unixTime()
if tempGridStartTime < gridTR.startTime().unixTime():
# Clip to start of SnowAmt grid
tempGridStartTime = gridTR.startTime().unixTime()
tempGridEndTime = wxGridTR.endTime().unixTime()
if tempGridEndTime > gridTR.endTime().unixTime():
# Clip to end of SnowAmtGrid
tempGridEndTime = gridTR.endTime().unixTime()
tempGridDur = (tempGridEndTime - tempGridStartTime) / 3600
offset = (tempGridStartTime - \
self.timeRange0_1.startTime().unixTime()) / 3600
# Since either the front or end of the Wx grid's
# time range may have been clipped, create a time
# range using those values.
tempGridTR = self.createTimeRange(
offset, offset+tempGridDur, 'Zulu')
self.createGrid(
'Fcst', 'SnowAmtWxInconsistent', 'SCALAR', consistMask,
tempGridTR, descriptiveName='SnowAmtWxInconsistent',
minAllowedValue=0, maxAllowedValue=1, units='Good/Bad')
if tempGridColor:
self.highlightGrids(
'Fcst', 'SnowAmtWxInconsistent', 'SFC', gridTR,
tempGridColor)
if inconGridColor:
self.highlightGrids(
'Fcst', 'SnowAmt', 'SFC', gridTR, inconGridColor)
self.highlightGrids(
'Fcst', 'Wx', 'SFC', wxGridTR, inconGridColor)
self.inconsistent = True
return
def _runQPFPoPCheck(self, timeRange):
# This method implements the check that if any QPF grid is non zero
# then one of the corresponding floating PoP grids must also be non
# zero.
# There can be a significant difference between the values stored
# in memory and the values returned from the database. This is because
# when values are saved, the element's precision (as defined in
# serverConfig.py/localConfig.py) is enforced. Values in memory do not
# have the element's precision enforced; in fact, they have the
# machine precision of the underlying data type.
# If there are locks, post an urgent message and return from the method.
message = ''
# lockedByMe is from SmartScript
if self.lockedByMe('QPF', 'SFC'):
message = '%sYou have the QPF grid locked. Please save the QPF grid.\n' % message
if self.lockedByMe('PoP', 'SFC'):
message = '%sYou have the PoP grid locked. Please save the PoP grid.\n' % message
# lockedByOther is from SmartScript
if self.lockedByOther('QPF', 'SFC'):
message = '%sThe QPF grid is locked by someone else. Please have that person save the QPF grid.\n' % message
if self.lockedByOther('PoP', 'SFC'):
message = '%sThe PoP grid is locked by someone else. Please have that person save the PoP grid.\n' % message
if message:
message = '%sThe QPF/PoP Check was not run.' % message
self.statusBarMsg(message, 'U')
# I return instead of aborting because the user may have asked for
# other tests that do not have locked grid problems.
return
# Make sure there are actually QPF grids in the time range.
# The self.getGrids command will return None if there are no grids
# in the time range for mode='First' and noDataError=0. The None
# variable cannot be iterated over. Rather than trap in a try/except,
        # I'll just check for the condition. This may not be the most
        # Pythonic way of doing things, but it allows me to avoid having
        # a bunch of code indented beneath a try statement. If no QPF
        # grids are found, post an urgent message and return from the method.
# getGrids is from SmartScript
qpfInfoList = self.getGridInfo('Fcst', 'QPF', 'SFC', timeRange)
if [] == qpfInfoList:
message = 'There are no QPF grids in the time range you selected.\nThe QPF/PoP Check did not run.'
self.statusBarMsg(message, 'U')
# I return instead of aborting because the user may have asked for
# other tests that do not have missing grid problems.
return
qpfGrids = self.getGrids(
'Fcst', 'QPF', 'SFC', timeRange, mode='List', noDataError=0,
cache=0)
for qpfIndex, qpfGrid in enumerate(qpfGrids):
gridTR = qpfInfoList[qpfIndex].gridTime()
popGrid = self.getGrids(
'Fcst', 'PoP', 'SFC', gridTR, mode='Max', noDataError=0,
cache=0)
if popGrid is None:
message = '''There are no PoP grids in time range %s.
The QPF/PoP Check skipped the time range.''' % gridTR
self.statusBarMsg(message, 'U')
continue
qpfNonZeroMask = greater(qpfGrid, qpfTol)
popZeroMask = equal(popGrid, 0)
# popZeroMask = 1 if PoP = 0; popZeroMask = 0 if PoP != 0
# qpfNonZeroMask = 1 if QPF > 0; qpfNonZeroMask = 0 if QPF = 0
# PoP = 0 (1) and QPF = 0 (0) => 0 (Good result)
# PoP != 0 (0) and QPF = 0 (0) => 0 (Good result)
# PoP != 0 (0) and QPF > 0 (1) => 0 (Good result)
# PoP = 0 (1) and QPF > 0 (1) => 1 (Bad result)
consistMask = logical_and(qpfNonZeroMask, popZeroMask)
consistMask[logical_not(self.cwaMask)] = 0
if sometrue(ravel(consistMask)):
# The good result is if the logical_and returns zeros
# for every grid point, that is "none true". So, if
# the sometrue method evaluates True, there are
# inconsistencies.
self.createGrid(
'Fcst', 'QPFPoPInconsistent', 'SCALAR', consistMask, gridTR,
descriptiveName='QPFPoPInconsistent',
minAllowedValue=0, maxAllowedValue=1, units='Good/Bad')
if tempGridColor:
self.highlightGrids(
'Fcst', 'QPFPoPInconsistent', 'SFC', gridTR,
tempGridColor)
if inconGridColor:
self.highlightGrids(
'Fcst', 'QPF', 'SFC', gridTR, inconGridColor)
self.highlightGrids(
'Fcst', 'PoP', 'SFC', gridTR, inconGridColor)
self.inconsistent = True
##### Edited by Rob Radzanowski (WFO-CTP) 03-16-2009 to add missing NDFD check for QPF=0 & PoP > 50
##### which is causing unexplained yellow banners due to lack of checking for this error.
qpfZeroMask = equal(qpfGrid, 0)
popGrid = self.getGrids(
'Fcst', 'PoP', 'SFC', gridTR, mode='Max', noDataError=0, cache=0)
popGreater50Mask = greater(popGrid, 50)
# popGreater50Mask = 1 if PoP > 50; popGreater50Mask = 0 if PoP <= 50
# qpfZeroMask = 0 if QPF > 0; qpfZeroMask = 1 if QPF = 0
# PoP > 50 (1) and QPF > 0 (0) => 0 (Good result)
# PoP > 50 (1) and QPF = 0 (1) => 1 (Bad result)
# PoP <= 50 (0) and QPF > 0 (0) => 0 (Good/Irrelevant result)
# PoP <= 50 (0) and QPF = 0 (1) => 0 (Good result)
consistMask2 = logical_and(qpfZeroMask, popGreater50Mask)
consistMask2[logical_not(self.cwaMask)] = 0
if sometrue(ravel(consistMask2)):
# The good result is if the logical_and returns zeros
# for every grid point, that is "none true". So, if
# the sometrue method evaluates True, there are
# inconsistencies.
self.createGrid(
'Fcst', 'QPFPoPInconsistent', 'SCALAR', consistMask2, gridTR,
descriptiveName='QPFPoPInconsistent',
minAllowedValue=0, maxAllowedValue=1, units='Good/Bad')
if tempGridColor:
self.highlightGrids('Fcst', 'QPFPoPInconsistent', 'SFC', gridTR, tempGridColor)
if inconGridColor:
self.highlightGrids('Fcst', 'QPF', 'SFC', gridTR, inconGridColor)
self.highlightGrids('Fcst', 'PoP', 'SFC', gridTR, inconGridColor)
self.inconsistent = True
return
def _runQPFWxCheck(self, timeRange):
# This method implements the check that if QPF non zero, then the
# corresponding Wx grids must contain a precipitable Wx type. Note:
# the method only checks the Wx type, no cov/prob, no inten, etc.
# There can be a significant difference between the values stored
# in memory and the values returned from the database. This is because
# when values are saved, the element's precision (as defined in
# serverConfig.py/localConfig.py) is enforced. Values in memory do not
# have the element's precision enforced; in fact, they have the
# machine precision of the underlying data type.
# If there are locks, post an urgent message and return from the method.
message = ''
# lockedByMe is from SmartScript
if self.lockedByMe('QPF', 'SFC'):
message = '%sYou have the QPF grid locked. Please save the QPF grid.\n' % message
if self.lockedByMe('Wx', 'SFC'):
message = '%sYou have the Wx grid locked. Please save the Wx grid.\n' % message
# lockedByOther is from SmartScript
if self.lockedByOther('QPF', 'SFC'):
message = '%sThe QPF grid is locked by someone else. Please have that person save the QPF grid.\n' % message
if self.lockedByOther('Wx', 'SFC'):
message = '%sThe Wx grid is locked by someone else. Please have that person save the Wx grid.\n' % message
if message:
message = '%sThe QPF/Wx Check was not run.' % message
self.statusBarMsg(message, 'U')
# I return instead of aborting because the user may have asked for
# other tests that do not have locked grid problems.
return
# Make sure there are actually QPF grids in the time range.
        # I'll just check for the condition. If no QPF
        # grids are found, post an urgent message and return from the method.
qpfInfoList = self.getGridInfo('Fcst', 'QPF', 'SFC', timeRange)
if [] == qpfInfoList:
            message = 'There are no QPF grids in the time range you selected.\nThe QPF/Wx Check did not run.'
self.statusBarMsg(message, 'U')
# I return instead of aborting because the user may have asked for
# other tests that do not have missing grid problems.
return
for qpfIndex, qpfGrid in enumerate(self.getGrids(
'Fcst', 'QPF', 'SFC', timeRange, mode='List', noDataError=0,
cache=0)):
qpfNonZeroMask = greater(qpfGrid, qpfTol)
gridTR = qpfInfoList[qpfIndex].gridTime()
wxInfoList = self.getGridInfo('Fcst', 'Wx', 'SFC', gridTR)
if [] == wxInfoList:
message = '''There are no Wx grids in time range %s.
The QPF/Wx Check skipped the time range.''' % gridTR
self.statusBarMsg(message, 'U')
continue
# There are two cases. If the QPF grid is exactly 6 hours long and
# starts at 00, 06, 12, or 18 UTC, then only one of the
# corresponding Wx grids needs to be consistent. Otherwise, all the
# corresponding Wx grids need to be consistent.
if gridTR.duration() / 3600 == 6 and gridTR.startTime().hour in (0, 6, 12, 18):
self._qpfWxCheckLocked(qpfNonZeroMask, gridTR, wxInfoList)
else:
self._qpfWxCheckUnlocked(qpfNonZeroMask, gridTR, wxInfoList)
return
def _qpfWxCheckLocked(self, qpfNonZeroMask, gridTR, wxInfoList):
# The "Locked" comes from the idea that if the QPF grid is
# exactly 6 hours long and starts at 00, 06, 12, or 18 UTC, then it
# is "locked".
consistMaskList = []
for wxIndex, wxGrid in enumerate(self.getGrids(
'Fcst', 'Wx', 'SFC', gridTR, mode='List', noDataError=0,
cache=0)):
# wxMask is from SmartScript
sMask = self.wxMask(wxGrid, ':S:')
swMask = self.wxMask(wxGrid, ':SW:')
ipMask = self.wxMask(wxGrid, ':IP:')
snowMask = logical_or(logical_or(sMask, swMask), ipMask)
del (sMask, swMask, ipMask)
rMask = self.wxMask(wxGrid, ':R:')
rwMask = self.wxMask(wxGrid, ':RW:')
lMask = self.wxMask(wxGrid, ':L:')
zlMask = self.wxMask(wxGrid, ':ZL:')
zrMask = self.wxMask(wxGrid, ':ZR:')
# logical_or is from Numeric
rainMask = logical_or(
rMask, logical_or(
rwMask, logical_or(
lMask, logical_or(zlMask, zrMask))))
del (rMask, rwMask, lMask, zlMask, zrMask)
precipMask = logical_or(snowMask, rainMask)
del (snowMask, rainMask)
wxMask = logical_not(precipMask)
# QPF >= 0.01, 1; QPF < 0.01, 0
# Wx has precip, 0; Wx doesn't have precip, 1
# QPF >= 0.01 (1) and Wx has (0) = 0 (Good result)
# QPF >= 0.01 (1) and Wx doesn't have (1) = 1 (Bad result)
# QPF < 0.01 (0) and Wx has (0) = 0 (Good result)
# QPF < 0.01 (0) and Wx doesn't have (1) = 0 (Good result)
consistMask = logical_and(qpfNonZeroMask, wxMask)
consistMask[logical_not(self.cwaMask)] = 0
consistMaskList.append(consistMask)
if not sometrue(ravel(consistMask)):
# There were no inconsistencies with this Wx grid. Since only
# one needs to be consistent, we don't need to do any more
# checks.
break
else:
# This block will only execute if the for loop runs to
# completion, i.e., the break statement is not executed.
# So, if we get here, we have an inconsistency and need to
# highlight the appropriate grids.
if inconGridColor:
self.highlightGrids(
'Fcst', 'QPF', 'SFC', gridTR, inconGridColor)
self.highlightGrids(
'Fcst', 'Wx', 'SFC', gridTR, inconGridColor)
# createGrid is from SmartScript
for index in xrange(len(wxInfoList)):
# Create temporary grids for each Wx grid. Limit the time
# range of the temporary grid so that it doesn't start any
# earlier or any later than the corresponding QPF grid.
wxGridTR = wxInfoList[index].gridTime()
tempGridStartTime = wxGridTR.startTime().unixTime()
if tempGridStartTime < gridTR.startTime().unixTime():
tempGridStartTime = gridTR.startTime().unixTime()
tempGridEndTime = wxGridTR.endTime().unixTime()
if tempGridEndTime > gridTR.endTime().unixTime():
tempGridEndTime = gridTR.endTime().unixTime()
tempGridDur = (tempGridEndTime - tempGridStartTime) / 3600
offset = (tempGridStartTime - \
self.timeRange0_1.startTime().unixTime()) / 3600
# Since the temporary grid could have a different time range
# than the Wx grid, I need to create and use that time range
# when creating the temporary grid.
tempGridTR = self.createTimeRange(
offset, offset+tempGridDur, 'Zulu')
self.createGrid(
'Fcst', 'QPFWxInconsistent', 'SCALAR',
consistMaskList[index], tempGridTR,
descriptiveName='QPFWxInconsistent',
minAllowedValue=0, maxAllowedValue=1, units='Good/Bad')
if tempGridColor:
self.highlightGrids(
'Fcst', 'QPFWxInconsistent', 'SFC', gridTR,
tempGridColor)
self.inconsistent = True
return
def _qpfWxCheckUnlocked(self, qpfNonZeroMask, gridTR, wxInfoList):
# The "Unlocked" comes from the idea that if the QPF grid is not
# exactly 6 hours long and starting at 00, 06, 12, or 18 UTC, then it
# is "unlocked".
for wxIndex, wxGrid in enumerate(self.getGrids(
'Fcst', 'Wx', 'SFC', gridTR, mode='List', noDataError=0,
cache=0)):
# wxMask is from SmartScript
sMask = self.wxMask(wxGrid, ':S:')
swMask = self.wxMask(wxGrid, ':SW:')
ipMask = self.wxMask(wxGrid, ':IP:')
snowMask = logical_or(logical_or(sMask, swMask), ipMask)
del (sMask, swMask, ipMask)
rMask = self.wxMask(wxGrid, ':R:')
rwMask = self.wxMask(wxGrid, ':RW:')
lMask = self.wxMask(wxGrid, ':L:')
zlMask = self.wxMask(wxGrid, ':ZL:')
zrMask = self.wxMask(wxGrid, ':ZR:')
# logical_or is from Numeric
rainMask = logical_or(
rMask, logical_or(
rwMask, logical_or(
lMask, logical_or(zlMask, zrMask))))
del (rMask, rwMask, lMask, zlMask, zrMask)
precipMask = logical_or(snowMask, rainMask)
del (snowMask, rainMask)
wxMask = logical_not(precipMask)
# QPF >= 0.01, 1; QPF < 0.01, 0
# Wx has precip, 0; Wx doesn't have precip, 1
# QPF >= 0.01 (1) and Wx has (0) = 0 (Good result)
# QPF >= 0.01 (1) and Wx doesn't have (1) = 1 (Bad result)
# QPF < 0.01 (0) and Wx has (0) = 0 (Good result)
# QPF < 0.01 (0) and Wx doesn't have (1) = 0 (Good result)
#
# All Wx grids overlapping the SnowAmt grid must be consistent.
consistMask = logical_and(qpfNonZeroMask, wxMask)
consistMask[logical_not(self.cwaMask)] = 0
if sometrue(ravel(consistMask)):
wxGridTR = wxInfoList[wxIndex].gridTime()
tempGridStartTime = wxGridTR.startTime().unixTime()
if tempGridStartTime < gridTR.startTime().unixTime():
# Clip to start of QPF grid
tempGridStartTime = gridTR.startTime().unixTime()
tempGridEndTime = wxGridTR.endTime().unixTime()
if tempGridEndTime > gridTR.endTime().unixTime():
# Clip to end of QPF Grid
tempGridEndTime = gridTR.endTime().unixTime()
tempGridDur = (tempGridEndTime - tempGridStartTime) / 3600
offset = (tempGridStartTime - \
self.timeRange0_1.startTime().unixTime()) / 3600
# Since either the front or end of the Wx grid's
# time range may have been clipped, create a time
# range using those values.
tempGridTR = self.createTimeRange(
offset, offset+tempGridDur, 'Zulu')
self.createGrid(
'Fcst', 'QPFWxInconsistent', 'SCALAR', consistMask,
tempGridTR, descriptiveName='QPFWxInconsistent',
minAllowedValue=0, maxAllowedValue=1, units='Good/Bad')
if tempGridColor:
self.highlightGrids(
'Fcst', 'QPFWxInconsistent', 'SFC', gridTR,
tempGridColor)
if inconGridColor:
self.highlightGrids(
'Fcst', 'QPF', 'SFC', gridTR, inconGridColor)
self.highlightGrids(
'Fcst', 'Wx', 'SFC', wxGridTR, inconGridColor)
self.inconsistent = True
return
def _calcTolerance(self, gridInfo):
precision = gridInfo.gridParmInfo.getPrecision()
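        # For example, a parm precision of 2 (hundredths) yields a tolerance
        # of 10 ** -2 == 0.01.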
return pow(10, -precision)
def execute(self, timeRange, varDict):
# Make sure the configuration values are the correct types.
self.__checkConfigValueTypes()
# createTimeRange is from SmartScript
timeRange0_240 = self.createTimeRange(0, 241, 'Zulu')
checkCleanup = varDict.get('Check_Cleanup', 'Check')
self.__cleanup(timeRange0_240)
if checkCleanup == 'Cleanup':
            message = 'SnowAmtQPFPoPWxCheck complete.'
self.statusBarMsg(message, 'R')
self.cancel()
if timeRange.endTime().unixTime() - timeRange.startTime().unixTime() < \
                3600:  # No time range selected; create a 0 to 240 hour range
timeRange = timeRange0_240
# If the user has a time range swept out, send an informational
# message.
if (timeRange.startTime().unixTime() != timeRange0_240.startTime().unixTime()) or \
(timeRange.endTime().unixTime() != timeRange0_240.endTime().unixTime()) or \
(timeRange.duration() != timeRange0_240.duration()):
message = 'The SnowAmtQPFPoPWxCheck procedure did not run over the 0 to 240 hour time period,\nit ran over %s. This may be what you desired.' % str(timeRange)
self.statusBarMsg(message, 'S')
# I'll need to know the unix time of 00Z so I can determine the
# start time of temporary grids later. I'll need this in more than
# one of the methods called later, so this will become an instance
# variable, i.e., prefixed with "self." I also need an instance
# variable that flags whether or not there were inconsistent grids.
self.timeRange0_1 = self.createTimeRange(0, 1, 'Zulu')
self.inconsistent = False
# A CWA edit area can be provided in the configuration section.
# Attempt to encode that edit area as a Numeric Python mask so that
# the later checks are limited to the edit area. The GFE is not very
# friendly if the encoding fails. The GFE will send a nasty message
# to the user, but continue executing the procedure. No trappable
# error is thrown. As of this writing, the GFE appears to create an
# array of shape (0, 0) if the encoding cannot be done, so I will
# check for that and, if I find it, then set the edit area to the
# domain.
# encodeEditArea comes from SmartScript. For the points that are in
# the edit area, a value of one is assigned. Otherwise, a value of
# zero is assigned.
if cwaEditArea:
self.cwaMask = self.encodeEditArea(cwaEditArea)
if self.cwaMask.shape == (0, 0):
# Use the getGridInfo command to get information about the
# SnowAmt grid. From this, the grid size can be extracted. I
# could use getGridInfo on any valid GFE grid.
# getGridInfo is from SmartScript
snowAmtInfoList = self.getGridInfo(
'Fcst', 'SnowAmt', 'SFC', timeRange)
# I painfully discovered that the array shape is (y, x)
gridSize = (snowAmtInfoList[0].gridLocation().gridSize().y,
snowAmtInfoList[0].gridLocation().gridSize().x)
# ones is from Numeric. It creates an array of the given size
# and data type where all values are one.
                self.cwaMask = ones(gridSize, int)
message = \
'''The procedure was not able to use the CWA edit area, %s, provided
in the configuration. You should inform the person responsible for procedures
of this problem. The procedure ran over the whole domain.''' % cwaEditArea
self.statusBarMsg(message, 'S')
else:
snowAmtInfoList = self.getGridInfo(
'Fcst', 'SnowAmt', 'SFC', timeRange)
gridSize = (snowAmtInfoList[0].gridLocation().gridSize().y,
snowAmtInfoList[0].gridLocation().gridSize().x)
            self.cwaMask = ones(gridSize, int)
# Based on the user's input, run the appropriate checks.
# By making each of these options a checkbox with only one option in
# the VariableList above, if an option is unchecked then an empty
# list, [], will be what's in varDict. If an option is checked then a
# list with the value "Yes", ["Yes"], will be what's in varDict. In
# Python, a conditional expression can be whether or not a data
# structure is empty. In these cases, an empty data structure,
# e.g., an empty list, an empty tuple, an empty dictionary,
# conditionally test to False while non empty data structures
# conditionally test to True. In the if statements below, every varDict
# lookup returns a list: either [] or ["Yes"]. I think the constructs
# below or more elegant and easier to understand.
if varDict['Run SnowAmt/QPF Check?']:
# Call the SnowAmt/QPF check method
self._runSnowAmtQPFCheck(timeRange)
if varDict['Run SnowAmt/Wx Check?']:
# Call the SnowAmt/Wx check method
self._runSnowAmtWxCheck(timeRange)
if varDict['Run QPF/PoP Check?']:
# Call the QPF/PoP check method
self._runQPFPoPCheck(timeRange)
if varDict['Run QPF/Wx Check?']:
# Call the QPF/Wx check method
self._runQPFWxCheck(timeRange)
message = 'SnowAmtQPFPoPWxCheck complete.'
if self.inconsistent:
message = '%s Inconsistencies found! Grids highlighted %s and %s.' % (
message, inconGridColor, tempGridColor)
self.statusBarMsg(message, 'S')
else:
self.statusBarMsg(message, 'R')
| [
"[email protected]"
] | |
caa61f239dd804cfd346a5bfbdd0c96f9db3019c | aa853a9094fff4b6e9b0ddc7469be29ad5f0f811 | /poi_account_discount/__init__.py | fbcb99378cb4816bc628da5b14f4c85a93bfbda9 | [] | no_license | blue-connect/illuminati | 40a13e1ebeaceee39f17caa360f79e8deeaebf58 | 6682e60630064641474ddb2d8cbc520e30f64832 | refs/heads/master | 2022-01-06T00:55:58.465611 | 2018-11-24T04:30:03 | 2018-11-24T04:30:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 Poiesis Consulting (<http://www.poiesisconsulting.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#import account
import order
import wizard | [
"[email protected]"
] | |
69ce65da047bda6776179e27ce17ebcda32a87e1 | 040a6cc313a6200da1d176191707bfb896053db4 | /descarteslabs/catalog/catalog_base.py | 0729318b9da2c1d50cf45c9f6c684f13b12fdd4c | [
"Apache-2.0"
] | permissive | aashish24/descarteslabs-python | 77747984994609205887262bafeec5e9d38fcd0c | 00149115e8ef6cd1f48b0a6c689f5da07f69c306 | refs/heads/master | 2022-11-19T02:02:00.959896 | 2020-07-24T16:23:55 | 2020-07-24T16:42:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,013 | py | from six import add_metaclass, iteritems, ensure_str, wraps
from types import MethodType
import json
from descarteslabs.client.exceptions import NotFoundError
from .attributes import (
AttributeMeta,
AttributeValidationError,
AttributeEqualityMixin,
DocumentState,
Timestamp,
ListAttribute,
ExtraPropertiesAttribute,
TypedAttribute,
)
from .catalog_client import CatalogClient, HttpRequestMethod
class DeletedObjectError(Exception):
"""Indicates that an action cannot be performed.
Raised when some action cannot be performed because the catalog object
has been deleted from the Descartes Labs catalog using the delete method
(e.g. :py:meth:`Product.delete`).
"""
pass
class UnsavedObjectError(Exception):
"""Indicate that an action cannot be performed.
Raised when trying to delete an object that hasn't been saved.
"""
pass
def check_deleted(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
if self.state == DocumentState.DELETED:
raise DeletedObjectError("This catalog object has been deleted.")
try:
return f(self, *args, **kwargs)
except NotFoundError as e:
self._deleted = True
raise DeletedObjectError(
"{} instance with id {} has been deleted".format(
self.__class__.__name__, self.id
)
).with_traceback(e.__traceback__) from None
return wrapper
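# Illustrative sketch (hypothetical method name): derived classes wrap their
# service-backed instance methods with check_deleted so that a NotFoundError
# from the API surfaces as a DeletedObjectError, e.g.
#
#     @check_deleted
#     def some_service_method(self, ...):
#         ...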
def check_derived(f):
@wraps(f)
def wrapper(self, *args, **kwargs):
if self._url is None:
raise TypeError(
"This method is only available for a derived class of 'CatalogObject'"
)
return f(self, *args, **kwargs)
return wrapper
def _new_abstract_class(cls, abstract_cls):
if cls is abstract_cls:
raise TypeError(
"You can only instantiate a derived class of '{}'".format(
abstract_cls.__name__
)
)
return super(abstract_cls, cls).__new__(cls)
class CatalogObjectMeta(AttributeMeta):
def __new__(cls, name, bases, attrs):
new_cls = super(CatalogObjectMeta, cls).__new__(cls, name, bases, attrs)
if new_cls._doc_type:
new_cls._model_classes_by_type_and_derived_type[
(new_cls._doc_type, new_cls._derived_type)
] = new_cls
if new_cls.__doc__ is not None and new_cls._instance_delete.__doc__ is not None:
# Careful with this; leading white space is very significant
new_cls.__doc__ += (
"""
Methods
-------
delete()
"""
+ new_cls._instance_delete.__doc__
)
return new_cls
@add_metaclass(CatalogObjectMeta)
class CatalogObjectBase(AttributeEqualityMixin):
"""A base class for all representations of top level objects in the Catalog API."""
# The following can be overridden by subclasses to customize behavior:
# JSONAPI type for this model (required)
_doc_type = None
# Path added to the base URL for a list request of this model (required)
_url = None
# List of related objects to include in read requests
_default_includes = []
# The derived type of this class
_derived_type = None
# Attribute to use to determine the derived type of an instance
_derived_type_switch = None
_model_classes_by_type_and_derived_type = {}
id = TypedAttribute(
str,
mutable=False,
serializable=False,
doc="""str, immutable: A unique identifier for this object.
Note that if you pass a string that does not begin with your Descartes Labs
user organization ID, it will be prepended to your `id` with a ``:`` as
separator. If you are not part of an organization, your user ID is used. Once
set, it cannot be changed.
""",
)
created = Timestamp(
readonly=True,
doc="""datetime, readonly: The point in time this object was created.
*Filterable, sortable*.
""",
)
modified = Timestamp(
readonly=True,
doc="""datetime, readonly: The point in time this object was last modified.
*Filterable, sortable*.
""",
)
def __new__(cls, *args, **kwargs):
return _new_abstract_class(cls, CatalogObjectBase)
def __init__(self, **kwargs):
self.delete = self._instance_delete
self._client = kwargs.pop("client", None) or CatalogClient.get_default_client()
self._attributes = {}
self._modified = set()
self._initialize(
id=kwargs.pop("id", None),
saved=kwargs.pop("_saved", False),
relationships=kwargs.pop("_relationships", None),
related_objects=kwargs.pop("_related_objects", None),
**kwargs
)
def __del__(self):
for attr_type in self._attribute_types.values():
attr_type.__delete__(self, validate=False)
def _clear_attributes(self):
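        # Reset all cached attribute state, keeping only "sticky" top-level attributes across re-initialization.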
self._mapping_attribute_instances = {}
self._clear_modified_attributes()
# This only applies to top-level attributes
sticky_attributes = {}
for name, value in self._attributes.items():
attribute_type = self._attribute_types.get(name)
if attribute_type._sticky:
sticky_attributes[name] = value
self._attributes = sticky_attributes
def _initialize(
self,
id=None,
saved=False,
relationships=None,
related_objects=None,
deleted=False,
**kwargs
):
self._clear_attributes()
self._saved = saved
self._deleted = deleted
# This is an immutable attribute; can only be set once
if id:
self.id = id
for (name, val) in iteritems(kwargs):
# Only silently ignore unknown attributes if data came from service
attribute_definition = (
self._attribute_types.get(name)
if saved
else self._get_attribute_type(name)
)
if attribute_definition is not None:
attribute_definition.__set__(self, val, validate=not saved)
for name, t in iteritems(self._reference_attribute_types):
id_value = kwargs.get(t.id_field)
if id_value is not None:
object_value = kwargs.get(name)
if object_value and object_value.id != id_value:
message = (
"Conflicting related object reference: '{}' was '{}' "
"but '{}' was '{}'"
).format(t.id_field, id_value, name, object_value.id)
raise AttributeValidationError(message)
if related_objects:
related_object = related_objects.get(
(t.reference_class._doc_type, id_value)
)
if related_object is not None:
t.__set__(self, related_object, validate=not saved)
if saved:
self._clear_modified_attributes()
def __repr__(self):
name = ensure_str(self.name) if getattr(self, "name", None) is not None else ""
sections = [
# Document type and ID
"{}: {}\n id: {}".format(self.__class__.__name__, name, self.id)
]
# related objects and their ids
for name in sorted(self._reference_attribute_types):
t = self._reference_attribute_types[name]
# as a temporary hack for image upload, handle missing image_id field
sections.append(" {}: {}".format(name, getattr(self, t.id_field, None)))
if self.created:
sections.append(" created: {:%c}".format(self.created))
if self.state == DocumentState.DELETED:
sections.append("* Deleted from the Descartes Labs catalog.")
elif self.state != DocumentState.SAVED:
sections.append(
"* Not up-to-date in the Descartes Labs catalog. Call `.save()` to save or update this record."
)
return "\n".join(sections)
def __eq__(self, other):
if (
not isinstance(other, self.__class__)
or self.id != other.id
or self.state != other.state
):
return False
return super(CatalogObjectBase, self).__eq__(other)
def __setattr__(self, name, value):
if not (name.startswith("_") or isinstance(value, MethodType)):
# Make sure it's a proper attribute
self._get_attribute_type(name)
super(CatalogObjectBase, self).__setattr__(name, value)
@property
def is_modified(self):
"""bool: Whether any attributes were changed (see `state`).
``True`` if any of the attribute values changed since the last time this
catalog object was retrieved or saved. ``False`` otherwise.
Note that assigning an identical value does not affect the state.
"""
return bool(self._modified)
@classmethod
def _get_attribute_type(cls, name):
try:
return cls._attribute_types[name]
except KeyError:
raise AttributeError("{} has no attribute {}".format(cls.__name__, name))
@classmethod
def _get_model_class(cls, serialized_object):
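        # Resolve the concrete model class for a serialized object, using the derived-type switch attribute when the base class defines one.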
class_type = serialized_object["type"]
klass = cls._model_classes_by_type_and_derived_type.get((class_type, None))
if klass._derived_type_switch:
derived_type = serialized_object["attributes"][klass._derived_type_switch]
klass = cls._model_classes_by_type_and_derived_type.get(
(class_type, derived_type)
)
return klass
@classmethod
def _serialize_filter_attribute(cls, name, value):
"""Serialize a single value for a filter.
Allow the given value to be serialized using the serialization logic
of the given attribute. This method should only be used to serialize
a filter value.
Parameters
----------
name : str
The name of the attribute used for serialization logic.
value : object
The value to be serialized.
Raises
------
AttributeValidationError
If the attribute is not serializable.
"""
attribute_type = cls._get_attribute_type(name)
if isinstance(attribute_type, ListAttribute):
attribute_type = attribute_type._attribute_type
return attribute_type.serialize(value)
def _set_modified(self, attr_name, changed=True, validate=True):
        # Verify it is allowed to be set
attr = self._get_attribute_type(attr_name)
if validate:
if attr._readonly:
raise AttributeValidationError(
"Can't set '{}' because it is a readonly attribute".format(
attr_name
)
)
if not attr._mutable and attr_name in self._attributes:
raise AttributeValidationError(
"Can't set '{}' because it is an immutable attribute".format(
attr_name
)
)
if changed:
self._modified.add(attr_name)
def _serialize(self, attrs, jsonapi_format=False):
serialized = {}
for name in attrs:
value = self._attributes[name]
attribute_type = self._get_attribute_type(name)
if attribute_type._serializable:
serialized[name] = attribute_type.serialize(
value, jsonapi_format=jsonapi_format
)
return serialized
@check_deleted
def update(self, ignore_errors=False, **kwargs):
"""Update multiple attributes at once using the given keyword arguments.
Parameters
----------
ignore_errors : bool, optional
``False`` by default. When set to ``True``, it will suppress
`AttributeValidationError` and `AttributeError`. Any given attribute that
causes one of these two exceptions will be ignored, all other attributes
will be set to the given values.
Raises
------
AttributeValidationError
If one or more of the attributes being updated are immutable.
AttributeError
If one or more of the attributes are not part of this catalog object.
DeletedObjectError
If this catalog object was deleted.
"""
original_values = dict(self._attributes)
original_modified = set(self._modified)
for (name, val) in iteritems(kwargs):
try:
# A non-existent attribute will raise an AttributeError
attribute_definition = self._get_attribute_type(name)
# A bad value will raise an AttributeValidationError
attribute_definition.__set__(self, val)
except (AttributeError, AttributeValidationError):
if ignore_errors:
pass
else:
self._attributes = original_values
self._modified = original_modified
raise
def serialize(self, modified_only=False, jsonapi_format=False):
"""Serialize the catalog object into json.
Parameters
----------
modified_only : bool, optional
Whether only modified attributes should be serialized. ``False`` by
default. If set to ``True``, only those attributes that were modified since
the last time the catalog object was retrieved or saved will be included.
jsonapi_format : bool, optional
Whether to use the ``data`` element for catalog objects. ``False`` by
default. When set to ``False``, the serialized data will directly contain
the attributes of the catalog object. If set to ``True``, the serialized
data will follow the exact JSONAPI with a top-level ``data`` element which
contains ``id``, ``type``, and ``attributes``. The latter will contain
the attributes of the catalog object.
"""
keys = self._modified if modified_only else self._attributes.keys()
attributes = self._serialize(keys, jsonapi_format=jsonapi_format)
if jsonapi_format:
return self._client.jsonapi_document(self._doc_type, attributes, self.id)
else:
return attributes
def _clear_modified_attributes(self):
self._modified = set()
@property
def state(self):
"""DocumentState: The state of this catalog object."""
if self._deleted:
return DocumentState.DELETED
if self._saved is False:
return DocumentState.UNSAVED
elif self.is_modified:
return DocumentState.MODIFIED
else:
return DocumentState.SAVED
@classmethod
def get(cls, id, client=None):
"""Get an existing object from the Descartes Labs catalog.
If the Descartes Labs catalog object is found, it will be returned in the
`~descarteslabs.catalog.DocumentState.SAVED` state. Subsequent changes will
put the instance in the `~descarteslabs.catalog.DocumentState.MODIFIED` state,
and you can use :py:meth:`save` to commit those changes and update the Descartes
Labs catalog object. Also see the example for :py:meth:`save`.
For bands, if you request a specific band type, for example
:meth:`SpectralBand.get`, you will only receive that type. Use :meth:`Band.get`
to receive any type.
Parameters
----------
id : str
The id of the object you are requesting.
client : CatalogClient, optional
A `CatalogClient` instance to use for requests to the Descartes Labs
catalog. The
:py:meth:`~descarteslabs.catalog.CatalogClient.get_default_client` will
be used if not set.
Returns
-------
:py:class:`~descarteslabs.catalog.CatalogObject` or None
The object you requested, or ``None`` if an object with the given `id`
does not exist in the Descartes Labs catalog.
Raises
------
ClientError or ServerError
:ref:`Spurious exception <network_exceptions>` that can occur during a
network request.
"""
try:
data, related_objects = cls._send_data(
method=HttpRequestMethod.GET, id=id, client=client
)
except NotFoundError:
return None
model_class = cls._get_model_class(data)
if not issubclass(model_class, cls):
return None
return model_class(
id=data["id"],
client=client,
_saved=True,
_relationships=data.get("relationships"),
_related_objects=related_objects,
**data["attributes"]
)
@classmethod
def get_or_create(cls, id, client=None, **kwargs):
"""Get an existing object from the Descartes Labs catalog or create a new object.
If the Descartes Labs catalog object is found, and the remainder of the
arguments do not differ from the values in the retrieved instance, it will be
returned in the `~descarteslabs.catalog.DocumentState.SAVED` state.
If the Descartes Labs catalog object is found, and the remainder of the
arguments update one or more values in the instance, it will be returned in
the `~descarteslabs.catalog.DocumentState.MODIFIED` state.
If the Descartes Labs catalog object is not found, it will be created and the
state will be `~descarteslabs.catalog.DocumentState.UNSAVED`. Also see the
example for :py:meth:`save`.
Parameters
----------
id : str
The id of the object you are requesting.
client : CatalogClient, optional
A `CatalogClient` instance to use for requests to the Descartes Labs
catalog. The
:py:meth:`~descarteslabs.catalog.CatalogClient.get_default_client` will
be used if not set.
kwargs : dict, optional
With the exception of readonly attributes (`created`, `modified`), any
attribute of a catalog object can be set as a keyword argument (Also see
`ATTRIBUTES`).
Returns
-------
:py:class:`~descarteslabs.catalog.CatalogObject`
The requested catalog object that was retrieved or created.
"""
obj = cls.get(id, client=client)
if obj is None:
obj = cls(id=id, client=client, **kwargs)
else:
obj.update(**kwargs)
return obj
@classmethod
def get_many(cls, ids, ignore_missing=False, client=None):
"""Get existing objects from the Descartes Labs catalog.
All returned Descartes Labs catalog objects will be in the
`~descarteslabs.catalog.DocumentState.SAVED` state. Also see :py:meth:`get`.
For bands, if you request a specific band type, for example
:meth:`SpectralBand.get_many`, you will only receive that type. Use
:meth:`Band.get_many` to receive any type.
Parameters
----------
ids : list(str)
A list of identifiers for the objects you are requesting.
ignore_missing : bool, optional
Whether to raise a `~descarteslabs.client.exceptions.NotFoundError`
exception if any of the requested objects are not found in the Descartes
Labs catalog. ``False`` by default which raises the exception.
client : CatalogClient, optional
A `CatalogClient` instance to use for requests to the Descartes Labs
catalog. The
:py:meth:`~descarteslabs.catalog.CatalogClient.get_default_client` will
be used if not set.
Returns
-------
list(:py:class:`~descarteslabs.catalog.CatalogObject`)
List of the objects you requested in the same order.
Raises
------
NotFoundError
If any of the requested objects do not exist in the Descartes Labs catalog
and `ignore_missing` is ``False``.
ClientError or ServerError
:ref:`Spurious exception <network_exceptions>` that can occur during a
network request.
"""
if not isinstance(ids, list) or any(not isinstance(id_, str) for id_ in ids):
raise TypeError("ids must be a list of strings")
id_filter = {"name": "id", "op": "eq", "val": ids}
raw_objects, related_objects = cls._send_data(
method=HttpRequestMethod.PUT,
client=client,
json={"filter": json.dumps([id_filter], separators=(",", ":"))},
)
if not ignore_missing:
received_ids = set(obj["id"] for obj in raw_objects)
missing_ids = set(ids) - received_ids
if len(missing_ids) > 0:
raise NotFoundError(
"Objects not found for ids: {}".format(", ".join(missing_ids))
)
objects = [
model_class(
id=obj["id"],
client=client,
_saved=True,
_relationships=obj.get("relationships"),
_related_objects=related_objects,
**obj["attributes"]
)
for obj in raw_objects
for model_class in (cls._get_model_class(obj),)
if issubclass(model_class, cls)
]
return objects
@classmethod
@check_derived
def exists(cls, id, client=None):
"""Checks if an object exists in the Descartes Labs catalog.
Parameters
----------
id : str
The id of the object.
client : CatalogClient, optional
A `CatalogClient` instance to use for requests to the Descartes Labs
catalog. The
:py:meth:`~descarteslabs.catalog.CatalogClient.get_default_client` will
be used if not set.
Returns
-------
bool
Returns ``True`` if the given ``id`` represents an existing object in
the Descartes Labs catalog and ``False`` if not.
Raises
------
ClientError or ServerError
:ref:`Spurious exception <network_exceptions>` that can occur during a
network request.
"""
client = client or CatalogClient.get_default_client()
r = None
try:
r = client.session.head(cls._url + "/" + id)
except NotFoundError:
return False
return r and r.ok
@classmethod
@check_derived
def search(cls, client=None):
"""A search query for all objects of the type this class represents.
Parameters
----------
client : CatalogClient, optional
A `CatalogClient` instance to use for requests to the Descartes Labs
catalog. The
:py:meth:`~descarteslabs.catalog.CatalogClient.get_default_client` will
be used if not set.
Returns
-------
Search
An instance of the :py:class:`~descarteslabs.catalog.Search`
class.
Example
-------
>>> search = Product.search().limit(10)
>>> for result in search:
        ...     print(result.name)
"""
from .search import Search
return Search(cls, client=client)
@check_deleted
def save(self, extra_attributes=None):
"""Saves this object to the Descartes Labs catalog.
If this instance was created using the constructor, it will be in the
`~descarteslabs.catalog.DocumentState.UNSAVED` state and is considered a new
Descartes Labs catalog object that must be created. If the catalog object
already exists in this case, this method will raise a
`~descarteslabs.client.exceptions.BadRequestError`.
If this instance was retrieved using :py:meth:`get`, :py:meth:`get_or_create`
or any other way (for example as part of a :py:meth:`search`), and any of its
values were changed, it will be in the
`~descarteslabs.catalog.DocumentState.MODIFIED` state and the existing catalog
object will be updated.
If this instance was retrieved using :py:meth:`get`, :py:meth:`get_or_create`
or any other way (for example as part of a :py:meth:`search`), and none of its
values were changed, it will be in the
`~descarteslabs.catalog.DocumentState.SAVED` state, and if no `extra_attributes`
parameter is given, nothing will happen.
Parameters
----------
extra_attributes : dict, optional
A dictionary of attributes that should be sent to the catalog along with
attributes already set on this object. Empty by default. If not empty,
and the object is in the `~descarteslabs.catalog.DocumentState.SAVED`
state, it is updated in the Descartes Labs catalog even though no attributes
were modified.
Raises
------
ConflictError
If you're trying to create a new object and the object with given ``id``
already exists in the Descartes Labs catalog.
BadRequestError
If any of the attribute values are invalid.
DeletedObjectError
If this catalog object was deleted.
ClientError or ServerError
:ref:`Spurious exception <network_exceptions>` that can occur during a
network request.
Example
-------
>>> new_product = Product(
... id="my-product",
... name="My Product",
... description="This is a test product"
... )
>>> new_product.state
<DocumentState.UNSAVED: 'unsaved'>
>>> new_product.save()
>>> # ids will be automatically prefixed by the Descartes Labs catalog
>>> # with your organization id
>>> new_product.id
my_org_id:my-product
>>> # Now you can retrieve the product and update it
>>> existing_product = Product.get(new_product.id)
>>> existing_product.state
<DocumentState.SAVED: 'saved'>
>>> existing_product.name = "My Updated Product"
>>> existing_product.state
<DocumentState.MODIFIED: 'modified'>
>>> existing_product.save()
>>> existing_product.state
<DocumentState.SAVED: 'saved'>
>>> # After you delete it...
>>> existing_product.delete()
True
        >>> existing_product.state
<DocumentState.DELETED: 'deleted'>
"""
if self.state == DocumentState.SAVED and not extra_attributes:
# Noop, already saved in the catalog
return
if self.state == DocumentState.UNSAVED:
method = HttpRequestMethod.POST
json = self.serialize(modified_only=False, jsonapi_format=True)
else:
method = HttpRequestMethod.PATCH
json = self.serialize(modified_only=True, jsonapi_format=True)
if extra_attributes:
json["data"]["attributes"].update(extra_attributes)
data, related_objects = self._send_data(
method=method, id=self.id, json=json, client=self._client
)
self._initialize(
id=data["id"],
saved=True,
relationships=data.get("relationships"),
related_objects=related_objects,
**data["attributes"]
)
@check_deleted
def reload(self):
"""Reload all attributes from the Descartes Labs catalog.
Refresh the state of this catalog object from the object in the Descartes Labs
catalog. This may be necessary if there are concurrent updates and the object
in the Descartes Labs catalog was updated from another client. The instance
state must be in the `~descarteslabs.catalog.DocumentState.SAVED` state.
If you want to revert a modified object to its original one, you should use
:py:meth:`get` on the object class with the object's `id`.
Raises
------
ValueError
If the catalog object is not in the ``SAVED`` state.
DeletedObjectError
If this catalog object was deleted.
ClientError or ServerError
:ref:`Spurious exception <network_exceptions>` that can occur during a
network request.
Example
-------
        >>> p = Product.get("my_org_id:my_product_id")
>>> # Some time elapses and a concurrent change was made
>>> p.state
<DocumentState.SAVED: 'saved'>
>>> p.reload()
>>> # But once you make changes, you cannot use this method any more
>>> p.name = "My name has changed"
>>> p.reload()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python3/site-packages/descarteslabs/catalog/catalog_base.py", line 47, in wrapper
return f(self, *args, **kwargs)
File "/usr/lib/python3/site-packages/descarteslabs/catalog/catalog_base.py", line 879, in reload
\"""Reload all attributes from the Descartes Labs catalog.
ValueError: Product instance with id my_org_id:my_product_id has not been saved
>>> # But you can revert
>>> p = Product.get(p.id)
>>> p.state
<DocumentState.SAVED: 'saved'>
"""
if self.state != DocumentState.SAVED:
raise ValueError(
"{} instance with id {} has not been saved".format(
self.__class__.__name__, self.id
)
)
data, related_objects = self._send_data(
method=HttpRequestMethod.GET, id=self.id, client=self._client
)
# this will effectively wipe all current state & caching
self._initialize(
id=data["id"],
saved=True,
relationships=data.get("relationships"),
related_objects=related_objects,
**data["attributes"]
)
@classmethod
@check_derived
def delete(cls, id, client=None):
"""Delete the catalog object with the given `id`.
Parameters
----------
id : str
The id of the object to be deleted.
client : CatalogClient, optional
A `CatalogClient` instance to use for requests to the Descartes Labs
catalog. The
:py:meth:`~descarteslabs.catalog.CatalogClient.get_default_client` will
be used if not set.
Returns
-------
bool
``True`` if this object was successfully deleted. ``False`` if the
object was not found.
Raises
------
ConflictError
If the object has related objects (bands, images) that exist.
ClientError or ServerError
:ref:`Spurious exception <network_exceptions>` that can occur during a
network request.
Example
-------
>>> Image.delete('my-image-id')
"""
if client is None:
client = CatalogClient.get_default_client()
try:
client.session.delete(cls._url + "/" + id)
return True # non-200 will raise an exception
except NotFoundError:
return False
@check_deleted
def _instance_delete(self):
"""Delete this catalog object from the Descartes Labs catalog.
Once deleted, you cannot use the catalog object and should release any
references.
Raises
------
DeletedObjectError
If this catalog object was already deleted.
UnsavedObjectError
If this catalog object is being deleted without having been saved.
ClientError or ServerError
:ref:`Spurious exception <network_exceptions>` that can occur during a
network request.
"""
if self.state == DocumentState.UNSAVED:
raise UnsavedObjectError("You cannot delete an unsaved object.")
self._client.session.delete(self._url + "/" + self.id)
self._deleted = True # non-200 will raise an exception
@classmethod
@check_derived
def _send_data(cls, method, id=None, json=None, client=None):
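        # Build the request URL (adding the id for single-object calls and any default includes), send the request, and return the JSONAPI data plus any included related objects.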
client = client or CatalogClient.get_default_client()
session_method = getattr(client.session, method.lower())
url = cls._url
if method not in (HttpRequestMethod.POST, HttpRequestMethod.PUT):
url += "/" + id
if cls._default_includes:
url += "?include=" + ",".join(cls._default_includes)
r = session_method(url, json=json).json()
data = r["data"]
related_objects = cls._load_related_objects(r, client)
return data, related_objects
@classmethod
def _load_related_objects(cls, response, client):
related_objects = {}
related_objects_serialized = response.get("included")
if related_objects_serialized:
for serialized in related_objects_serialized:
model_class = cls._get_model_class(serialized)
if model_class:
related = model_class(
id=serialized["id"],
client=client,
_saved=True,
**serialized["attributes"]
)
related_objects[(serialized["type"], serialized["id"])] = related
return related_objects
class CatalogObject(CatalogObjectBase):
"""A base class for all representations of objects in the Descartes Labs catalog.
"""
owners = ListAttribute(
TypedAttribute(str),
doc="""list(str), optional: User, group, or organization IDs that own this object.
Defaults to [``user:current_user``, ``org:current_org``]. The owner can edit,
delete, and change access to this object. :ref:`See this note <product_note>`.
*Filterable*.
""",
)
readers = ListAttribute(
TypedAttribute(str),
doc="""list(str), optional: User, group, or organization IDs that can read this object.
Will be empty by default. This attribute is only available to the `owners`
of a catalog object. :ref:`See this note <product_note>`.
""",
)
writers = ListAttribute(
TypedAttribute(str),
doc="""list(str), optional: User, group, or organization IDs that can edit this object.
Writers will also have read permission. Writers will be empty by default.
See note below. This attribute is only available to the `owners` of a catalog
object. :ref:`See this note <product_note>`.
""",
)
extra_properties = ExtraPropertiesAttribute(
doc="""dict, optional: A dictionary of up to 50 key/value pairs.
        The keys of this dictionary must be strings, and the values of this dictionary
can be strings or numbers. This allows for more structured custom metadata
to be associated with objects.
"""
)
tags = ListAttribute(
TypedAttribute(str),
doc="""list, optional: A list of up to 20 tags.
The tags may support the classification and custom filtering of objects.
*Filterable*.
""",
)
def __new__(cls, *args, **kwargs):
return _new_abstract_class(cls, CatalogObject)
| [
"[email protected]"
] | |
d5d7bc6f783064bdf9f3c5a83dec9a899defc356 | 060967fa3e6e390ac0504172e6dea8421ffb9d98 | /2022/python2022/aoc/day01.py | f8899599170d8fd6ebfed8fd5aa9f6cefed79066 | [] | no_license | mreishus/aoc | 677afd18521b62c9fd141a45fec4b7bc844be259 | e89db235837d2d05848210a18c9c2a4456085570 | refs/heads/master | 2023-02-22T12:00:52.508701 | 2023-02-09T04:37:50 | 2023-02-09T04:39:44 | 159,991,022 | 16 | 3 | null | 2023-01-05T10:00:46 | 2018-12-01T22:00:22 | Python | UTF-8 | Python | false | false | 901 | py | #!/usr/bin/env python
"""
Advent Of Code 2022 Day 1
https://adventofcode.com/2022/day/1
"""
from typing import List
import heapq
def parse(filename: str) -> List[int]:
"""
Parse the input file into a list of integers.
Each integer is the sum of the numbers in a block.
"""
with open(filename) as file:
lines = file.read().strip()
blocks = lines.split("\n\n")
return [parse_block(block) for block in blocks]
def parse_block(block: str) -> int:
"""
param block: '1000\n2000\n3000'
return: 6000
"""
return sum(int(line) for line in block.splitlines())
class Day01:
"""AoC 2022 Day 01"""
@staticmethod
def part1(filename: str) -> int:
data = parse(filename)
return max(data)
@staticmethod
def part2(filename: str) -> int:
data = parse(filename)
return sum(heapq.nlargest(3, data))
| [
"[email protected]"
] | |
9e011f833190c003c501b34093c98fea67323259 | 6bf492920985e3741440ba53e1c7f8426b66ac1f | /snakemake_rules/rules/gatk/gatk_combine_variants.smk | 4aeb72ab60e819d714f462e05f027c1fd761730a | [
"MIT"
] | permissive | ukaraoz/snakemake-rules | 5b2ba7c9ec19d88b56067a46f66fd0c72e48c368 | 07e96afeb39307cdf35ecc8482dc1f8b62c120b9 | refs/heads/master | 2020-03-31T15:20:44.444006 | 2018-09-07T08:53:47 | 2018-09-07T08:53:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | smk | # -*- snakemake -*-
include: 'gatk.settings.smk'
include: 'gatk_variant_snp_JEXL_filtration.smk'
include: 'gatk_variant_indel_JEXL_filtration.smk'
config_default = {'gatk': {'combine_variants': _gatk_config_rule_default.copy()}}
update_config(config_default, config)
config = config_default
cmd = re.sub("-Xmx[0-9a-zA-Z]+", "-Xmx{mem}".format(mem=config['gatk']['combine_variants']['java_mem']), config['gatk']['cmd'])
rule gatk_combine_variants:
"""Run GATK CombineVariants to combine variant files.
The default rule combines files with suffixes filteredSNP.vcf and
filteredINDEL.vcf.
"""
wildcard_constraints:
suffix = "(.vcf|.vcf.gz)"
params: cmd = cmd + " -T " + COMBINE_VARIANTS,
options = " ".join(["-R", config['gatk']['combine_variants']['ref'],
config['gatk']['combine_variants']['options']]),
runtime = config['gatk']['combine_variants']['runtime']
input: "{prefix}.snp.filteredSNP{suffix}", "{prefix}.indel.filteredINDEL{suffix}"
output: "{prefix}.variants{suffix}"
threads: config['gatk']['combine_variants']['threads']
conda: "env.yaml"
shell: "command=\"{params.cmd} {params.options} $(echo {input} | sed -e 's/[^ ][^ ]*/-V &/g') -o {output}\"; eval \"${{command}}\""
| [
"[email protected]"
] | |
d052fff3e9a8ca167ab284868d1d61e0dbb654ce | 23f6dbacd9b98fdfd08a6f358b876d3d371fc8f6 | /rootfs/usr/lib/pymodules/python2.6/papyon/sip/transport.py | 20fe3f535a9615f5870fc8c179f1e13f2a9f1010 | [] | no_license | xinligg/trainmonitor | 07ed0fa99e54e2857b49ad3435546d13cc0eb17a | 938a8d8f56dc267fceeb65ef7b867f1cac343923 | refs/heads/master | 2021-09-24T15:52:43.195053 | 2018-10-11T07:12:25 | 2018-10-11T07:12:25 | 116,164,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | /usr/share/pyshared/papyon/sip/transport.py | [
"[email protected]"
] | |
bcdd0abe6750285e7fa6b8a7a95cdf85baaf302a | 3bb1cf4309e0e6488aeb3e5ae8b78138cfdaa002 | /kyopro_tenkei/90_54.py | 8de332b75aa23b0227743cdd237feacaa92f0a7a | [] | no_license | show2214/atcoder | 18a2dd0c2167fadeda2725a67d2d68d593b0bef9 | 7aae17b41b07bece746b34258b9514e145186327 | refs/heads/master | 2022-06-27T19:17:46.514876 | 2022-06-19T23:21:48 | 2022-06-19T23:21:48 | 249,148,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | N, M = map(int, input().split())
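# Reads M groups (each preceded by a line that is discarded), links group node N+i to its member vertices in a bipartite graph, runs BFS from vertex 0, and prints half the BFS distance for each of the N original vertices.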
g = [[] for _ in range(N + M)]
for i in range(M):
input()
for j in map(int, input().split()):
g[N + i] += j - 1,
g[j - 1] += N + i,
from collections import *
q = deque([0])
v = [0] + [-1] * (N + M)
while q:
c = q.popleft()
for b in g[c]:
if v[b] < 0:
v[b] = v[c] + 1
q += b,
print(*[i//2 for i in v[:N]]) | [
"[email protected]"
] | |
f1c5e69189bc8a90462b021c01db2e9eb96a1b0a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03239/s478967614.py | 7b473b8df981d413d6bb9ee6fe7d2eb9b2bdec4c | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | n, t = map(int, input().split())
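# Among the n routes, consider only those within the time limit t and print the minimum cost, or "TLE" if none qualifies.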
ans = 100000
for i in range(n):
c, tt = map(int, input().split())
if tt <= t:
ans = min(ans, c)
if ans == 100000:
print("TLE")
else:
print(ans)
| [
"[email protected]"
] | |
01ba65d8da0f32d363289cae1846027df987e112 | 28a462a28f443c285ca5efec181ebe36b147c167 | /tests/compile/basic/es2017/EscapeRegExpPattern.spec | 2289280aaa2efa7ea17a57ad0c73afcb15409c9a | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | kaist-plrg/jstar | 63e71f9156860dc21cccc33a9f6c638dfee448ea | 1282919127ea18a7e40c7a55e63a1ddaaf7d9db4 | refs/heads/main | 2022-07-22T08:12:34.947712 | 2022-02-27T04:19:33 | 2022-02-27T11:06:14 | 384,045,526 | 6 | 4 | NOASSERTION | 2022-02-27T11:05:26 | 2021-07-08T07:53:21 | Python | UTF-8 | Python | false | false | 1,405 | spec | 1. Let _S_ be a String in the form of a |Pattern[~U]| (|Pattern[+U]| if _F_ contains `"u"`) equivalent to _P_ interpreted as UTF-16 encoded Unicode code points (<emu-xref href="#sec-ecmascript-language-types-string-type"></emu-xref>), in which certain code points are escaped as described below. _S_ may or may not be identical to _P_; however, the internal procedure that would result from evaluating _S_ as a |Pattern[~U]| (|Pattern[+U]| if _F_ contains `"u"`) must behave identically to the internal procedure given by the constructed object's [[RegExpMatcher]] internal slot. Multiple calls to this abstract operation using the same values for _P_ and _F_ must produce identical results.
1. The code points `/` or any |LineTerminator| occurring in the pattern shall be escaped in _S_ as necessary to ensure that the String value formed by concatenating the Strings `"/"`, _S_, `"/"`, and _F_ can be parsed (in an appropriate lexical context) as a |RegularExpressionLiteral| that behaves identically to the constructed regular expression. For example, if _P_ is `"/"`, then _S_ could be `"\\/"` or `"\\u002F"`, among other possibilities, but not `"/"`, because `///` followed by _F_ would be parsed as a |SingleLineComment| rather than a |RegularExpressionLiteral|. If _P_ is the empty String, this specification can be met by letting _S_ be `"(?:)"`.
1. Return _S_. | [
"[email protected]"
] | |
a18b89fb83c54798265c1232a5612a39c65e53ff | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /ke4FSMdG2XYxbGQny_5.py | 3eb2cfb5e413340d184121753557a8220852eae5 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 203 | py |
def even_odd_transform(lst, n):
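    # Repeat n times: shift every even element down by 2 and every odd element up by 2; an empty list is returned unchanged.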
l=lst
if len(l)==0:
return l
for i in range(n):
for j in range(len(l)):
if l[j]%2==0:
l[j]=l[j]-2
else:
l[j]=l[j]+2
return l
| [
"[email protected]"
] | |
a1dd46d126b3b32636fc69f0ddcb514cf076741c | ea35facf6d823e93706b5f551408250b1e089be9 | /共通問題/9_2.py | e241ba9b38b5616e7210f25d70710da375922582 | [] | no_license | YukiNGSM/PythonStudy | 7a2d24f4762e384531eadd691858296b00b6a6b3 | 26310d0e007745ff4920ccd0fc3e51771cb2d5f1 | refs/heads/master | 2023-07-19T00:06:29.061255 | 2021-09-22T01:29:49 | 2021-09-22T01:29:49 | 409,025,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 70 | py | def hello():
for i in range(10):
        print('Hello')
hello() | [
"[email protected]"
] | |
9fce20f8fc036410b74c53272e3f3ba7e0bbea05 | 9468507c1beeb2cb69591889605ea155d2cb7a63 | /mysite/urls.py | 3c3cb29f215257dcd4b0b3f45a2b59dd078c5b1b | [] | no_license | nimal54/drf-polls | 2375e2f5b78670de40c72b51eb616a69e7f49a65 | 9b29230998146eb225e0cffa0703d6bed1cc876a | refs/heads/master | 2020-04-25T00:21:14.952917 | 2018-03-16T11:54:53 | 2018-03-16T11:54:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | from django.urls import include, path
from django.contrib import admin
urlpatterns = [
path('api/', include('polls.urls')),
path('admin/', admin.site.urls),
] | [
"[email protected]"
] | |
dc07e4c5023b62bbac3b5ed25bf1cbde99182341 | 54516826a15e4588decd4a040c3f3ae73b1f49df | /supplier/admin.py | d4cd50319ed56dfbd6c7cc180afdbbb36f403d02 | [] | no_license | boyombo/shylock | 9454b53ef285af692675be4fe7a176d1aa29ced1 | c63ac02b3ee18160ec94c9e8462165eaf7e0f3b5 | refs/heads/master | 2021-05-05T11:10:13.523616 | 2018-02-06T08:10:47 | 2018-02-06T08:10:47 | 118,116,949 | 0 | 1 | null | 2018-02-06T08:10:48 | 2018-01-19T11:24:14 | JavaScript | UTF-8 | Python | false | false | 147 | py | from django.contrib import admin
from supplier.models import Supplier
@admin.register(Supplier)
class SupplierAdmin(admin.ModelAdmin):
pass
| [
"[email protected]"
] | |
39ab273dae34141056fb99b2a557a0c095a9ee09 | 8cd90c5b92fe85158226de32b1fbb4c34ebd658b | /oscar_docdata/models.py | f3295ad74437b13549e68019e34d3e7aedc771ad | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | mvantellingen/django-oscar-docdata | 772ec3db372f9571cf62932ad2fe945c65fd2d7f | 983d3f8144e1feb67d4a2c5bb98b499e69e4ad44 | refs/heads/master | 2023-08-25T06:33:59.105290 | 2016-06-14T12:41:37 | 2016-06-14T12:41:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,043 | py | from decimal import Decimal as D
from django.db import models
from django.utils.translation import ugettext_lazy as _
from oscar_docdata.managers import DocdataOrderManager
from . import appsettings
try:
from polymorphic.models import PolymorphicModel # django-polymorphic 0.8
except ImportError:
from polymorphic import PolymorphicModel
class DocdataOrder(models.Model):
"""
Tracking of the order which is sent to docdata.
"""
# Simplified internal status codes.
# Lowercased on purpose to avoid mixing the statuses together.
STATUS_NEW = 'new' # Initial state
STATUS_IN_PROGRESS = 'in_progress' # In the redirect phase
STATUS_PENDING = 'pending' # Waiting for user to complete payment (e.g. credit cards)
STATUS_PAID = 'paid' # End of story, paid!
STATUS_PAID_REFUNDED = 'paid_refunded' # Paid, and performed a partial refund
STATUS_CANCELLED = 'cancelled' # End of story, cancelled
STATUS_CHARGED_BACK = 'charged_back' # End of story, consumer asked for charge back
STATUS_REFUNDED = 'refunded' # End of story, refunded, merchant refunded
STATUS_EXPIRED = 'expired' # No results of customer, order was closed.
STATUS_UNKNOWN = 'unknown' # Help!
STATUS_CHOICES = (
(STATUS_NEW, _("New")),
(STATUS_IN_PROGRESS, _("In Progress")),
(STATUS_PENDING, _("Pending")),
(STATUS_PAID, _("Paid")),
(STATUS_PAID_REFUNDED, _("Paid, part refunded")),
(STATUS_CANCELLED, _("Cancelled")),
(STATUS_CHARGED_BACK, _("Charged back")),
(STATUS_REFUNDED, _("Refunded")),
(STATUS_EXPIRED, _("Expired")),
(STATUS_UNKNOWN, _("Unknown")),
)
merchant_name = models.CharField(_("Docdata account"), max_length=100, default=appsettings.DOCDATA_MERCHANT_NAME)
merchant_order_id = models.CharField(_("Order ID"), max_length=100, default='')
order_key = models.CharField(_("Payment cluster ID"), max_length=200, default='', unique=True)
status = models.CharField(_("Status"), max_length=50, choices=STATUS_CHOICES, default=STATUS_NEW)
language = models.CharField(_("Language"), max_length=5, blank=True, default='en')
# Track sent information
total_gross_amount = models.DecimalField(_("Total gross amount"), max_digits=15, decimal_places=2)
currency = models.CharField(_("Currency"), max_length=10)
country = models.CharField(_("Country_code"), max_length=2, null=True, blank=True)
# Track received information
total_registered = models.DecimalField(_("Total registered"), max_digits=15, decimal_places=2, default=D('0.00'))
total_shopper_pending = models.DecimalField(_("Total shopper pending"), max_digits=15, decimal_places=2, default=D('0.00'))
total_acquirer_pending = models.DecimalField(_("Total acquirer pending"), max_digits=15, decimal_places=2, default=D('0.00'))
total_acquirer_approved = models.DecimalField(_("Total acquirer approved"), max_digits=15, decimal_places=2, default=D('0.00'))
total_captured = models.DecimalField(_("Total captured"), max_digits=15, decimal_places=2, default=D('0.00'))
total_refunded = models.DecimalField(_("Total refunded"), max_digits=15, decimal_places=2, default=D('0.00'))
    total_charged_back = models.DecimalField(_("Total charged back"), max_digits=15, decimal_places=2, default=D('0.00'))
# Internal info.
created = models.DateTimeField(_("created"), auto_now_add=True)
updated = models.DateTimeField(_("updated"), auto_now=True)
objects = DocdataOrderManager()
class Meta:
ordering = ('-created', '-updated')
verbose_name = _("Docdata Order")
verbose_name_plural = _("Docdata Orders")
def __unicode__(self):
return self.order_key
def __repr__(self):
return "<DocdataOrder: {0}, {1} status={2}>".format(self.order_key, self.merchant_order_id, self.status)
@property
def latest_payment(self):
try:
return self.payments.order_by('-payment_id').all()[0]
except IndexError:
return None
def cancel(self):
"""
Cancel an order in Docdata.
"""
from .facade import get_facade
facade = get_facade()
facade.cancel_order(self)
cancel.alters_data = True
class DocdataPayment(PolymorphicModel):
"""
A reported Docdata payment.
This is a summarized version of a Docdata payment transaction,
as returned by the status API call.
Some payment types have additional fields, which are stored as subclass.
"""
docdata_order = models.ForeignKey(DocdataOrder, related_name='payments')
payment_id = models.CharField(_("Payment id"), max_length=100, default='', blank=True, primary_key=True)
# Note: We're not using choices here so that we can write unknown statuses if they are presented by Docdata.
status = models.CharField(_("status"), max_length=30, default='NEW')
# The payment method id from Docdata (e.g. IDEAL, MASTERCARD, etc)
payment_method = models.CharField(max_length=60, default='', blank=True)
# Track the various amounts associated with this source
confidence_level = models.CharField(_("Confidence level"), max_length=30, default='', editable=False)
amount_allocated = models.DecimalField(_("Amount Allocated"), decimal_places=2, max_digits=12, default=D('0.00'), editable=False)
amount_debited = models.DecimalField(_("Amount Debited"), decimal_places=2, max_digits=12, default=D('0.00'), editable=False)
amount_refunded = models.DecimalField(_("Amount Refunded"), decimal_places=2, max_digits=12, default=D('0.00'), editable=False)
    amount_chargeback = models.DecimalField(_("Amount Charged Back"), decimal_places=2, max_digits=12, default=D('0.00'), editable=False)
# Internal info.
created = models.DateTimeField(_("created"), auto_now_add=True)
updated = models.DateTimeField(_("updated"), auto_now=True)
def __unicode__(self):
return self.payment_id
class Meta:
ordering = ('payment_id',)
verbose_name = _("Payment")
verbose_name_plural = _("Payments")
# NOTE: currently unused.
# DirectDebit is used for periodic transfers (e.g. "Automatische incasso" in The Netherlands)
class DocdataDirectDebitPayment(DocdataPayment):
"""
Web direct debit direct payment.
"""
holder_name = models.CharField(max_length=35) # max_length from Docdata
holder_city = models.CharField(max_length=35) # max_length from Docdata
holder_country_code = models.CharField(_("Country_code"), max_length=2, null=True, blank=True)
# Note: there is django-iban for validated versions of these fields.
# Not needed here.
iban = models.CharField(max_length=34)
bic = models.CharField(max_length=11)
class Meta:
ordering = ('-created', '-updated')
verbose_name = _("Direct Debit Payment")
        verbose_name_plural = _("Direct Debit Payments")
| [
"[email protected]"
] | |
264aa98cdced1e3a3b21e731910d92a4f81a7489 | 5db3d51ff9a0bd7647c2315a358cb4ec9299d9d5 | /analyzeBusReportFnv2.py | f24d495ec4b04167e7b50dce7763a807fe53f163 | [] | no_license | bikiranguha/Thesis_project | 866385f51bd476448730c8169eb0b3c1dacba84e | 1a52ba0fed86afb522bda067b8011b6940b4088d | refs/heads/master | 2020-03-31T06:52:16.627848 | 2018-12-28T02:59:33 | 2018-12-28T02:59:33 | 151,997,984 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,684 | py | """
Function which generates a bus flow report of comed buses
"""
def BusReport(flowReportFile,Raw):
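    # Parse a PSS/E-style bus flow report into a dict keyed by from-bus, collecting to-bus, MW, MVAR and circuit-id lists for each branch.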
from getBusDataFn import getBusData
BusDataDict = getBusData(Raw)
ComedPlusBoundarySet = set()
flowDict = {}
#FromBusLines = []
#ToBusLines = []
class flowReport(object):
def __init__(self):
self.toBusList = []
self.MWList = []
self.MVARList = []
self.MVAList = []
self.cktID = []
"""
with open(Raw,'r') as f:
filecontent = f.read()
fileLines = filecontent.split('\n')
branchStartIndex = fileLines.index('0 / END OF GENERATOR DATA, BEGIN BRANCH DATA') + 1
branchEndIndex = fileLines.index('0 / END OF BRANCH DATA, BEGIN TRANSFORMER DATA')
for i in range(branchStartIndex, branchEndIndex):
line = fileLines[i]
words = line.split(',')
Bus1 = words[0].strip()
Bus2 = words[1].strip()
try:
Bus1Area = BusDataDict[Bus1].area
Bus2Area = BusDataDict[Bus2].area
except: # for buses '243083' and '638082'
continue
if Bus1Area == '222' and Bus2Area == '222':
ComedPlusBoundarySet.add(Bus1)
ComedPlusBoundarySet.add(Bus2)
if Bus1Area == '222' and Bus2Area != '222':
ComedPlusBoundarySet.add(Bus1)
ComedPlusBoundarySet.add(Bus2)
if Bus1Area != '222' and Bus2Area == '222':
ComedPlusBoundarySet.add(Bus1)
ComedPlusBoundarySet.add(Bus2)
for Bus in BusDataDict:
area = BusDataDict[Bus].area
if area == '222':
ComedPlusBoundarySet.add(Bus)
"""
with open(flowReportFile,'r') as f:
filecontent = f.read()
fileLines = filecontent.split('\n')
indices = [i for i, line in enumerate(fileLines) if line.startswith('BUS')]
for i in indices:
#print i
line = fileLines[i]
FromBus = line[4:10].strip()
"""
if FromBus not in ComedPlusBoundarySet:
continue
"""
flowDict[FromBus] = flowReport()
i+=2
line = fileLines[i]
while not 'M I S M A T C H' in line:
if 'RATING' in line:
break
if 'GENERATION' in line or 'LOAD' in line or 'SHUNT' in line:
i+=1
line = fileLines[i]
continue
toBus = line[4:10].strip()
MW=float(line[34:42].strip())
MVAR=float(line[42:50].strip())
cktID = line[31:34]
#print toBus
flowDict[FromBus].toBusList.append(toBus)
flowDict[FromBus].MWList.append(MW)
flowDict[FromBus].MVARList.append(MVAR)
flowDict[FromBus].cktID.append(cktID)
#ToBusLines.append(toBus)
i+=1
if i >=len(fileLines):
break
line = fileLines[i]
return flowDict
"""
with open('tmp.txt','w') as f:
for Bus in ToBusLines:
f.write(Bus)
f.write('\n')
"""
if __name__ == '__main__':
flowReportFile = 'BusReportsRawCropped_0723.txt'
Raw = 'RawCropped_0723v2.raw'
flowDict = BusReport(flowReportFile,Raw) | [
"[email protected]"
] |