| Column | Type | Range / values |
|---|---|---|
| blob_id | string | lengths 40–40 |
| directory_id | string | lengths 40–40 |
| path | string | lengths 3–616 |
| content_id | string | lengths 40–40 |
| detected_licenses | sequence | lengths 0–112 |
| license_type | string (classes) | 2 values |
| repo_name | string | lengths 5–115 |
| snapshot_id | string | lengths 40–40 |
| revision_id | string | lengths 40–40 |
| branch_name | string (classes) | 777 values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M (⌀: nulls present) |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string (classes) | 22 values |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 (⌀: nulls present) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 (⌀: nulls present) |
| gha_language | string (classes) | 149 values |
| src_encoding | string (classes) | 26 values |
| language | string (classes) | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string (classes) | 188 values |
| content | string | lengths 3 – 10.2M |
| authors | sequence | lengths 1–1 |
| author_id | string | lengths 1–132 |
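A minimal sketch of how rows with this schema could be read, assuming the Hugging Face `datasets` library; the dataset path below is a placeholder, not the actual name of this dataset:

```python
from datasets import load_dataset

# "user/dataset-name" is a placeholder; substitute the real dataset path.
rows = load_dataset("user/dataset-name", split="train", streaming=True)
for row in rows:
    # Each row pairs repository metadata with the raw file text in `content`.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    break
```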
9d09fcf610c797bccf89f3f24ef9afffc8933042 | 67d8173a716da10a7350213d98938aae9f2115ce | /LeetCode/LC_PY_ANSWERS/sort-an-array.py | 0cecad74707c151f8b38b2b110494d2e85eee7de | [
"MIT"
] | permissive | jxie0755/Learning_Python | 94490d41bdf93acf8396f843328e38b6da310b0f | 143422321cbc3715ca08f6c3af8f960a55887ced | refs/heads/master | 2021-11-02T22:47:35.790239 | 2021-09-26T04:26:23 | 2021-09-26T04:26:23 | 101,445,132 | 0 | 2 | null | 2019-02-19T15:48:44 | 2017-08-25T22:00:16 | Python | UTF-8 | Python | false | false | 2,412 | py | # Time: O(nlogn)
# Space: O(n)
# merge sort solution
class Solution(object):
def sortArray(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
def mergeSort(start, end, nums):
if end - start <= 1:
return
mid = start + (end - start) / 2
mergeSort(start, mid, nums)
mergeSort(mid, end, nums)
right = mid
tmp = []
for left in xrange(start, mid):
while right < end and nums[right] < nums[left]:
tmp.append(nums[right])
right += 1
tmp.append(nums[left])
nums[start:start + len(tmp)] = tmp
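# Note (added comment): the untouched tail of the right half, nums[start+len(tmp):end], is already
# sorted and not smaller than anything copied into tmp, so only the first len(tmp) slots are rewritten.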
mergeSort(0, len(nums), nums)
return nums
# Time: O(nlogn), on average
# Space: O(logn)
import random
# quick sort solution
class Solution2(object):
def sortArray(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
def kthElement(nums, left, mid, right, compare):
def PartitionAroundPivot(left, right, pivot_idx, nums, compare):
new_pivot_idx = left
nums[pivot_idx], nums[right] = nums[right], nums[pivot_idx]
for i in xrange(left, right):
if compare(nums[i], nums[right]):
nums[i], nums[new_pivot_idx] = nums[new_pivot_idx], nums[i]
new_pivot_idx += 1
nums[right], nums[new_pivot_idx] = nums[new_pivot_idx], nums[right]
return new_pivot_idx
right -= 1
while left <= right:
pivot_idx = random.randint(left, right)
new_pivot_idx = PartitionAroundPivot(left, right, pivot_idx, nums, compare)
if new_pivot_idx == mid - 1:
return
elif new_pivot_idx > mid - 1:
right = new_pivot_idx - 1
else: # new_pivot_idx < mid - 1.
left = new_pivot_idx + 1
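# Note (added comment): kthElement is a randomized quickselect; it keeps partitioning until the value
# that belongs at index mid-1 sits in its final place, so quickSort below can then recurse on both halves independently.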
def quickSort(start, end, nums):
if end - start <= 1:
return
mid = start + (end - start) / 2
kthElement(nums, start, mid, end, lambda a, b: a < b)
quickSort(start, mid, nums)
quickSort(mid, end, nums)
quickSort(0, len(nums), nums)
return nums
| [
"[email protected]"
] | |
e58e9401d8429723764c02edf926adfbbd8758ca | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_drafts.py | 6da1b4da4e3df04fbf81a9bba113d2c83e568862 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py |
from xai.brain.wordbase.verbs._draft import _DRAFT
#class header
class _DRAFTS(_DRAFT, ):
def __init__(self,):
_DRAFT.__init__(self)
self.name = "DRAFTS"
self.specie = 'verbs'
self.basic = "draft"
self.jsondata = {}
| [
"[email protected]"
] | |
1920724bea68c7268d4dc99408f617f42c248858 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5636311922769920_1/Python/aelg/prob4.py | 4fbd0b4d1fa1da615948253f8fc70a6d1a53b3c0 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | #!/usr/bin/python3
def solve():
inputList = list(map(int, input().split()))
k = inputList[0]
c = inputList[1]
s = inputList[2]
res = []
students = s
originalTile = 0
while students > 0:
studentPos = 0
for i in range(c-1, -1, -1):
studentPos += originalTile*(k**i)
if originalTile == k-1:
res.append(studentPos)
return " ".join(map(lambda x: str(x+1), res))
originalTile += 1
res.append(studentPos)
students -= 1
return 'IMPOSSIBLE'
def main():
cases = int(input())
for i in range(0, cases):
print("Case #%d: %s" % (i+1, solve()))
main() | [
"[email protected]"
] | |
74f545ddd8aead850b286517ff15de1cb279c2a1 | cbda89443b351bb2047180dad4e300c13dc3df7f | /Crystals/Morpurgo_all_atoms_Polbinds_qsplit_fittedscreens/Jobs_chelpg/PDIF-CN2/PDIF-CN2_cation_neut_inner3_outer0/PDIF-CN2_cation_neut_inner3_outer0.py | 0529c196b51dd6089ab4ee51b1366f451a48a8aa | [] | no_license | sheridanfew/pythonpolarisation | 080f52979f98d26360a46412a10c8e3f51ee4549 | 178e2684e9a239a8e60af5f7b1eb414ac5f31e92 | refs/heads/master | 2021-07-10T01:07:40.978790 | 2021-03-11T16:56:37 | 2021-03-11T16:56:37 | 96,101,351 | 0 | 0 | null | 2017-07-03T13:37:06 | 2017-07-03T10:54:52 | null | UTF-8 | Python | false | false | 7,012 | py | import sys
sys.path.append('../../../../../')
from BasicElements import *
from BasicElements.Register import GetRegister
from BasicElements.MoleculeFactory import ReadMoleculeType
from BasicElements.MoleculeFactory import GetMolecule
from BasicElements.Crystal import *
from Polarizability.GetDipoles import get_dipoles,split_dipoles_onto_atoms
from Polarizability import *
from Polarizability.GetEnergyFromDips import *
from Polarizability.JMatrix import JMatrix
import numpy as np
from math import *
from time import gmtime, strftime
import os
print strftime("%a, %d %b %Y %X +0000", gmtime())
qdict={"anion": -1.0, "neut": 0.0, "cation": 1.0}
name='PDIF-CN2_cation_neut_inner3_outer0'
#For crystals here, all cubic and centred at centre
insize=3
#number of TVs in each dir central mol is from edge of inner region
outsize=0
state='cation'
mols_cen=['PDIF_CN2_mola_cation_aniso_cifstruct_chelpg.xyz']
mols_sur=['PDIF_CN2_mola_neut_aniso_cifstruct_chelpg.xyz']
mols_outer=['sp_PDIFCN2_neut.xyz']
screenradius=2.5533199878
#From cif:
'''
PDIF-CN2
_cell_length_a 5.2320(14)
_cell_length_b 7.638(2)
_cell_length_c 18.819(5)
_cell_angle_alpha 92.512(5)
_cell_angle_beta 95.247(5)
_cell_angle_gamma 104.730(4)
_cell_volume 722.5(3)
'''
#Get translation vectors:
a=5.232014/0.5291772109217
b=7.6382/0.5291772109217
c=18.8195/0.5291772109217
alpha=92.5125*(pi/180)
beta=95.2475*(pi/180)
gamma=104.7304*(pi/180)
cif_unit_cell_volume=722.53/(a*b*c*(0.5291772109217**3))
cell_volume=sqrt(1 - (cos(alpha)**2) - (cos(beta)**2) - (cos(gamma)**2) + (2*cos(alpha)*cos(beta)*cos(gamma)))
#Converts frac coords to carts
matrix_to_cartesian=np.matrix( [[a, b*cos(gamma), c*cos(beta)],
[0, b*sin(gamma), c*(cos(alpha) - cos(beta)*cos(gamma))/sin(gamma)],
[0, 0, c*cell_volume/sin(gamma)]])
#carts to frac
matrix_to_fractional=matrix_to_cartesian.I
#TVs, TV[0,1,2] are the three translation vectors.
TV=matrix_to_cartesian.T
cut=8.0
totsize=insize+outsize
#number of TVs in each dir nearest c inner mol is from edge of outer region
cenpos=[totsize,totsize,totsize]
length=[2*totsize+1,2*totsize+1,2*totsize+1]
maxTVs=insize
outer_maxTVs=insize+outsize
#for diamond outer, don't specify for cube and will fill to cube edges.
print 'name: ',name,'mols_cen: ', mols_cen,' mols_sur: ',mols_sur,' TVs: ', TV
# Place Molecules
crystal=Crystal(name=name,mols_cen=mols_cen,mols_sur=mols_sur,cenpos=cenpos,length=length,TVs=TV,maxTVs=maxTVs,mols_outer=mols_outer,outer_maxTVs=outer_maxTVs)
crystal().ModifyPolarizabilityCry(jmtype='TholeExp',fittype='empirical')
#crystal._mols contains all molecules.
#mols[0] contains a list of all molecules in position a, mols[1] all mols in pos'n b, etc.
#mols[0][x,y,z] contains molecule a in position x,y,z
#mols may as such be iterated over in a number of ways to consider different molecules.
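#For illustration only (not part of the original script), every molecule could be visited with nested loops, e.g.:
#    for molincell in range(len(crystal()._mols)):
#        for a in range(length[0]):
#            for b in range(length[1]):
#                for c in range(length[2]):
#                    mol = crystal()._mols[molincell][a][b][c]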
print 'state',state
#print 'q: ', qdict[state]
#for atom in crystal()._mols[0][crystal()._cenpos[0]][crystal()._cenpos[1]][crystal()._cenpos[2]]():
# atom()._crg=qdict[state]
crystal().print_posns()
#Calculate Properties:
print strftime("%a, %d %b %Y %X +0000", gmtime())
E0 = np.matrix([0.,0.,0.])
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc jm'
#screenradius=1.6623/(Natoms**2)
jm = JMatrix(jmtype='TholeExp',screenradius=screenradius)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc dips:'
d = get_dipoles(E0=E0,jm=jm._m,cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
Efield = get_electric_field(E0)
potential = get_potential()
print strftime("%a, %d %b %Y %X +0000", gmtime())
#print 'dips', d
print 'splitting dips onto atoms'
split_d = split_dipoles_onto_atoms(d)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'summing dips:'
tot = np.matrix([0.,0.,0.])
for dd in split_d:
tot += dd
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'total dip moment', tot
Uqq = np.multiply(get_U_qq(potential=potential),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqq', Uqq
Uqd = np.multiply(get_U_qdip(dips=d,Efield=Efield),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqd', Uqd
Udd = np.multiply(get_U_dipdip(jm=jm._m,dips=d.T),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Udd', Udd
energyev = Udd+Uqd+Uqq
print 'energyev', energyev
energy=energyev/27.211
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Making .dat cross sections for gnuplot'
# print TVs
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_TVs.dat' % name, 'w')
TVstr=str(str(TV[0,0]) + ' ' + str(TV[0,1]) + ' ' + str(TV[0,2]) + '\n' + str(TV[1,0]) + ' ' + str(TV[1,1]) + ' ' + str(TV[1,2]) + '\n' + str(TV[2,0]) + ' ' + str(TV[2,1]) + ' ' + str(TV[2,2])+ '\n')
f.write(TVstr)
f.flush()
f.close()
# print dipoles
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_dipoles.dat' % name, 'w')
for dd in split_d:
dstr=str(dd)
f.write(dstr)
f.write('\n')
f.flush()
f.close()
# print properties for charge in centrepos
time=strftime("%a, %d %b %Y %X +0000", gmtime())
f = open('%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\tenergyev\tUqq\tUqd\tUdd\tTotdip_x\tTotdip_y\tTotdip_z')
f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,energyev,Uqq,Uqd,Udd,tot[0,0],tot[0,1],tot[0,2]))
f.flush()
f.close()
# print header for polbinds
f = open('polbind_energies_%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\ta\tb\tc\tmolincell\tpolbind(eV)')
f.flush()
f.close()
# POL BIND ENERGIES
#Note that this assumes a cube, and values for which
for dist in range(0,(length[0]/2)+1,1):
print '\n\nDIST: ', dist, '\n'
for a in range(crystal()._cenpos[0]-dist,crystal()._cenpos[0]+dist+1,1):
for b in range(crystal()._cenpos[1]-dist,crystal()._cenpos[1]+dist+1,1):
for c in range(crystal()._cenpos[2]-dist,crystal()._cenpos[2]+dist+1,1):
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'a,b,c',a,b,c
for molincell in range(0,len(crystal()._mols),1):
crystal().calc_polbind(a1=crystal()._cenpos[0],b1=crystal()._cenpos[1],c1=crystal()._cenpos[2],molincell1=0,a2=a,b2=b,c2=c,molincell2=molincell,jm=jm._m,oldUqd=Uqd)
print 'polbind: ', crystal()._polbinds[molincell][a][b][c]
f = open('polbind_energies_%s_properties.csv' % name, 'a')
f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,a,b,c,molincell,crystal()._polbinds[molincell][a][b][c]))
f.flush()
f.close()
# Redo this and overwrite after each set to ensure we have some even if not all polbinds complete
crystal().print_polbinds()
print 'Job Completed Successfully.'
| [
"[email protected]"
] | |
ea5b9b937f47326657b9da399ad06bdf9c9d3f9f | 857d2653df85eec7b740a782005da2872d532bff | /training/reco/k_means/k_means.py | d2dc9088e57ba424bc95239998544ce1abec4e40 | [] | no_license | calzonelover/CMS_DC_ANOMALY | 1621924dc66ec2a80a2aa3af3bb29762bb558073 | 5a02ab59ec52c462c37111f83e286149dd86754b | refs/heads/master | 2020-05-31T15:12:50.074681 | 2019-08-22T15:05:42 | 2019-08-22T15:05:42 | 190,348,831 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,406 | py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
from sklearn import svm
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler, normalize, MinMaxScaler
# customize
from data.prompt_reco.setting import REDUCED_FEATURES, FEATURES, SELECT_PD
import data.prompt_reco.utility as utility
from model.reco.autoencoder import ( VanillaAutoencoder, SparseAutoencoder,
ContractiveAutoencoder, VariationalAutoencoder )
COLORS = ('green', 'blue')
GROUP_LABELS = ('A', 'B')
HUMAN_LABELS = ('Good', 'Bad')
def main():
# Setting
is_reduced_data = True
Autoencoder = VanillaAutoencoder
test_model = "Vanilla"
number_model = 1
BS = 256
N_FEATURES = len(REDUCED_FEATURES*7) if is_reduced_data else 2807
# data
files = utility.get_file_list(chosed_pd=SELECT_PD) # choosing only ZeroBias
feature_names = utility.get_feature_name(features=FEATURES)
reduced_feature_names = utility.get_feature_name(features=REDUCED_FEATURES)
data = pd.DataFrame(utility.get_data(files), columns=feature_names)
data["run"] = data["run"].astype(int)
data["lumi"] = data["lumi"].astype(int)
data.drop(["_foo", "_bar", "_baz"], axis=1, inplace=True)
if is_reduced_data:
not_reduced_column = feature_names
for intersected_elem in reduced_feature_names: not_reduced_column.remove(intersected_elem)
data.drop(not_reduced_column, axis=1, inplace=True)
data = data.sort_values(["run", "lumi"], ascending=[True,True])
data = data.reset_index(drop=True)
data["label"] = data.apply(utility.add_flags, axis=1)
# training
print("Preparing dataset...")
split = int(0.8*len(data))
# train set
df_train = data.iloc[:split].copy()
X_train = df_train.iloc[:, 0:N_FEATURES]
y_train = df_train["label"]
# test set
df_test = data.iloc[split:].copy()
X_test = df_test.iloc[:, 0:N_FEATURES]
y_test = df_test["label"]
X_test = pd.concat([X_train[y_train == 1], X_test])
y_test = pd.concat([y_train[y_train == 1], y_test])
X_train = X_train[y_train == 0]
y_train = y_train[y_train == 0]
print("Training KMeans")
# standardize data
# transformer = StandardScaler()
transformer = MinMaxScaler(feature_range=(0,1))
transformer.fit(X_train.values)
X_train = transformer.transform(X_train.values)
X_test = transformer.transform(X_test.values)
# X_train = normalize(X_train, norm='l1')
## combine
X = np.concatenate((X_train, X_test))
y = np.concatenate((y_train, y_test))
# training
kmeans_model = KMeans(n_clusters=2).fit(X)
y_pred = kmeans_model.predict(X)
# PCA
pca = PCA(n_components=2)
principal_components = pca.fit_transform(X)
# visualize K-means
fig, ax = plt.subplots()
for i, group_label in enumerate(GROUP_LABELS):
scat_data = principal_components[y_pred == i]
ax.scatter(
scat_data[:, 0], scat_data[:, 1], alpha=0.8,
c = COLORS[i if i == 0 else 1],
label = GROUP_LABELS[i]
)
ax.legend()
plt.title('Clustering by K-Means, visual in Principal Basis (JetHT)')
plt.savefig('JetHT_kmeans.png')
# visualize human labels
fig, ax = plt.subplots()
for i, group_label in enumerate(GROUP_LABELS):
scat_data = principal_components[y == i]
ax.scatter(
scat_data[:, 0], scat_data[:, 1], alpha=0.8,
c = COLORS[i],
label = HUMAN_LABELS[i]
)
ax.legend()
plt.xlabel("Principal component 1")
plt.ylabel("Principal component 2")
plt.title('Labeled by Human, visual in Principal Basis (JetHT)')
plt.savefig('JetHT_label.png')
# visual One-Class SVM cutoff
svm_model = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
svm_model.fit(X_train)
sampling_svm_dvs = -svm_model.decision_function(X)[:, 0]
min_sampling_svm_dvs, max_sampling_svm_dvs = min(sampling_svm_dvs), max(sampling_svm_dvs)
colors_svm_dvs = list(map(lambda x: [0.2, 1.0-((x-min_sampling_svm_dvs)/(max_sampling_svm_dvs-min_sampling_svm_dvs)), (x-min_sampling_svm_dvs)/(max_sampling_svm_dvs-min_sampling_svm_dvs)], sampling_svm_dvs))
colors_svm_cutoff = list(map(lambda x: [0, 0, 0.8] if x > 20.0 else [0, 1.0, 0], sampling_svm_dvs))
fig, ax = plt.subplots()
ax.scatter(
principal_components[:, 0], principal_components[:, 1],
alpha=0.8,
c = colors_svm_dvs
)
plt.title('Decision Value from SVM, visual in Principal Basis (JetHT)')
plt.savefig('SVM_DCs.png')
fig, ax = plt.subplots()
ax.scatter(
principal_components[:, 0], principal_components[:, 1],
alpha=0.8,
c = colors_svm_cutoff
)
plt.xlabel("Principal component 1")
plt.ylabel("Principal component 2")
plt.title('Applying cutoff in SVM, visual in Principal Basis (JetHT)')
plt.savefig('SVM_cutoff.png')
# visual autoencoder loss
autoencoder = Autoencoder(
input_dim = [N_FEATURES],
summary_dir = "model/reco/summary",
model_name = "{} model {}".format(test_model, number_model),
batch_size = BS
)
autoencoder.restore()
sampling_totalsd = autoencoder.get_sd(X, scalar=True)
max_totalsd = max(sampling_totalsd)
min_totalsd = min(sampling_totalsd)
colors_cutoff = list(map(lambda x: [0, 0, 0.8] if x > 10.0 else [0, 1.0, 0], sampling_totalsd))
colors_loss = list(map(lambda x: [0.2, 1.0-((x-min_totalsd)/(max_totalsd-min_totalsd)), (x-min_totalsd)/(max_totalsd-min_totalsd)], sampling_totalsd))
fig, ax = plt.subplots()
ax.scatter(
principal_components[:, 0], principal_components[:, 1],
alpha=0.8,
c = np.log10(sampling_totalsd)
)
plt.xlabel("Principal component 1")
plt.ylabel("Principal component 2")
plt.title('Loss from AE data, testing set visual in Principal Basis (JetHT)')
plt.savefig('JetHT_AE_loss.png')
fig, ax = plt.subplots()
ax.scatter(
principal_components[:, 0], principal_components[:, 1],
alpha=0.8,
c = colors_cutoff,
)
plt.xlabel("Principal component 1")
plt.ylabel("Principal component 2")
plt.title('Applying cutoff in AE, testing set visual in Principal Basis (JetHT)')
plt.savefig('JetHT_AE_cutoff.png') | [
"[email protected]"
] | |
260ff632015d6f5932bddcb9cfb80d61bc74add3 | 1e9c9f2a9639db7cdb032aae69cb4d99aef1d3a5 | /codingBat/python/warmup2/stringMatch.py | 885e6c9cdd2dd38ba959c80188a62fe641e07e7d | [
"MIT"
] | permissive | sagarnikam123/learnNPractice | f0da3f8acf653e56c591353ab342765a6831698c | 1b3b0cb2cff2f478006626a4c37a99102acbb628 | refs/heads/master | 2023-02-04T11:21:18.211654 | 2023-01-24T14:47:52 | 2023-01-24T14:47:52 | 61,184,927 | 2 | 1 | MIT | 2022-03-06T11:07:18 | 2016-06-15T06:57:19 | Python | UTF-8 | Python | false | false | 1,024 | py | #######################################################################################################################
#
# stringMatch
#
# Given 2 strings, a and b, return the number of the positions where they contain
# the same length 2 substring. So "xxcaazz" and "xxbaaz" yields 3, since the "xx", "aa",
# and "az" substrings appear in the same place in both strings.
#
#######################################################################################################################
#
# stringMatch("xxcaazz", "xxbaaz") → 3
# stringMatch("abc", "abc") → 2
# stringMatch("abc", "axc") → 0
# stringMatch("hello", "he") → 1
# stringMatch("he", "hello") → 1
# stringMatch("h", "hello") → 0
# stringMatch("", "hello") → 0
# stringMatch("aabbccdd", "abbbxxd") → 1
# stringMatch("aaxxaaxx", "iaxxai") → 3
# stringMatch("iaxxai", "aaxxaaxx") → 3
#
#######################################################################################################################
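# A minimal sketch of one possible solution (illustrative; not part of the original file):
# compare the length-2 substring at every shared index of the two strings.
def stringMatch(a, b):
    shorter = min(len(a), len(b))
    count = 0
    for i in range(shorter - 1):
        if a[i:i + 2] == b[i:i + 2]:  # same two-character window at the same position
            count += 1
    return count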
| [
"[email protected]"
] | |
2f9afaeeacf9eb5a5b1893e3e8db728e1cf38f06 | 12f83344cdfe561db39ad9106dbf263ccd919f7e | /Projects/miami_metro/debra/migrations/0084_auto__add_field_brands_icon_id.py | 905f99d42b129e430c5245c9813ab6a94d8af89c | [] | no_license | TopWebGhost/Angular-Influencer | ebcd28f83a77a92d240c41f11d82927b98bcea9e | 2f15c4ddd8bbb112c407d222ae48746b626c674f | refs/heads/master | 2021-01-19T10:45:47.039673 | 2016-12-05T01:59:26 | 2016-12-05T01:59:26 | 82,214,998 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 37,415 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Brands.icon_id'
db.add_column('debra_brands', 'icon_id', self.gf('django.db.models.fields.CharField')(default='Nil', max_length=50), keep_default=False)
def backwards(self, orm):
# Deleting field 'Brands.icon_id'
db.delete_column('debra_brands', 'icon_id')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'debra.addpopupchange': {
'Meta': {'object_name': 'AddPopupChange'},
'brand': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['debra.Brands']"}),
'color_new': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'color_orig': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'create_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_new': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'img_orig': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'name_new': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'name_orig': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'price_new': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'price_orig': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['debra.ProductModel']"}),
'size_new': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'size_orig': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'user_id': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'debra.brands': {
'Meta': {'object_name': 'Brands'},
'domain_name': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '200'}),
'icon_id': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo_blueimg_url': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '200'}),
'logo_img_url': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '200'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '200'}),
'promo_discovery_support': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shopstyle_id': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '200'}),
'supported': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'debra.categories': {
'Meta': {'object_name': 'Categories'},
'brand': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['debra.Brands']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '100'})
},
'debra.categorymodel': {
'Meta': {'object_name': 'CategoryModel'},
'categoryId': ('django.db.models.fields.IntegerField', [], {'default': "'-111'", 'max_length': '50'}),
'categoryName': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'default': "'0'", 'to': "orm['debra.ProductModel']"})
},
'debra.colorsizemodel': {
'Meta': {'object_name': 'ColorSizeModel'},
'color': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '500'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'default': "'0'", 'to': "orm['debra.ProductModel']"}),
'size': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '500'})
},
'debra.combinationofuserops': {
'Meta': {'object_name': 'CombinationOfUserOps'},
'combination_id': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '200'}),
'how_many_out_of_stock': ('django.db.models.fields.IntegerField', [], {'default': "'0'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_out_of_stock': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'task_id': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '200'}),
'tracking_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user_id': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'user_selection': ('django.db.models.fields.related.ForeignKey', [], {'default': "'2'", 'to': "orm['debra.UserOperations']", 'null': 'True', 'blank': 'True'})
},
'debra.emailfromteaserpage': {
'Meta': {'object_name': 'EmailFromTeaserPage'},
'email_addr': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'time_registered': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
'debra.items': {
'Meta': {'object_name': 'Items'},
'brand': ('django.db.models.fields.related.ForeignKey', [], {'default': "'1'", 'to': "orm['debra.Brands']"}),
'cat1': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '100'}),
'cat2': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '100'}),
'cat3': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '100'}),
'cat4': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '100'}),
'cat5': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '100'}),
'gender': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url_lg': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '200'}),
'img_url_md': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '200'}),
'img_url_sm': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '200'}),
'insert_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '200'}),
'pr_colors': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '600'}),
'pr_currency': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '200'}),
'pr_id': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'max_length': '100'}),
'pr_instock': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '10'}),
'pr_retailer': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '200'}),
'pr_sizes': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '600'}),
'pr_url': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '200'}),
'price': ('django.db.models.fields.FloatField', [], {'default': "'20.00'", 'max_length': '10'}),
'product_model_key': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['debra.ProductModel']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'saleprice': ('django.db.models.fields.FloatField', [], {'default': "'10.00'", 'max_length': '10'})
},
'debra.preferredbrands': {
'Meta': {'object_name': 'PreferredBrands'},
'brand': ('django.db.models.fields.related.ForeignKey', [], {'default': "'1'", 'to': "orm['debra.Brands']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user_id': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'debra.pricingtasks': {
'Meta': {'object_name': 'PricingTasks'},
'combination_id': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '200'}),
'enqueue_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'finish_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'free_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_items': ('django.db.models.fields.IntegerField', [], {'default': "'1'"}),
'price': ('django.db.models.fields.FloatField', [], {'default': "'-11.0'", 'max_length': '10'}),
'proc_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'saleprice': ('django.db.models.fields.FloatField', [], {'default': "'-11.0'", 'max_length': '10'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'task_id': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'unique': 'True', 'max_length': '200'}),
'user_id': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'user_notify': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'debra.productavailability': {
'Meta': {'object_name': 'ProductAvailability'},
'avail': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'finish_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['debra.ColorSizeModel']"})
},
'debra.productmodel': {
'Meta': {'object_name': 'ProductModel'},
'brand': ('django.db.models.fields.related.ForeignKey', [], {'default': "'1'", 'to': "orm['debra.Brands']"}),
'c_idx': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '300', 'blank': 'True'}),
'cat1': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '25'}),
'cat10': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '25'}),
'cat2': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '25'}),
'cat3': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '25'}),
'cat4': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '25'}),
'cat5': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '25'}),
'cat6': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '25'}),
'cat7': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '25'}),
'cat8': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '25'}),
'cat9': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '25'}),
'description': ('django.db.models.fields.TextField', [], {'default': "'Nil'"}),
'err_text': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '200'}),
'gender': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'idx': ('django.db.models.fields.IntegerField', [], {'default': "'-11'", 'max_length': '10'}),
'img_url': ('django.db.models.fields.URLField', [], {'default': "'Nil'", 'max_length': '200'}),
'insert_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '200'}),
'price': ('django.db.models.fields.FloatField', [], {'default': "'-11.0'", 'max_length': '10'}),
'prod_url': ('django.db.models.fields.URLField', [], {'default': "'Nil'", 'max_length': '300'}),
'promo_text': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '200'}),
'saleprice': ('django.db.models.fields.FloatField', [], {'default': "'-11.0'", 'max_length': '10'})
},
'debra.productprice': {
'Meta': {'object_name': 'ProductPrice'},
'finish_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'price': ('django.db.models.fields.FloatField', [], {'default': "'-11.0'", 'max_length': '10'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['debra.ColorSizeModel']"}),
'shipping_cost': ('django.db.models.fields.FloatField', [], {'default': "'-1.0'", 'max_length': '10'})
},
'debra.productpromotion': {
'Meta': {'object_name': 'ProductPromotion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'default': "'0'", 'to': "orm['debra.ProductPrice']", 'null': 'True', 'blank': 'True'}),
'promo': ('django.db.models.fields.related.ForeignKey', [], {'default': "'0'", 'to': "orm['debra.Promoinfo']", 'null': 'True', 'blank': 'True'}),
'savings': ('django.db.models.fields.FloatField', [], {'default': "'0.0'", 'max_length': '10'})
},
'debra.promoinfo': {
'Meta': {'object_name': 'Promoinfo'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'd': ('django.db.models.fields.DateField', [], {}),
'exclude_category': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'free_shipping_lower_bound': ('django.db.models.fields.FloatField', [], {'default': '10000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_category': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '100'}),
'promo_disc_amount': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'promo_disc_lower_bound': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'promo_disc_perc': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'promo_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sex_category': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'store': ('django.db.models.fields.related.ForeignKey', [], {'default': "'1'", 'to': "orm['debra.Brands']"}),
'validity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'where_avail': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'debra.promorawtext': {
'Meta': {'object_name': 'PromoRawText'},
'data_source': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'insert_date': ('django.db.models.fields.DateField', [], {}),
'raw_text': ('django.db.models.fields.TextField', [], {}),
'store': ('django.db.models.fields.related.ForeignKey', [], {'default': "'1'", 'to': "orm['debra.Brands']"})
},
'debra.promotionapplied': {
'Meta': {'object_name': 'PromotionApplied'},
'combination_id': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'promo': ('django.db.models.fields.related.ForeignKey', [], {'default': "'0'", 'to': "orm['debra.Promoinfo']", 'null': 'True', 'blank': 'True'}),
'savings': ('django.db.models.fields.FloatField', [], {'default': "'0.0'", 'max_length': '10'}),
'task': ('django.db.models.fields.related.ForeignKey', [], {'default': "'0'", 'to': "orm['debra.PricingTasks']", 'null': 'True', 'blank': 'True'})
},
'debra.ssitemstats': {
'Meta': {'object_name': 'SSItemStats'},
'brand': ('django.db.models.fields.related.ForeignKey', [], {'default': "'1'", 'to': "orm['debra.Brands']"}),
'category': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '10'}),
'gender': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '10'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'price': ('django.db.models.fields.FloatField', [], {'default': "'-111.00'", 'max_length': '10'}),
'price_selection_metric': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sale_cnt': ('django.db.models.fields.IntegerField', [], {'default': "'-11'", 'max_length': '10'}),
'saleprice': ('django.db.models.fields.FloatField', [], {'default': "'-111.00'", 'max_length': '10'}),
'tdate': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2012, 10, 1, 1, 42, 57, 75240)'}),
'total_cnt': ('django.db.models.fields.IntegerField', [], {'default': "'-11'", 'max_length': '10'})
},
'debra.storepreferencesfromteaserpage': {
'Meta': {'object_name': 'StorePreferencesFromTeaserPage'},
'aber': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'aerie': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'agnus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'aldo': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'american_eagle': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ann_taylor': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'anthro': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'armani': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'associated_email_addr': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['debra.EmailFromTeaserPage']", 'null': 'True', 'blank': 'True'}),
'bebe': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'betsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'books_brothers': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'br': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'burberry': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'coach': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'diesel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'dkny': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'donna': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'exp': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'extra': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'forever': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'fossil': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'french_connection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'gap': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'guess': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'h_and_m': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hollister': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jcrew': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'kate_spade': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lacoste': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lane_bryant': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'levis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'limited': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lucky': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'miss_sixty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'nicole_miller': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'nike': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'nine_west': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ny_co': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'old_navy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'ralph_lauren': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'steve_madden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'thomas_pink': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'top_shop': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'true_religion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'united_colors': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urban_outfitters': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'victoria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'white_house_black_market': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'zara': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'debra.storespecificitemcategory': {
'Meta': {'object_name': 'StoreSpecificItemCategory'},
'age_group': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '10'}),
'brand': ('django.db.models.fields.related.ForeignKey', [], {'default': "'1'", 'to': "orm['debra.Brands']"}),
'categoryName': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '100'}),
'gender': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '10'}),
'hash_val': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '33'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'debra.taskdailystats': {
'Meta': {'object_name': 'TaskDailyStats'},
'brand': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['debra.Brands']"}),
'finish_time': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'num_active_items': ('django.db.models.fields.IntegerField', [], {'default': "'0'", 'max_length': '50'}),
'num_became_avail': ('django.db.models.fields.IntegerField', [], {'default': "'0'", 'max_length': '50'}),
'num_became_unavail': ('django.db.models.fields.IntegerField', [], {'default': "'0'", 'max_length': '50'}),
'num_dups': ('django.db.models.fields.IntegerField', [], {'default': "'0'", 'max_length': '50'}),
'num_prices_decr': ('django.db.models.fields.IntegerField', [], {'default': "'0'", 'max_length': '50'}),
'num_prices_incr': ('django.db.models.fields.IntegerField', [], {'default': "'0'", 'max_length': '50'}),
'num_prices_unchg': ('django.db.models.fields.IntegerField', [], {'default': "'0'", 'max_length': '50'}),
'num_tasks_started': ('django.db.models.fields.IntegerField', [], {'default': "'0'", 'max_length': '50'}),
'num_tested_avail': ('django.db.models.fields.IntegerField', [], {'default': "'0'", 'max_length': '50'}),
'num_tested_price': ('django.db.models.fields.IntegerField', [], {'default': "'0'", 'max_length': '50'}),
'prices_changed_25': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'prices_changed_50': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'prices_changed_75': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'prices_changed_more': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
},
'debra.userassignedcategory': {
'Meta': {'object_name': 'UserAssignedCategory'},
'categoryIcon': ('django.db.models.fields.files.ImageField', [], {'default': 'None', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'categoryName': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user_id': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'debra.useridmap': {
'Meta': {'object_name': 'UserIdMap'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_addr': ('django.db.models.fields.CharField', [], {'default': "'-11.11.11.11'", 'max_length': '50'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'default': "'-1111'", 'unique': 'True', 'max_length': '50'})
},
'debra.useroperations': {
'Meta': {'object_name': 'UserOperations'},
'calculated_price': ('django.db.models.fields.FloatField', [], {'default': "'-11.0'", 'max_length': '10'}),
'color': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'how_many_out_of_stock': ('django.db.models.fields.IntegerField', [], {'default': "'0'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'default': "'Nil'", 'max_length': '1000'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'default': "'0'", 'to': "orm['debra.ProductModel']", 'null': 'True', 'blank': 'True'}),
'item_out_of_stock': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'operator_type': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': "'-1'", 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'size': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'debra.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'about_me': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'access_token': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'blog_url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'facebook_profile_url': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'newsletter_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'raw_data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'website_url': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'debra.usershelfiterrors': {
'Meta': {'object_name': 'UserShelfitErrors'},
'extra_info': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'problematic_url': ('django.db.models.fields.URLField', [], {'default': "'Nil'", 'max_length': '1000'}),
'user_id': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'debra.wishlistitem': {
'Meta': {'object_name': 'WishlistItem'},
'calculated_price': ('django.db.models.fields.FloatField', [], {'default': "'-11.0'", 'max_length': '10'}),
'cat1': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '25'}),
'cat2': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '25'}),
'cat3': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '25'}),
'combination_id': ('django.db.models.fields.CharField', [], {'default': "'Nil'", 'max_length': '200'}),
'compare_flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'how_many_out_of_stock': ('django.db.models.fields.IntegerField', [], {'default': "'0'"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_buylist': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'item_out_of_stock': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'notify_lower_bound': ('django.db.models.fields.FloatField', [], {'default': "'-1'", 'max_length': '10'}),
'promo_applied': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['debra.Promoinfo']", 'null': 'True', 'blank': 'True'}),
'savings': ('django.db.models.fields.FloatField', [], {'default': "'0'", 'max_length': '10'}),
'shipping_cost': ('django.db.models.fields.FloatField', [], {'default': "'-1'", 'max_length': '10'}),
'snooze': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'time_notified_last': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'time_price_calculated_last': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user_assigned_cat': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['debra.UserAssignedCategory']", 'null': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'user_selection': ('django.db.models.fields.related.ForeignKey', [], {'default': "'2'", 'to': "orm['debra.UserOperations']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['debra']
| [
"[email protected]"
] | |
cd9b2621354b41dab2657e8f0bae14493858399f | 6725ff7ad5cbcc1413654c7fbe4d9795a35e50b5 | /L4_task3.py | 9e2c5e6f7a3ba2cb1e763f102fa57d46d7e29e8d | [] | no_license | MaksimKulya/PythonCourse | 59e5a2e67378bfdddf5bd96db8e25782489b7db1 | 40b5559e2fac76d3fb3221ba4b90478dd10f442c | refs/heads/main | 2023-05-03T05:13:05.238092 | 2021-05-18T14:44:44 | 2021-05-18T14:44:44 | 321,064,262 | 0 | 0 | null | 2021-01-20T12:28:47 | 2020-12-13T12:52:01 | Python | UTF-8 | Python | false | false | 414 | py | # For the numbers from 20 to 240, find those divisible by 20 or by 21. The task must be solved in a single line.
# Hint: use the range() function and a generator.
import random as rnd
a = [rnd.randint(20, 240) for i in range(100)]
print(a)
b = [n for n in a if n % 20 ==0 or n % 21 ==0]
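# A one-line variant that follows the hint (range() plus a comprehension); illustrative addition, not in the original file:
# print([n for n in range(20, 241) if n % 20 == 0 or n % 21 == 0])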
print(b) | [
"[email protected]"
] | |
6dd0be9d6b07dba30423d4ecfba393cefadaf205 | 5234bc430c83d616a8214d7f77c2c081543b6b26 | /src/Python/1-100/96.UniqueBinarySearchTrees.py | 6c59f9a19ecaa76aa1eae5a23d0e85ffde46d062 | [
"Apache-2.0"
] | permissive | AveryHuo/PeefyLeetCode | 3e749b962cadfdf10d7f7b1ed21c5fafc4342950 | 92156e4b48ba19e3f02e4286b9f733e9769a1dee | refs/heads/master | 2022-04-26T06:01:18.547761 | 2020-04-25T09:55:46 | 2020-04-25T09:55:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py |
class Solution:
def numTrees(self, n: int) -> int:
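# dp[i] counts BSTs with i nodes: choosing value j as the root leaves j-1 smaller values
# for the left subtree and i-j larger values for the right subtree, giving the Catalan
# recurrence dp[i] = sum(dp[j-1] * dp[i-j] for j in 1..i).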
dp = [0] * (n + 1)
dp[0] = 1
dp[1] = 1
for i in range(2, n + 1):
for j in range(1, i + 1):
dp[i] += dp[j - 1] * dp[i - j]
return dp[n]
if __name__ == "__main__":
solution = Solution()
print(solution.numTrees(3))
| [
"[email protected]"
] | |
53fe24223fbffd0f694c4f4c0faf15c15b2809c4 | fdce456e2f0ea12f854e98583cfda95955b9a36b | /manageusers/apps.py | dca42b37dd05993510c3c0ba37e95721d15238d8 | [] | no_license | atifasr/jobportal | e5fdc8058759311e8d4ca2c0291066ad86059fb6 | 3fe211598daa66f2a76c2b3d4d26d73459ac7457 | refs/heads/master | 2023-08-05T02:01:00.870360 | 2021-09-29T11:59:29 | 2021-09-29T11:59:29 | 388,807,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | from django.apps import AppConfig
class ManageusersConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'manageusers'
# def ready(self):
# from .schedulers import scheduler
# scheduler.start()
| [
"[email protected]"
] | |
8c9447c3f292c1307c78143b0de03a14cccc97b9 | 12abbf73f6e0f88c263b50496aa3c9b769a0ba19 | /venv/Lib/site-packages/nb_log/handlers.py | 7d220fe28fa8528f7510ab6682839d4d72d007df | [] | no_license | yangtingting123456/API_Test_Framework | 037f2d3171cecba39f845d646d8db629dfce2ba9 | dd388491c038d635ccfe323032d86b17ea2a48e0 | refs/heads/master | 2023-02-23T00:21:10.321954 | 2021-01-15T09:33:30 | 2021-01-15T09:33:30 | 319,823,833 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,740 | py | # noinspection PyMissingOrEmptyDocstring
import atexit
import copy
import sys
import os
import traceback
import socket
import datetime
import json
import time
from collections import OrderedDict
from queue import Queue, Empty
# noinspection PyPackageRequirements
from kafka import KafkaProducer
from elasticsearch import Elasticsearch, helpers
from threading import Lock, Thread
import pymongo
import requests
import logging
from logging import handlers
from concurrent_log_handler import ConcurrentRotatingFileHandler # 需要安装。concurrent-log-handler==0.9.1
# noinspection PyUnresolvedReferences
from logging.handlers import WatchedFileHandler
# noinspection PyPackageRequirements
from pythonjsonlogger.jsonlogger import JsonFormatter
from nb_log import nb_log_config_default
from nb_log import nb_print
very_nb_print = nb_print
os_name = os.name
class MongoHandler(logging.Handler):
"""
    A MongoDB log handler; records are written to MongoDB, one collection per logger name.
"""
# msg_pattern = re.compile('(\d+-\d+-\d+ \d+:\d+:\d+) - (\S*?) - (\S*?) - (\d+) - (\S*?) - ([\s\S]*)')
def __init__(self, mongo_url, mongo_database='logs'):
"""
:param mongo_url: mongo连接
:param mongo_database: 保存日志的数据库,默认使用logs数据库
"""
logging.Handler.__init__(self)
mongo_client = pymongo.MongoClient(mongo_url)
self.mongo_db = mongo_client.get_database(mongo_database)
def emit(self, record):
# noinspection PyBroadException, PyPep8
try:
"""以下使用解析日志模板的方式提取出字段"""
# msg = self.format(record)
# logging.LogRecord
# msg_match = self.msg_pattern.search(msg)
# log_info_dict = {'time': msg_match.group(1),
# 'name': msg_match.group(2),
# 'file_name': msg_match.group(3),
# 'line_no': msg_match.group(4),
# 'log_level': msg_match.group(5),
# 'detail_msg': msg_match.group(6),
# }
level_str = None
if record.levelno == 10:
level_str = 'DEBUG'
elif record.levelno == 20:
level_str = 'INFO'
elif record.levelno == 30:
level_str = 'WARNING'
elif record.levelno == 40:
level_str = 'ERROR'
elif record.levelno == 50:
level_str = 'CRITICAL'
log_info_dict = OrderedDict()
log_info_dict['time'] = time.strftime('%Y-%m-%d %H:%M:%S')
log_info_dict['name'] = record.name
log_info_dict['file_path'] = record.pathname
log_info_dict['file_name'] = record.filename
log_info_dict['func_name'] = record.funcName
log_info_dict['line_no'] = record.lineno
log_info_dict['log_level'] = level_str
log_info_dict['detail_msg'] = record.msg
col = self.mongo_db.get_collection(record.name)
col.insert_one(log_info_dict)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.handleError(record)
class KafkaHandler(logging.Handler):
"""
    Writes log records to Kafka in batches.
"""
ES_INTERVAL_SECONDS = 0.5
host_name = socket.gethostname()
host_process = f'{host_name} -- {os.getpid()}'
script_name = sys.argv[0].split('/')[-1]
task_queue = Queue()
last_es_op_time = time.time()
has_start_do_bulk_op = False
has_start_check_size_and_clear = False
kafka_producer = None
es_index_prefix = 'pylog-'
def __init__(self, bootstrap_servers, **configs):
"""
:param elastic_hosts: es的ip地址,数组类型
:param elastic_port: es端口
:param index_prefix: index名字前缀。
"""
logging.Handler.__init__(self)
if not self.__class__.kafka_producer:
very_nb_print('实例化kafka producer')
self.__class__.kafka_producer = KafkaProducer(bootstrap_servers=bootstrap_servers, **configs)
t = Thread(target=self._do_bulk_op)
t.setDaemon(True)
t.start()
@classmethod
def __add_task_to_bulk(cls, task):
cls.task_queue.put(task)
# noinspection PyUnresolvedReferences
@classmethod
def __clear_bulk_task(cls):
cls.task_queue.queue.clear()
@classmethod
def _check_size_and_clear(cls):
"""
如果是外网传输日志到测试环境风险很大,测试环境网络经常打满,传输不了会造成日志队列堆积,会造成内存泄漏,所以需要清理。
:return:
"""
if cls.has_start_check_size_and_clear:
return
cls.has_start_check_size_and_clear = True
def __check_size_and_clear():
while 1:
size = cls.task_queue.qsize()
if size > 1000:
very_nb_print(f'kafka防止意外日志积累太多了,达到 {size} 个,为防止内存泄漏,清除队列')
cls.__clear_bulk_task()
time.sleep(0.1)
t = Thread(target=__check_size_and_clear)
t.setDaemon(True)
t.start()
@classmethod
def _do_bulk_op(cls):
if cls.has_start_do_bulk_op:
return
cls.has_start_do_bulk_op = True
# very_nb_print(cls.kafka_producer)
while 1:
try:
# noinspection PyUnresolvedReferences
tasks = list(cls.task_queue.queue)
cls.__clear_bulk_task()
for task in tasks:
topic = (cls.es_index_prefix + task['name']).replace('.', '').replace('_', '').replace('-', '')
# very_nb_print(topic)
cls.kafka_producer.send(topic, json.dumps(task).encode())
cls.last_es_op_time = time.time()
except Exception as e:
very_nb_print(e)
finally:
time.sleep(cls.ES_INTERVAL_SECONDS)
def emit(self, record):
# noinspection PyBroadException, PyPep8
try:
level_str = None
if record.levelno == 10:
level_str = 'DEBUG'
elif record.levelno == 20:
level_str = 'INFO'
elif record.levelno == 30:
level_str = 'WARNING'
elif record.levelno == 40:
level_str = 'ERROR'
elif record.levelno == 50:
level_str = 'CRITICAL'
log_info_dict = OrderedDict()
log_info_dict['@timestamp'] = datetime.datetime.utcfromtimestamp(record.created).isoformat()
log_info_dict['time'] = time.strftime('%Y-%m-%d %H:%M:%S')
log_info_dict['name'] = record.name
log_info_dict['host'] = self.host_name
log_info_dict['host_process'] = self.host_process
# log_info_dict['file_path'] = record.pathname
log_info_dict['file_name'] = record.filename
log_info_dict['func_name'] = record.funcName
# log_info_dict['line_no'] = record.lineno
log_info_dict['log_place'] = f'{record.pathname}:{record.lineno}'
log_info_dict['log_level'] = level_str
log_info_dict['msg'] = str(record.msg)
log_info_dict['script'] = self.script_name
log_info_dict['es_index'] = f'{self.es_index_prefix}{record.name.lower()}'
self.__add_task_to_bulk(log_info_dict)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.handleError(record)
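# Usage sketch (an assumption, not part of this module): KafkaHandler queues records and a
# daemon thread flushes them roughly every ES_INTERVAL_SECONDS; the Kafka topic is the
# es_index_prefix plus the logger name with '.', '_' and '-' stripped, e.g.
# 'pylog-' + 'my.app_name' -> 'pylogmyappname'.
#     handler = KafkaHandler(bootstrap_servers=['127.0.0.1:9092'])
#     logging.getLogger('my.app_name').addHandler(handler)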
class ElasticHandler000(logging.Handler):
"""
    Writes log records to Elasticsearch in batches.
"""
ES_INTERVAL_SECONDS = 2
host_name = socket.gethostname()
def __init__(self, elastic_hosts: list, elastic_port, index_prefix='pylog-'):
"""
:param elastic_hosts: es的ip地址,数组类型
:param elastic_port: es端口
:param index_prefix: index名字前缀。
"""
logging.Handler.__init__(self)
self._es_client = Elasticsearch(elastic_hosts, port=elastic_port)
self._index_prefix = index_prefix
self._task_list = []
self._task_queue = Queue()
self._last_es_op_time = time.time()
t = Thread(target=self._do_bulk_op)
t.setDaemon(True)
t.start()
def __add_task_to_bulk(self, task):
self._task_queue.put(task)
def __clear_bulk_task(self):
# noinspection PyUnresolvedReferences
self._task_queue.queue.clear()
def _do_bulk_op(self):
while 1:
try:
if self._task_queue.qsize() > 10000:
very_nb_print('防止意外日志积累太多了,不插入es了。')
self.__clear_bulk_task()
return
# noinspection PyUnresolvedReferences
tasks = list(self._task_queue.queue)
self.__clear_bulk_task()
helpers.bulk(self._es_client, tasks)
self._last_es_op_time = time.time()
except Exception as e:
very_nb_print(e)
finally:
time.sleep(1)
def emit(self, record):
# noinspection PyBroadException, PyPep8
try:
level_str = None
if record.levelno == 10:
level_str = 'DEBUG'
elif record.levelno == 20:
level_str = 'INFO'
elif record.levelno == 30:
level_str = 'WARNING'
elif record.levelno == 40:
level_str = 'ERROR'
elif record.levelno == 50:
level_str = 'CRITICAL'
log_info_dict = OrderedDict()
log_info_dict['@timestamp'] = datetime.datetime.utcfromtimestamp(record.created).isoformat()
log_info_dict['time'] = time.strftime('%Y-%m-%d %H:%M:%S')
log_info_dict['name'] = record.name
log_info_dict['host'] = self.host_name
log_info_dict['file_path'] = record.pathname
log_info_dict['file_name'] = record.filename
log_info_dict['func_name'] = record.funcName
log_info_dict['line_no'] = record.lineno
log_info_dict['log_level'] = level_str
log_info_dict['msg'] = str(record.msg)
self.__add_task_to_bulk({
"_index": f'{self._index_prefix}{record.name.lower()}',
"_type": f'{self._index_prefix}{record.name.lower()}',
"_source": log_info_dict
})
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.handleError(record)
# noinspection PyUnresolvedReferences
class ElasticHandler(logging.Handler):
"""
    Writes log records to Elasticsearch in batches.
"""
ES_INTERVAL_SECONDS = 0.5
host_name = socket.gethostname()
host_process = f'{host_name} -- {os.getpid()}'
script_name = sys.argv[0]
task_queue = Queue()
last_es_op_time = time.time()
has_start_do_bulk_op = False
def __init__(self, elastic_hosts: list, elastic_port, index_prefix='pylog-'):
"""
:param elastic_hosts: es的ip地址,数组类型
:param elastic_port: es端口
:param index_prefix: index名字前缀。
"""
logging.Handler.__init__(self)
self._es_client = Elasticsearch(elastic_hosts, port=elastic_port)
self._index_prefix = index_prefix
t = Thread(target=self._do_bulk_op)
t.setDaemon(True)
t.start()
@classmethod
def __add_task_to_bulk(cls, task):
cls.task_queue.put(task)
# noinspection PyUnresolvedReferences
@classmethod
def __clear_bulk_task(cls):
cls.task_queue.queue.clear()
def _do_bulk_op(self):
if self.__class__.has_start_do_bulk_op:
return
self.__class__.has_start_do_bulk_op = True
while 1:
try:
if self.__class__.task_queue.qsize() > 10000:
very_nb_print('防止意外日志积累太多了,不插入es了。')
self.__clear_bulk_task()
return
tasks = list(self.__class__.task_queue.queue)
self.__clear_bulk_task()
helpers.bulk(self._es_client, tasks)
self.__class__.last_es_op_time = time.time()
except Exception as e:
very_nb_print(e)
finally:
time.sleep(self.ES_INTERVAL_SECONDS)
def emit(self, record):
# noinspection PyBroadException, PyPep8
try:
level_str = None
if record.levelno == 10:
level_str = 'DEBUG'
elif record.levelno == 20:
level_str = 'INFO'
elif record.levelno == 30:
level_str = 'WARNING'
elif record.levelno == 40:
level_str = 'ERROR'
elif record.levelno == 50:
level_str = 'CRITICAL'
log_info_dict = OrderedDict()
log_info_dict['@timestamp'] = datetime.datetime.utcfromtimestamp(record.created).isoformat()
log_info_dict['time'] = time.strftime('%Y-%m-%d %H:%M:%S')
log_info_dict['name'] = record.name
log_info_dict['host'] = self.host_name
log_info_dict['host_process'] = self.host_process
log_info_dict['file_path'] = record.pathname
log_info_dict['file_name'] = record.filename
log_info_dict['func_name'] = record.funcName
log_info_dict['line_no'] = record.lineno
log_info_dict['log_level'] = level_str
log_info_dict['msg'] = str(record.msg)
log_info_dict['script'] = self.script_name
self.__add_task_to_bulk({
"_index": f'{self._index_prefix}{record.name.lower()}',
"_type": f'{self._index_prefix}{record.name.lower()}',
"_source": log_info_dict
})
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
self.handleError(record)
# noinspection PyPep8Naming
def formatMessage(self, record: logging.LogRecord):
# print(record.__dict__)
if hasattr(record, 'for_segmentation_color'):
# del record.for_segmentation_color
# del record.msg
record.message = ''
# print(record.__dict__)
return self._style.format(record)
# logging.Formatter.formatMessage = formatMessage
class ColorHandler(logging.Handler):
"""
    Colors console output according to log severity.
    The PyCharm monokai color scheme is strongly recommended: with it the log colors follow the usual traffic-light convention and look well saturated.
    To set it, open PyCharm Settings -> Editor -> Color Scheme -> Console Font and choose monokai.
"""
terminator = '\r\n' if os_name == 'nt' else '\n'
bule = 96 if os_name == 'nt' else 36
yellow = 93 if os_name == 'nt' else 33
def __init__(self, stream=None, ):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
logging.Handler.__init__(self)
if stream is None:
stream = sys.stdout # stderr无彩。
self.stream = stream
self._display_method = 7 if os_name == 'posix' else 0
self._word_color = 37 if os_name == 'posix' else 30
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def __build_color_msg_with_backgroud_color000(self, record_level, assist_msg, effective_information_msg):
if record_level == 10:
# msg_color = ('\033[0;32m%s\033[0m' % msg) # 绿色
# print(msg1)
msg_color = f'\033[0;32m{assist_msg}\033[0m \033[0;{self._word_color};42m{effective_information_msg}\033[0m' # 绿色
elif record_level == 20:
# msg_color = ('\033[%s;%sm%s\033[0m' % (self._display_method, self.bule, msg)) # 青蓝色 36 96
msg_color = f'\033[0;36m{assist_msg}\033[0m \033[0;{self._word_color};46m{effective_information_msg}\033[0m'
elif record_level == 30:
# msg_color = ('\033[%s;%sm%s\033[0m' % (self._display_method, self.yellow, msg))
msg_color = f'\033[0;33m{assist_msg}\033[0m \033[0;{self._word_color};43m{effective_information_msg}\033[0m'
elif record_level == 40:
# msg_color = ('\033[%s;35m%s\033[0m' % (self._display_method, msg)) # 紫红色
msg_color = f'\033[0;35m{assist_msg}\033[0m \033[0;{self._word_color};45m{effective_information_msg}\033[0m'
elif record_level == 50:
# msg_color = ('\033[%s;31m%s\033[0m' % (self._display_method, msg)) # 血红色
msg_color = f'\033[0;31m{assist_msg}\033[0m \033[0;{self._word_color};41m{effective_information_msg}\033[0m'
else:
msg_color = f'{assist_msg} {effective_information_msg}'
return msg_color
@staticmethod
def __build_color_msg_with_no_backgroud_color000(record_level, assist_msg, effective_information_msg):
if record_level == 10:
# msg_color = ('\033[0;32m%s\033[0m' % msg) # 绿色
# print(msg1)
msg_color = f'\033[0;32m{assist_msg} {effective_information_msg}\033[0m' # 绿色
elif record_level == 20:
# msg_color = ('\033[%s;%sm%s\033[0m' % (self._display_method, self.bule, msg)) # 青蓝色 36 96
msg_color = f'\033[0;36m{assist_msg} {effective_information_msg}\033[0m'
elif record_level == 30:
# msg_color = ('\033[%s;%sm%s\033[0m' % (self._display_method, self.yellow, msg))
msg_color = f'\033[0;33m{assist_msg} {effective_information_msg}\033[0m'
elif record_level == 40:
# msg_color = ('\033[%s;35m%s\033[0m' % (self._display_method, msg)) # 紫红色
msg_color = f'\033[0;35m{assist_msg} {effective_information_msg}\033[0m'
elif record_level == 50:
# msg_color = ('\033[%s;31m%s\033[0m' % (self._display_method, msg)) # 血红色
msg_color = f'\033[0;31m{assist_msg} {effective_information_msg}\033[0m'
else:
msg_color = f'{assist_msg} {effective_information_msg}'
return msg_color
def __build_color_msg_with_backgroud_color(self, record_level, record_copy: logging.LogRecord, ):
background_color = ''
complete_color = ''
if record_level == 10:
background_color = f'[0;{self._word_color};42m'
complete_color = f'[0;32m'
elif record_level == 20:
background_color = f'[0;{self._word_color};46m'
complete_color = f'[0;36m'
elif record_level == 30:
background_color = f'[0;{self._word_color};43m'
complete_color = f'[0;33m'
elif record_level == 40:
background_color = f'[0;{self._word_color};45m'
complete_color = f'[0;35m'
elif record_level == 50:
background_color = f'[0;{self._word_color};41m'
complete_color = f'[0;31m'
record_copy.msg = f'\033{background_color}{record_copy.msg}\033[0m'
msg_without_color = self.format(record_copy)
# print(repr(msg_color))
if isinstance(self.formatter, JsonFormatter) and background_color: # json会把/033 转义成\u001b,导致颜色显示不出来。
msg_without_color = msg_without_color.replace(rf'\u001b{background_color}', f'\033{background_color}')
msg_without_color = msg_without_color.replace(r'\u001b[0m', f'\033[0m\033{complete_color}')
msg_color = f'\033{complete_color}{msg_without_color}\033[0m'
# print(repr(msg_color))
return msg_color
def __build_color_msg_with_no_backgroud_color(self, record_level, record_copy: logging.LogRecord, ):
complete_msg = self.format(record_copy)
if record_level == 10:
# msg_color = ('\033[0;32m%s\033[0m' % msg) # 绿色
# print(msg1)
msg_color = f'\033[0;32m{complete_msg}\033[0m' # 绿色
elif record_level == 20:
# msg_color = ('\033[%s;%sm%s\033[0m' % (self._display_method, self.bule, msg)) # 青蓝色 36 96
msg_color = f'\033[0;36m{complete_msg}\033[0m'
elif record_level == 30:
# msg_color = ('\033[%s;%sm%s\033[0m' % (self._display_method, self.yellow, msg))
msg_color = f'\033[0;33m{complete_msg}\033[0m'
elif record_level == 40:
# msg_color = ('\033[%s;35m%s\033[0m' % (self._display_method, msg)) # 紫红色
msg_color = f'\033[0;35m{complete_msg}\033[0m'
elif record_level == 50:
# msg_color = ('\033[%s;31m%s\033[0m' % (self._display_method, msg)) # 血红色
msg_color = f'\033[0;31m{complete_msg}\033[0m'
else:
msg_color = f'{complete_msg}'
return msg_color
def emit(self, record: logging.LogRecord):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
# noinspection PyBroadException
try:
# very_nb_print(record)
# record.message = record.getMessage()
# effective_information_msg = record.getMessage() # 不能用msg字段,例如有的包的日志格式化还有其他字段
record_copy = copy.copy(record) # copy是因为,不要因为要屏幕彩色日志而影响例如文件日志 叮叮日志等其他handler的格式。
record_copy.for_segmentation_color = '彩色分段标志属性而已'
# del record_copy.msg
# assist_msg = self.format(record_copy)
# print(f'** {assist_msg} ** ')
stream = self.stream
# print(assist_msg)
# print(effective_information_msg)
if nb_log_config_default.DISPLAY_BACKGROUD_COLOR_IN_CONSOLE:
msg_color = self.__build_color_msg_with_backgroud_color(record.levelno, record_copy,
)
else:
msg_color = self.__build_color_msg_with_no_backgroud_color(record.levelno, record_copy)
# stream.write(msg_color)
# stream.write(self.terminator)
# self.flush()
stream.write(msg_color + self.terminator)
except Exception as e:
very_nb_print(e)
very_nb_print(traceback.format_exc())
# self.handleError(record)
@staticmethod
def __spilt_msg(log_level, msg: str):
split_text = '- 级别 -'
if log_level == 10:
split_text = '- DEBUG -'
elif log_level == 20:
split_text = '- INFO -'
elif log_level == 30:
split_text = '- WARNING -'
elif log_level == 40:
split_text = '- ERROR -'
elif log_level == 50:
split_text = '- CRITICAL -'
msg_split = msg.split(split_text, maxsplit=1)
return msg_split[0] + split_text, msg_split[-1]
def __repr__(self):
level = logging.getLevelName(self.level)
name = getattr(self.stream, 'name', '')
if name:
name += ' '
return '<%s %s(%s)>' % (self.__class__.__name__, name, level)
class ConcurrentRotatingFileHandlerWithBufferInitiativeWindwos(ConcurrentRotatingFileHandler):
"""
    ConcurrentRotatingFileHandler solves multi-process file rotation, but its frequent file-lock operations hurt performance badly.
    Repeated stress tests show writes roughly 100x slower than non-rotating logging on Windows and about 10x slower on Linux; the multi-process rotation lock uses pywin32 on Windows and fcntl on Linux.
    This class therefore buffers up to one second of records into one long string before writing, which greatly reduces the number of lock/unlock operations and keeps throughput close to plain (non multi-process-safe) file writes.
    Writes are flushed to the file proactively.
"""
file_handler_list = []
has_start_emit_all_file_handler = False # 只能在windwos运行正常,windwos是多进程每个进程的变量has_start_emit_all_file_handler是独立的。linux是共享的。
@classmethod
def _emit_all_file_handler(cls):
while True:
for hr in cls.file_handler_list:
# very_nb_print(hr.buffer_msgs_queue.qsize())
hr.rollover_and_do_write()
time.sleep(1)
@classmethod
def start_emit_all_file_handler(cls):
pass
Thread(target=cls._emit_all_file_handler, daemon=True).start()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.buffer_msgs_queue = Queue()
atexit.register(self._when_exit) # 如果程序属于立马就能结束的,需要在程序结束前执行这个钩子,防止不到最后一秒的日志没记录到。
self.file_handler_list.append(self)
if not self.has_start_emit_all_file_handler:
self.start_emit_all_file_handler()
self.__class__.has_start_emit_all_file_handler = True
def _when_exit(self):
pass
self.rollover_and_do_write()
def emit(self, record):
"""
emit已经在logger的handle方法中加了锁,所以这里的重置上次写入时间和清除buffer_msgs不需要加锁了。
:param record:
:return:
"""
# noinspection PyBroadException
try:
msg = self.format(record)
self.buffer_msgs_queue.put(msg)
except Exception:
self.handleError(record)
def rollover_and_do_write(self, ):
# very_nb_print(self.buffer_msgs_queue.qsize())
self._rollover_and_do_write()
def _rollover_and_do_write(self):
buffer_msgs = ''
while True:
try:
msg = self.buffer_msgs_queue.get(block=False)
buffer_msgs += msg + '\n'
except Empty:
break
if buffer_msgs:
try:
self._do_lock()
try:
if self.shouldRollover(None):
self.doRollover()
except Exception as e:
self._console_log("Unable to do rollover: %s" % (e,), stack=True)
# very_nb_print(len(self._buffer_msgs))
self.do_write(buffer_msgs)
finally:
self._do_unlock()
class ConcurrentRotatingFileHandlerWithBufferInitiativeLinux(ConcurrentRotatingFileHandlerWithBufferInitiativeWindwos):
"""
    ConcurrentRotatingFileHandler solves multi-process file rotation, but its frequent file-lock operations hurt performance badly.
    Repeated stress tests show writes roughly 100x slower than non-rotating logging on Windows and about 10x slower on Linux; the multi-process rotation lock uses pywin32 on Windows and fcntl on Linux.
    This class therefore buffers up to one second of records into one long string before writing, which greatly reduces the number of lock/unlock operations and keeps throughput close to plain (non multi-process-safe) file writes.
    Writes are flushed to the file proactively.
"""
file_handler_list = []
has_start_emit_all_file_handler_process_id_set = set() # 这个linux和windwos都兼容,windwos是多进程每个进程的变量has_start_emit_all_file_handler是独立的。linux是共享的。
__lock_for_rotate = Lock()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.buffer_msgs_queue = Queue()
atexit.register(self._when_exit) # 如果程序属于立马就能结束的,需要在程序结束前执行这个钩子,防止不到最后一秒的日志没记录到。
self.file_handler_list.append(self)
if os.getpid() not in self.has_start_emit_all_file_handler_process_id_set:
self.start_emit_all_file_handler()
self.__class__.has_start_emit_all_file_handler_process_id_set.add(os.getpid())
def rollover_and_do_write(self, ):
# very_nb_print(self.buffer_msgs_queue.qsize())
with self.__lock_for_rotate:
self._rollover_and_do_write()
class CompatibleSMTPSSLHandler(handlers.SMTPHandler):
"""
    The stock SMTPHandler does not support SMTP_SSL mailboxes; this one supports both, and it also rate-limits how often mail is sent.
"""
def __init__(self, mailhost, fromaddr, toaddrs: tuple, subject,
credentials=None, secure=None, timeout=5.0, is_use_ssl=True, mail_time_interval=0):
"""
:param mailhost:
:param fromaddr:
:param toaddrs:
:param subject:
:param credentials:
:param secure:
:param timeout:
:param is_use_ssl:
:param mail_time_interval: 发邮件的时间间隔,可以控制日志邮件的发送频率,为0不进行频率限制控制,如果为60,代表1分钟内最多发送一次邮件
"""
# noinspection PyCompatibility
# very_nb_print(credentials)
# noinspection PyTypeChecker
super().__init__(mailhost, fromaddr, toaddrs, subject,
credentials, secure, timeout)
self._is_use_ssl = is_use_ssl
self._current_time = 0
self._time_interval = 3600 if mail_time_interval < 3600 else mail_time_interval # 60分钟发一次群发邮件,以后用钉钉代替邮件,邮件频率限制的太死了。
self._msg_map = dict() # 是一个内容为键时间为值得映射
self._lock = Lock()
def emit0(self, record: logging.LogRecord):
"""
不用这个判断内容
"""
from threading import Thread
if sys.getsizeof(self._msg_map) > 10 * 1000 * 1000:
self._msg_map.clear()
if record.msg not in self._msg_map or time.time() - self._msg_map[record.msg] > self._time_interval:
self._msg_map[record.msg] = time.time()
# print('发送邮件成功')
Thread(target=self.__emit, args=(record,)).start()
else:
very_nb_print(f' 邮件发送太频繁间隔不足60分钟,此次不发送这个邮件内容: {record.msg} ')
def emit(self, record: logging.LogRecord):
"""
Emit a record.
Format the record and send it to the specified addressees.
"""
from threading import Thread
with self._lock:
if time.time() - self._current_time > self._time_interval:
self._current_time = time.time()
Thread(target=self.__emit, args=(record,)).start()
else:
very_nb_print(f' 邮件发送太频繁间隔不足60分钟,此次不发送这个邮件内容: {record.msg} ')
# noinspection PyUnresolvedReferences
def __emit(self, record):
# noinspection PyBroadException
try:
import smtplib
from email.message import EmailMessage
import email.utils
t_start = time.time()
port = self.mailport
if not port:
port = smtplib.SMTP_PORT
smtp = smtplib.SMTP_SSL(self.mailhost, port, timeout=self.timeout) if self._is_use_ssl else smtplib.SMTP(
self.mailhost, port, timeout=self.timeout)
msg = EmailMessage()
msg['From'] = self.fromaddr
msg['To'] = ','.join(self.toaddrs)
msg['Subject'] = self.getSubject(record)
msg['Date'] = email.utils.localtime()
msg.set_content(self.format(record))
if self.username:
if self.secure is not None:
smtp.ehlo()
smtp.starttls(*self.secure)
smtp.ehlo()
smtp.login(self.username, self.password)
smtp.send_message(msg)
smtp.quit()
# noinspection PyPep8
very_nb_print(
f'发送邮件给 {self.toaddrs} 成功,'
f'用时{round(time.time() - t_start, 2)} ,发送的内容是--> {record.msg} \033[0;35m!!!请去邮箱检查,可能在垃圾邮件中\033[0m')
except Exception as e:
# self.handleError(record)
very_nb_print(
f'[log_manager.py] {time.strftime("%H:%M:%S", time.localtime())} \033[0;31m !!!!!! 邮件发送失败,原因是: {e} \033[0m')
class DingTalkHandler(logging.Handler):
_lock_for_remove_handlers = Lock()
def __init__(self, ding_talk_token=None, time_interval=60):
super().__init__()
self.ding_talk_token = ding_talk_token
self._ding_talk_url = f'https://oapi.dingtalk.com/robot/send?access_token={ding_talk_token}'
self._current_time = 0
self._time_interval = time_interval # 最好别频繁发。
self._lock = Lock()
def emit(self, record):
# from threading import Thread
with self._lock:
if time.time() - self._current_time > self._time_interval:
# very_nb_print(self._current_time)
self._current_time = time.time()
self.__emit(record)
# Thread(target=self.__emit, args=(record,)).start()
else:
very_nb_print(f' 此次离上次发送钉钉消息时间间隔不足 {self._time_interval} 秒,此次不发送这个钉钉内容: {record.msg} ')
def __emit(self, record):
message = self.format(record)
very_nb_print(message)
data = {"msgtype": "text", "text": {"content": message, "title": '这里的标题能起作用吗??'}}
try:
self._remove_urllib_hanlder() # 因为钉钉发送也是使用requests实现的,如果requests调用的urllib3命名空间也加上了钉钉日志,将会造成循环,程序卡住。一般情况是在根日志加了钉钉handler。
resp = requests.post(self._ding_talk_url, json=data, timeout=(5, 5))
very_nb_print(f'钉钉返回 : {resp.text}')
except requests.RequestException as e:
very_nb_print(f"发送消息给钉钉机器人失败 {e}")
def __repr__(self):
level = logging.getLevelName(self.level)
return '<%s (%s)>' % (self.__class__.__name__, level) + ' dingtalk token is ' + self.ding_talk_token
@classmethod
def _remove_urllib_hanlder(cls):
for name in ['root', 'urllib3', 'requests']:
cls.__remove_urllib_hanlder_by_name(name)
@classmethod
def __remove_urllib_hanlder_by_name(cls, logger_name):
with cls._lock_for_remove_handlers:
for index, hdlr in enumerate(logging.getLogger(logger_name).handlers):
if 'DingTalkHandler' in str(hdlr):
logging.getLogger(logger_name).handlers.pop(index)
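# Minimal usage sketch for the handlers above (an illustration, not part of nb_log's
# public API; nb_log normally wires these up through its LogManager / get_logger helpers):
#     logger = logging.getLogger('demo')
#     logger.setLevel(logging.DEBUG)
#     logger.addHandler(ColorHandler())
#     # logger.addHandler(DingTalkHandler(ding_talk_token='xxx', time_interval=60))
#     logger.info('colored console record')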
| [
"[email protected]"
] | |
fea6f964339fb23f6f9a008d7407e6133306cc04 | 3740de0d6e43ea140fc09ab314e4c492603ba185 | /scripts/sources/S_EllipsoidTestWaitingTimesACDres.py | af9ab8366b01c94bbb1df7f27d3b20251e3c79dd | [
"MIT"
] | permissive | s0ap/arpmRes | 29c60c65fd3e11be1cc31d46494e5b3ebf6e05ab | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | refs/heads/master | 2022-02-16T05:01:22.118959 | 2019-08-20T16:45:02 | 2019-08-20T16:45:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,056 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_EllipsoidTestWaitingTimesACDres [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_EllipsoidTestWaitingTimesACDres&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=IIDHFACDdTres).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
from numpy import where, diff, linspace
from scipy.io import loadmat
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
plt.style.use('seaborn')
from CONFIG import GLOBAL_DB, TEMPORARY_DB
from ARPM_utils import save_plot, struct_to_dict, date_mtop
from autocorrelation import autocorrelation
from TradeQuoteProcessing import TradeQuoteProcessing
from InvarianceTestEllipsoid import InvarianceTestEllipsoid
# -
# ## Upload the database
try:
db = loadmat(os.path.join(GLOBAL_DB, 'db_US_10yr_Future_quotes_and_trades'), squeeze_me=True)
except FileNotFoundError:
db = loadmat(os.path.join(TEMPORARY_DB, 'db_US_10yr_Future_quotes_and_trades'), squeeze_me=True)
# ## Process the time series, refining the raw data coming from the database
# +
quotes = struct_to_dict(db['quotes'])
trades = struct_to_dict(db['trades'])
dates_quotes = quotes.time_names #
t = quotes.time # time vector of quotes
p_bid = quotes.bid # bid prices
p_ask = quotes.ask # ask prices
q_bid = quotes.bsiz # bid volumes
q_ask = quotes.asiz # ask volumes
dates_trades = trades.time_names #
t_k = trades.time # time vector of trades
p_last = trades.price # last transaction prices
delta_q = trades.siz # flow of traded contracts' volumes
delta_sgn = trades.aggress # trade sign flow
match = trades.mtch # match events: - the "1" value indicates the "start of a match event" while zeros indicates the "continuation of a match event"
# - the db is ordered such that the start of a match event is in the last column corresponding to that event
t, _, _, _, _, _, t_k, _, _, _, _, _ = TradeQuoteProcessing(t, dates_quotes, q_ask, p_ask, q_bid, p_bid, t_k, dates_trades,
p_last, delta_q, delta_sgn, match)
t = t.flatten()
t_k = t_k.flatten()
# ## Compute the gaps between subsequent events
k_0 = where(t_k >= t[0])[0][0] # index of the first trade within the time window
k_1 = where(t_k <= t[-1])[0][-1] # index of the last trade within the time window
ms = (date_mtop(t_k[k_1]) - date_mtop(t_k[k_0])).seconds * 1000 + (date_mtop(t_k[k_1]) - date_mtop(t_k[k_0])).microseconds / 1000
t_k = linspace(t_k[k_0],t_k[k_1], int(ms)) # time window's wall-clock-time vector expressed in milliseconds
delta_t_k = diff(t_k) # waiting times
# -
# ## ACD fit (Requires the external package ACD_Models_FEX)
# +
q = 1 # maximum lag for the duration
p = 1 # maximum lag for the volatility
stdMethod = 1
tmp_dt_n = [0, delta_t_k]
specOut = ACD_Fit(tmp_dt_n.T,'exp', q, p, stdMethod) # fitting
# estimated parameters
c = specOut.w
b = specOut.p
a = specOut.q
# estimated sigma_n
sigma_n = specOut.h.T
# residuals
ACD_epsi = delta_t_k / sigma_n[1:]
# -
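# ## Reference sketch of an exponential ACD(1,1) likelihood (assumption: ACD_Fit above comes
# from the external ACD_Models_FEX package; this helper only illustrates the model being fit)
# +
import numpy as np
from scipy.optimize import minimize


def acd11_negloglik(params, dur):
    # psi[i] = w + a*dur[i-1] + b*psi[i-1];  dur[i] = psi[i]*eps[i] with eps ~ Exp(1)
    w, a_coef, b_coef = params
    dur = np.asarray(dur, dtype=float)
    psi = np.empty_like(dur)
    psi[0] = dur.mean()
    for i in range(1, len(dur)):
        psi[i] = w + a_coef * dur[i - 1] + b_coef * psi[i - 1]
    return np.sum(np.log(psi) + dur / psi)

# e.g. minimize(acd11_negloglik, x0=[delta_t_k.mean()*0.1, 0.1, 0.8], args=(delta_t_k,),
#               bounds=[(1e-12, None), (0.0, 1.0), (0.0, 1.0)])
# -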
# ## Compute autocorrelations at different lags
lag_ = 10
acf = autocorrelation(ACD_epsi, lag_)
# ## Plot the results of the IID test
# +
lag = 10 # lag to be printed
ell_scale = 1.6 # ellipsoid radius scale
fit = 2 # exponential fit
f = figure(figsize=(12,6))
InvarianceTestEllipsoid(delta_t_k, acf[0,1:], lag_, fit, ell_scale, [],
'Invariance test on the residuals of an ACD fit on arrival times', [-4, 19]);
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
| [
"[email protected]"
] | |
255997393c11703c927617a467958a7455c0b86b | c85b91bfdd7eb2fa5a7d6c6a9b722c8548c83105 | /vscode/extensions/ms-python.python-2020.3.69010/languageServer.0.5.31/Typeshed/third_party/2and3/Crypto/Hash/SHA256.pyi | 0469b7cb2b4e995bbd9b60c74f5c7d1c887412a6 | [
"MIT",
"Apache-2.0"
] | permissive | ryangniadek/.dotfiles | ddf52cece49c33664b56f01b17d476cf0f1fafb1 | be272baf6fb7d7cd4f4db1f6812b710196511ffe | refs/heads/master | 2021-01-14T07:43:12.516127 | 2020-03-22T20:27:22 | 2020-03-22T20:27:22 | 242,632,623 | 0 | 0 | MIT | 2020-09-12T17:28:01 | 2020-02-24T02:50:06 | Python | UTF-8 | Python | false | false | 399 | pyi | from typing import Any, Optional
from Crypto.Hash.hashalgo import HashAlgo
class SHA256Hash(HashAlgo):
oid = ... # type: Any
digest_size = ... # type: int
block_size = ... # type: int
def __init__(self, data: Optional[Any] = ...) -> None: ...
def new(self, data: Optional[Any] = ...): ...
def new(data: Optional[Any] = ...): ...
digest_size = ... # type: Any
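# Runtime usage sketch for the module this stub describes (assumes the PyCrypto-style API):
#     from Crypto.Hash import SHA256
#     digest = SHA256.new(b"message").hexdigest()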
| [
"[email protected]"
] | |
47bfc9032bf7353361b1818c44b2797b13363154 | 04d8f0b5a291ec6c3470f4498dd64ab9c1845f96 | /library/third-party/file_formats/pypdf2/pdf_file_merger/info.py | 50f3b155bd7f0b74923a56b302593acd731e7e98 | [] | no_license | volitilov/Python_learn | 8c0f54d89e0ead964320d17eeddeacd5b704b717 | f89e52655f83a9f1105689f0302ef5b0ee30a25c | refs/heads/master | 2022-01-10T13:39:59.237716 | 2019-07-17T11:39:10 | 2019-07-17T11:39:10 | 70,601,503 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,223 | py | from PyPDF2.PdfFileMerger import *
# Initializes a PdfFileMerger object. PdfFileMerger merges multiple
# PDF files into a single PDF file. It can concatenate, slice,
# insert, or do any combination of the above.
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
PdfFileMerger(
strict=True
    # Determines whether the user should be warned about all
    # problems, and also causes some recoverable problems to be
    # treated as fatal. Defaults to True.
)
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
addBookmark(title, pagenum, parent=None)
#
addMetadata(infos)
#
addNamedDestination(title, pagenum)
#
append(fileobj, bookmark=None, pages=None, import_bookmarks=True)
#
close()
#
merge(position, fileobj, bookmark=None, pages=None, import_bookmarks=True)
#
setPageLayout(layout)
#
setPageMode(mode)
#
write(fileobj)
#
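# Usage sketch tying the notes above together (assumes the standard PyPDF2 import path):
#     from PyPDF2 import PdfFileMerger
#     merger = PdfFileMerger(strict=True)
#     merger.append("first.pdf")
#     merger.append("second.pdf", pages=(0, 3))
#     merger.write("merged.pdf")
#     merger.close()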
| [
"[email protected]"
] | |
f6c63ede371e0271643d28518bd43b3d85636c61 | 0547d1826e99eedb959a3463520d73985a3b844e | /Data Scientist with Python Track Github/22-Statistical Thinking in Python (Part 2)/05-Putting it all together a case study/08-Beak length to depth ratio.py | ee9d31d8f9a75fbdc075f31112ca552fb4842913 | [] | no_license | abhaysinh/Data-Camp | 18031f8fd4ee199c2eff54a408c52da7bdd7ec0f | 782c712975e14e88da4f27505adf4e5f4b457cb1 | refs/heads/master | 2022-11-27T10:44:11.743038 | 2020-07-25T16:15:03 | 2020-07-25T16:15:03 | 282,444,344 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,654 | py | """
Beak length to depth ratio
The linear regressions showed interesting information about the beak geometry.
The slope was the same in 1975 and 2012, suggesting that for every millimeter gained in beak length,
the birds gained about half a millimeter in depth in both years. However, if we are interested in the
shape of the beak, we want to compare the ratio of beak length to beak depth. Let's make that comparison.
Remember, the data are stored in bd_1975, bd_2012, bl_1975, and bl_2012.
Instructions
100 XP
1 Make arrays of the beak length to depth ratio of each bird for 1975 and for 2012.
2 Compute the mean of the length to depth ratio for 1975 and for 2012.
3 Generate 10,000 bootstrap replicates each for the mean ratio for 1975 and 2012 using your draw_bs_reps() function.
4 Get a 99% bootstrap confidence interval for the length to depth ratio for 1975 and 2012.
5 Print the results.
"""
# Compute length-to-depth ratios
ratio_1975 = bl_1975 / bd_1975
ratio_2012 = bl_2012 / bd_2012
# Compute means
mean_ratio_1975 = np.mean(ratio_1975)
mean_ratio_2012 = np.mean(ratio_2012)
# Generate bootstrap replicates of the means
bs_replicates_1975 = draw_bs_reps(ratio_1975, np.mean, size=10000)
bs_replicates_2012 = draw_bs_reps(ratio_2012, np.mean, size=10000)
# Compute the 99% confidence intervals
conf_int_1975 = np.percentile(bs_replicates_1975, [0.5, 99.5])
conf_int_2012 = np.percentile(bs_replicates_2012, [0.5, 99.5])
# Print the results
print('1975: mean ratio =', mean_ratio_1975,
'conf int =', conf_int_1975)
print('2012: mean ratio =', mean_ratio_2012,
'conf int =', conf_int_2012)
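# Sketch of the helper the instructions refer to (an assumption - in the course it is
# defined earlier as a thin wrapper around np.random.choice):
import numpy as np

def bootstrap_replicate_1d(data, func):
    # One bootstrap replicate: resample with replacement, then apply the statistic
    return func(np.random.choice(data, size=len(data)))

def draw_bs_reps(data, func, size=1):
    # Draw `size` bootstrap replicates of statistic `func`
    return np.array([bootstrap_replicate_1d(data, func) for _ in range(size)])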
| [
"[email protected]"
] | |
4de7ce33e34de4de06b6dfe1dfa17082a880d8c8 | f0987e17aea6668158cd334c1fbacfe6286d3c77 | /NITA/lib/jnpr/toby/services/cgnat.py | ef4120ad53cc1879e9dad7d03399915732161de7 | [] | no_license | fengyun4623/file | 00bf21f952ea3f95ffc9fe18448b244b26b7fadb | 3966c63d48557b0b94303896eed7a767593a4832 | refs/heads/master | 2023-04-02T05:01:25.066052 | 2020-07-29T16:15:31 | 2020-07-29T16:15:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110,053 | py | # pylint: disable=undefined-variable
# p-ylint: disable=invalid-name
"""Module contains methods for CGNAT"""
__author__ = ['Sumanth Inabathini']
__contact__ = '[email protected]'
__copyright__ = 'Juniper Networks Inc.'
__date__ = '2017'
import re
from jnpr.toby.utils import iputils
from jnpr.toby.services import utils
from jnpr.toby.services.services import services
class cgnat(services):
"""Class contains methods for CGNAT"""
def __init__(self, **kwargs):
"""Constructor method for cgnat class"""
super().__init__(**kwargs)
self.cmd_list = []
self.nat_rule = {}
self.nat_pool = {}
self.port_fwd = {}
self.ss_profile = {}
self.tg_sess = None
self.cmd = None
self.ptr = None
self.num_sess = None
self.pool_map = {}
#self.sset_map = {}
self.nat_pool_rule_map = {}
self.ss_map['nat_pool'] = {}
self.data['nat_pool'] = {}
self.data['sess_xtnsv'] = {}
for key in kwargs:
setattr(self, key, kwargs[key])
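        # Typical driver usage is sketched below (an assumption - the constructor arguments
        # depend on the surrounding Toby framework and testbed):
        #     hCgn = cgnat(dh=router_handle)
        #     hCgn.set_nat_pool(name='nat_pool', addr='60.1.1.0/24', count=2)
        #     hCgn.set_nat_rule(name='nat_rule', src_pool='nat_pool', count=2)
        #     hCgn.verify()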
################################################################
# set methods
################################################################
def set_nat_rule(self, name='nat_rule', **kwargs):
"""Configure NAT rule based on parameters passed
Use the optional argument, 'count', to generate scaling config.
        For example, this will create 10 NAT rules, nat_rule1 through nat_rule10::
set_nat_rule(name='nat_rule', count=10)
To delete a config, call set_nat_rule with action='delete'
For example, To delete translation type::
set_nat_rule(name='nat_rule', action='delete', term=1, trans_type='basic-nat44')
This API will execute
'delete services nat rule nat_rule1 term 1 then translated translation-type basic-nat44'
:param string name:
**OPTIONAL** NAT rule name. Default is 'nat_rule'
:param string action:
**OPTIONAL** Valid values are set,delete,activate,deactivate. Default value is 'set'
:param int count:
**OPTIONAL** Number of NAT Rules to be configured. Default is 1
:param int term:
**OPTIONAL** Term name. Default value is 0
:param int index:
**OPTIONAL** Rule starting index. Default value is 1
:param string dir:
            **OPTIONAL** NAT direction. Default value is 'input'
:param int num_terms:
**OPTIONAL** Number of terms. Default is 1
:param bool pool_scale_term:
**OPTIONAL** Whether to scale poolnames per term. Default is False.
:param bool term_idx_reset:
**OPTIONAL** Whether to reset term index for every rule. Default is False.
:param string src_addr:
**OPTIONAL** From Source IP Address. Default is None
:param int src_addr_step:
**OPTIONAL** From Source IP Address step. Default is 1
:param int src_addr_nw_step:
**OPTIONAL** Number by which source network address will be incremented.
:param int src_addr_nw_step_cnt:
**OPTIONAL** Number of SrcIP network increments after which Src Network address
will be incremented by src_addr_nw_step.
:param string src_low:
**OPTIONAL** Lower limit of the Source IP Addr Range
:param string src_high:
**OPTIONAL** Higher limit of the Source IP Addr Range
:param string src_pfx:
**OPTIONAL** NAT prefix for source translation. Default is None.
:param int src_pfx_step:
**OPTIONAL** Number by which source prefix will be incremented by. Default is 1
:param string list src_pfx_list:
**OPTIONAL** Source Prefix List
:param string dst_addr:
**OPTIONAL** From Destination IP Address. Default is None
:param int dst_addr_step:
**OPTIONAL** From Destination IP Address step. Default is 1
:param string dst_low:
**OPTIONAL** Lower limit of the Destination IP Addr Range
:param string dst_high:
**OPTIONAL** Higher limit of the Destination IP Addr Range
:param int dst_port_low:
**OPTIONAL** Lower limit of the Destination Port Addr Range
:param int dst_port_high:
**OPTIONAL** Higher limit of the Destination Port Addr Range
:param int dst_pfx:
**OPTIONAL** NAT prefix for Destination translation
:param int dst_pfx_step:
**OPTIONAL** Number by which destination prefix will be incremented by. Default is 1
:param string list dst_pfx_list:
**OPTIONAL** Destination Prefix List
:param string clat_pfx:
**OPTIONAL** CLAT prefix
:param int clat_pfx_step:
**OPTIONAL** Number by which clat prefix will be incremented by. Default is 1
:param int rs_name:
**OPTIONAL** Rule-set name
:param int rs_scaling:
**OPTIONAL** To scale rule-set name or not
:param string list from_apps_list:
**OPTIONAL** List of applications to be matched
:param string list from_appsets_list:
**OPTIONAL** List of application sets to be matched
:param string src_pool:
**OPTIONAL** NAT Pool for Source translation
:param string dst_pool:
**OPTIONAL** NAT Pool for Destination translation
:param string dns_alg_pfx:
**OPTIONAL** DNS ALG Prefix
:param string trans_type:
**OPTIONAL** NAT Translation type
:param boot trans_eim:
**OPTIONAL** Flag to set mapping type as EIM.
:param bool trans_eif:
**OPTIONAL** Flat to set filtering type as EIF.
:param bool addr_pool:
**OPTIONAL** Flag to enable address pooling as paired.
:param list trans_eif_pfx_list:
**OPTIONAL** Source prefixes to match for EIF
:param string snat_map_refresh:
**OPTIONAL** Secure NAT Mapping refresh type.
Valid values are inbound, outbound, inbound-outbound
:param int snat_eif_flow_limit:
**OPTIONAL** Secure NAT Mapping EIF Flow limit.
Number of inbound flows to be allowed for a EIF mapping (0..65534)
:param string port_fwd_map:
**OPTIONAL** Port forward mapping
:param bool allow_overlap:
**OPTIONAL** Flag to allow overlapping nat pools
:param bool ams_warm_standby:
**OPTIONAL** Flag to allow NAT on AMS warm standby
:param bool then_syslog:
**OPTIONAL** Flag to enable syslog
:param bool no_trans:
**OPTIONAL** To enable no-translations
:param bool src_any_ucast:
**OPTIONAL** From Source address as any-unicast instead of IP address
:param bool dst_any_ucast:
**OPTIONAL** From Destination address as any-unicast instead of IP address
:return: True if config is successful else False
:rtype: bool
Example::
Python:
hCgn.set_nat_rule(name='rule', **kwargs)
To create rule with all default options:
hCgn.set_nat_rule()
To create rule2:
hCgn.set_nat_rule(name='rule', index=2, **kwargs)
Robot:
hCgn.Set NAT Rule name=rule
hCgn.Set NAT Rule
hCgn.Set NAT Rule name=rule index=2
"""
self.fn_checkin("Configuring NAT Rule")
this = utils.update_opts_from_args(kwargs,
defaults={
'count': 1, 'action': 'set', 'dir': 'input',
'term': 0, 'num_terms': 1, 'rs_name': None,
'pool_scale_term': False, 'term_idx_reset': False,
'src_addr': None, 'src_addr_step': 1,
'src_addr_nw_step': 1, 'src_addr_nw_step_cnt': None,
'src_pfx': None, 'src_pfx_step': 1, 'index': 1,
'dst_addr': None, 'dst_addr_step': 1,
'dst_pfx': None, 'dst_pfx_step': 1,
'clat_pfx': None, 'clat_pfx_step': 1,
})
src_addr = this['src_addr']
dst_addr = this['dst_addr']
src_pfx = this['src_pfx']
dst_pfx = this['dst_pfx']
clat_pfx = this['clat_pfx']
term = this['term']
rule_idx = this['index']
src_addr_cntr = 0
for _ in range(1, this['count']+1):
rule_tag = name + str(rule_idx)
if rule_tag not in self.nat_rule:
self.nat_rule[rule_tag] = {}
self.ptr = self.nat_rule[rule_tag]
#self._update(this)
#self.ptr = this
pool_tag = rule_idx
self.cmd = "{} services nat".format(this['action'])
if this['rs_name']:
self.cmd_add("rule-set {}{} rule {}".format(this['rs_name'], rule_idx, rule_tag))
self.cmd_add("rule {} match-direction {}".format(rule_tag, this['dir']))
self.cmd_add("allow-overlapping-nat-pools", 'allow_overlap', opt='flag')
self.cmd_add("allow-all-nat-on-ams-warm-standby", 'ams_warm_standby', opt='flag')
if this['term_idx_reset']:
term = int(this['term'])
for _ in range(0, this['num_terms']):
self.cmd = "{} services nat rule {} term {}".format(this['action'], rule_tag, term)
if term not in self.nat_rule[rule_tag]:
self.nat_rule[rule_tag][term] = {}
self.ptr = self.nat_rule[rule_tag][term]
self._update(this)
#self.ptr = this
self._cmd_name_tag = rule_tag
self._cmd_mapping = self.nat_pool_rule_map
self.cmd_add("then syslog", 'then_syslog', opt='flag')
self.cmd_add("then no-translation", 'no_trans', opt='flag')
self.cmd_add("from source-address any-unicast", 'src_any_ucast', opt='flag')
self.cmd_add("from destination-address any-unicast", 'dst_any_ucast', opt='flag')
if src_addr is not None:
self.cmd_add("from source-address {}".format(src_addr))
self.ptr['src_addr'] = src_addr
src_addr_cntr += 1
if this['src_addr_nw_step_cnt'] and \
src_addr_cntr % this['src_addr_nw_step_cnt'] == 0:
_src_addr_step_idx = (src_addr_cntr / this['src_addr_nw_step_cnt'])
_src_addr_step = (_src_addr_step_idx * this['src_addr_nw_step'])
src_addr = iputils.incr_ip_subnet(this['src_addr'], _src_addr_step)
else:
src_addr = iputils.incr_ip_subnet(src_addr, this['src_addr_step'])
if dst_addr is not None:
self.cmd_add("from destination-address {}".format(dst_addr))
self.ptr['dst_addr'] = dst_addr
dst_addr = iputils.incr_ip_subnet(dst_addr, this['dst_addr_step'])
if 'src_low' in this and 'src_high' in this:
_range_str = "low {} high {}".format(this['src_low'], this['src_high'])
self.cmd_add("from source-address-range {}".format(_range_str))
if 'dst_low' in this and 'dst_high' in this:
_range_str = "low {} high {}".format(this['dst_low'], this['dst_high'])
self.cmd_add("from destination-address-range {}".format(_range_str))
if 'dst_port_low' in this and 'dst_port_high' in this:
_dst_str = "low {} high {}".format(this['dst_port_low'], this['dst_port_high'])
self.cmd_add("from destination-port range {}".format(_dst_str))
self.cmd_add("from applications", 'from_apps_list')
self.cmd_add("from application-sets", 'from_appsets_list')
self.cmd_add("from source-prefix-list", 'src_pfx_list')
self.cmd_add("from destination-prefix-list", 'dst_pfx_list')
self.cmd_add("then translated source-pool", 'src_pool', tag=pool_tag,
mapping=True)
self.cmd_add("then translated destination-pool", 'dst_pool', tag=pool_tag,
mapping=True)
self.cmd_add("then translated dns-alg-prefix", 'dns_alg_pfx', tag=pool_tag)
pool_tag += 1
if src_pfx is not None:
self.cmd_add("then translated source-prefix {}".format(src_pfx))
self.ptr['src_pfx'] = src_pfx
src_pfx = iputils.incr_ip_subnet(src_pfx, this['src_pfx_step'])
if clat_pfx is not None:
self.cmd_add("then translated destination-prefix {}".format(clat_pfx))
self.ptr['clat_pfx'] = clat_pfx
clat_pfx = iputils.incr_ip_subnet(clat_pfx, this['clat_pfx_step'])
if dst_pfx is not None:
self.cmd_add("then translated destination-prefix {}".format(dst_pfx))
self.ptr['dst_pfx'] = dst_pfx
dst_pfx = iputils.incr_ip_subnet(dst_pfx, this['dst_pfx_step'])
self.cmd_add("then translated translation-type", 'trans_type')
self.cmd_add("then translated mapping-type endpoint-independent", 'trans_eim',
opt='flag')
self.cmd_add("then translated filtering-type endpoint-independent", 'trans_eif',
opt='flag')
self.cmd_add("then translated address-pooling paired", 'addr_pool', opt='flag')
self.cmd_add("then translated filtering-type endpoint-independent prefix-list",
'trans_eif_pfx_list')
self.cmd_add("then port-forwarding-mappings", 'port_fwd_map')
self.cmd_add("then translated secure-nat-mapping mapping-refresh",
'snat_map_refresh')
self.cmd_add("then translated secure-nat-mapping eif-flow-limit",
'snat_eif_flow_limit')
term += 1
rule_idx += 1
result = self.config()
return self.fn_checkout(result)
def set_nat_pool(self, name='nat_pool', **kwargs):
"""Configure NAT pool based on parameters passed
Use the optional argument, 'count', to generate scaling config.
        For example, this will create 10 NAT pools, nat_pool1 through nat_pool10::
set_nat_pool('nat_pool', count=10)
:param string name:
**OPTIONAL** Name of NAT pool to be configured. Default is 'nat_pool'
:param string action:
**OPTIONAL** Valid values are set,delete,activate,deactivate. Default is 'set'
:param int count:
**OPTIONAL** Number of NAT Pools to be configured. Default is 1.
:param string addr:
**OPTIONAL** Pool address
:param string addr_low:
**OPTIONAL** Pool address range - low
:param string addr_high:
**OPTIONAL** Pool address range - high
:param int addr_range_step:
**OPTIONAL** Step by which pool address low and high ips need to be incremented
:param int port_low:
**OPTIONAL** Pool port range - low
:param int port_high:
**OPTIONAL** Pool port range - high
:param bool port_range_random:
**OPTIONAL** Flag for Pool port range random allocation
:param int port_limit:
**OPTIONAL** Port limit per address
:param int map_to:
**OPTIONAL** Default Mapping timeout (120s-86400s)
:param int app_to:
**OPTIONAL** APP Mapping timeout (120s-86400s)
:param int eim_to:
**OPTIONAL** EIM timeout (120s-86400s)
:param bool addr_alloc_rr:
**OPTIONAL** Flag to set Address-allocation to round-robin
:param bool port_pres_parity:
**OPTIONAL** Flag to set port preserve parity
(set services nat pool <pool> port preserve-parity)
:param bool port_pres_range:
**OPTIONAL** Flag to set port preserve range
(set services nat pool <pool> port preserve-range)
:param bool port_auto:
**OPTIONAL** Flag to set automatic port allocation
(set services nat pool <pool> port automatic)
:param bool port_auto_auto:
**OPTIONAL** Flag to set automatic port allocation
(set services nat pool <pool> port automatic auto)
:param bool port_auto_random:
**OPTIONAL** Flag to set automatic port random allocation
(set services nat pool <pool> port automatic random-allocation)
:param bool port_pba:
**OPTIONAL** Flag to set port to secured port block allocation
:param int port_pba_blk_to:
**OPTIONAL** Sets PBA active block timeout
:param int port_pba_blk_size:
**OPTIONAL** Sets PBA block size
:param int port_pba_max_blks:
**OPTIONAL** Sets PBA max blocks per address
:param bool port_detnat:
**OPTIONAL** Flag to enable port deterministic-PBA
:param string port_detnat_blk_size:
**OPTIONAL** Sets port deterministic-PBA block size
:param bool port_detnat_incl_bndry_addrs:
**OPTIONAL** Flag to include port deterministic-PBA boundary addresses
:param int snmp_trap_low:
**OPTIONAL** SNMP Trap Address port range - low
:param int snmp_trap_high:
**OPTIONAL** SNMP Trap Address port range - high
:return: True if config is successful else False
:rtype: bool
Example::
Python:
set_nat_pool(name='nat_pool', **kwargs)
Robot:
Set NAT Pool name=nat_pool
"""
self.fn_checkin("Configuring NAT pool")
this = utils.update_opts_from_args(kwargs,
defaults={
'count': 1, 'action': 'set',
'addr': None, 'addr_range_step': 1,
'addr_low': None, 'addr_high': None,
'port_low': None, 'port_high': None
})
(addr, addr_low, addr_high) = (this['addr'], this['addr_low'], this['addr_high'])
for iter_ii in range(1, this['count'] + 1):
# rule_tag = name + iter_ii
#tag = iter_ii
pool_name = name + str(iter_ii)
if pool_name not in self.nat_pool:
self.nat_pool[pool_name] = {}
self.ptr = self.nat_pool[pool_name]
self._update(this)
self.cmd = "{} services nat pool {}".format(this['action'], pool_name)
if addr is not None:
self.cmd_add("address {}".format(addr))
self.ptr['addr'] = addr
addr = iputils.incr_ip_subnet(this['addr'], iter_ii)
if addr_low is not None and addr_high is not None:
self.cmd_add("address-range low {} high {}".format(addr_low, addr_high))
self.ptr['addr_low'] = addr_low
self.ptr['addr_high'] = addr_high
addr_low = iputils.incr_ip(this['addr_low'], iter_ii * this['addr_range_step'])
addr_high = iputils.incr_ip(this['addr_high'], iter_ii * this['addr_range_step'])
if this['port_low'] is not None and this['port_high'] is not None:
if 'port_range_random' in this and this['port_range_random']:
_range_str = "low {} high {}".format(this['port_low'], this['port_high'])
self.cmd_add("port range {} random-allocation".format(_range_str))
else:
self.cmd_add("port range low {} high {}".format(this['port_low'],
this['port_high']))
self.cmd_add("limit-ports-per-address", 'port_limit')
self.cmd_add("mapping-timeout", 'map_to')
self.cmd_add("app-mapping-timeout", 'app_to')
self.cmd_add("ei-mapping-timeout", 'eim_to')
self.cmd_add("address-allocation round-robin", 'addr_alloc_rr', opt='flag')
self.cmd_add("port preserve-parity", 'port_pres_parity', opt='flag')
self.cmd_add("port preserve-range", 'port_pres_range', opt='flag')
self.cmd_add("port automatic", 'port_auto', opt='flag')
self.cmd_add("port automatic auto", 'port_auto_auto', opt='flag')
self.cmd_add("port automatic random-allocation", 'port_auto_random', opt='flag')
if 'snmp_trap_low' in this and 'snmp_trap_high' in this:
_snmp_str = "low {} high {}".format(this['snmp_trap_low'], this['snmp_trap_high'])
self.cmd_add("snmp-trap-thresholds address-port {}".format(_snmp_str))
#self.cmd_add("port secured-port-block-allocation", 'port_pba', opt='flag')
#_cmd = self.cmd = "{} services nat pool {}".format(this['action'], pool_name)
_cmd = self.cmd
self.cmd = _cmd + " port secured-port-block-allocation"
self.cmd_add("", 'port_pba', opt='flag')
self.cmd_add("active-block-timeout", 'port_pba_blk_to')
self.cmd_add("block-size", 'port_pba_blk_size')
self.cmd_add("max-blocks-per-address", 'port_pba_max_blks')
#self.cmd_add("port deterministic-port-block-allocation", 'port_detnat', opt='flag')
self.cmd = _cmd + " port deterministic-port-block-allocation"
self.cmd_add("", 'port_detnat', opt='flag')
self.cmd_add("block-size", 'port_detnat_blk_size')
self.cmd_add("include-boundary-addresses", 'port_detnat_incl_bndry_addrs', opt='flag')
# when the action is not 'set' and there are no other command
# options to be executed
if len(self.cmd_list) == 0 and this['action'] != 'set':
self.cmd_add("")
result = self.config()
self.log('INFO', "NAT Pool: {}".format(self.nat_pool))
return self.fn_checkout(result)
def set_port_forward_rule(self, name='port_fwd_rule', **kwargs):
"""Configure Port Forward Rule based on the parameters passed.
:param string name:
**OPTIONAL** Name of Port Forward Rule to be configured. Default is 'port_fwd_rule'
:param string action:
**OPTIONAL** Valid values are set,delete,activate,deactivate. Default value is 'set'
:param int dst_port:
**OPTIONAL** Destined port
:param int trans_port:
**OPTIONAL** Translated port
:return: True if config is successful else False
:rtype: bool
Example::
Python:
hCgn.set_port_forward(name='port_fwd_rule', **kwargs)
hCgn.set_port_forward()
Robot:
hCgn.Set Port Forward name=port_fwd_rule dst_port=1234 trans_port=23
"""
self.fn_checkin("Configuring Port Forward Rule")
this = utils.update_opts_from_args(kwargs, defaults={'action': 'set'})
if 'dst_port' not in this or 'trans_port' not in this:
raise MissingMandatoryArgument('dst_port/trans_port')
if name not in self.port_fwd:
self.port_fwd[name] = {}
self.ptr = self.port_fwd[name]
self._update(this)
self.cmd = "{} services nat port-forwarding {}".format(this['action'], name)
self.cmd_add("destined-port {} translated-port {}".format(this['dst_port'],
this['trans_port']))
result = self.config()
return self.fn_checkout(result)
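        # For reference, the example above expands to device configuration of roughly this
        # shape (a sketch - exact CLI depends on the platform and release):
        #     set services nat port-forwarding port_fwd_rule destined-port 1234 translated-port 23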
################################################################
# Get/Verify methods
################################################################
def verify(self, **kwargs):
"""Wrapper for minimum verifications to be done for CGNAT
This will call services.verify() to do all basic Services verifications required.
:return: True if successful else False
:rtype: bool
Example::
Python:
hCgn.verify()
Robot:
hCgn.Verify
"""
self.fn_checkin("Verifying CGNAT")
#self._get_tg_sess(**kwargs)
#self._get_tg_port_and_config_mapping(**kwargs)
self._get_tg_port_and_config_mapping(**kwargs)
super().verify(**kwargs)
# self.verify_sess_count(**kwargs)
# self.tg_sess = kwargs.pop('tg_sess')
self.verify_nat_pool_detail(**kwargs)
self.verify_sessions_extensive(**kwargs)
# self.verify_nat_eim(tg_sess=self.tg_sess, **kwargs)
# self.verify_nat_mappings_detail(tg_sess=self.tg_sess, **kwargs)
# self.verify_nat_app_mappings(tg_sess=self.tg_sess, **kwargs)
# self.verify_nat_mappings_summary(tg_sess=self.tg_sess, **kwargs)
# self.verify_nat_statistics(tg_sess=self.tg_sess, **kwargs)
# self.verify_nat_syslogs(tg_sess=self.tg_sess, **kwargs)
return self.fn_checkout()
def get_nat_pool_detail(self, name=None):
"""Fetch NAT Pool details as dictionary
:param string name:
**OPTIONAL** Name of the NAT pool. If name is passed, details of that NAT Pool
are fetched. Else, details for all the pools configured (saved in the object)
will be fetched. Default is None
:return: True if command output is processed successfully
:rtype: bool
Example::
Python:
hCgn.get_nat_pool_detail()
hCgn.get_nat_pool_detail(name='pool1')
Robot:
hCgn.Get NAT Pool Detail
hCgn.Get NAT Pool Detail name=pool1
"""
self.fn_checkin("Fetching NAT Pool detail")
if 'nat_pool' not in self.data:
self.data['nat_pool'] = {}
# If name is specified, get details for that pool else for all the pools configured
# Need to take care of scaling scenarios. Don't need to get details of
# all the pools
names = [name] if name is not None else self.nat_pool.keys()
_xpath = 'service-nat-pool-information/sfw-per-service-set-nat-pool'
for pool_name in names:
cmd = 'show services nat pool {} detail'.format(pool_name)
entry = self.get_xml_output(cmd, xpath=_xpath)
if pool_name not in self.data['nat_pool']:
self.data['nat_pool'][pool_name] = {}
self.data['nat_pool'][pool_name]['spic'] = str(entry['interface-name'])
self.data['nat_pool'][pool_name]['sset'] = str(entry['service-set-name'])
ptr = self.data['nat_pool'][pool_name]
pool = entry['service-nat-pool']
ptr['addr_range'] = str(pool['pool-address-range-list']['pool-address-range'])
ptr['addr_range_low'], ptr['addr_range_high'] = ptr['addr_range'].split('-')
ptr['trans_type'] = str(pool['translation-type'])
utils.update_data_from_output(ptr, pool, {
'pool-addresses-in-use': 'addrs_in_use',
'pool-out-of-address-errors': 'out_of_addr_errs',
'pool-port-range': 'port_range',
'pool-ports-in-use': 'ports_in_use',
'pool-out-of-port-errors': 'out_of_port_errs',
'pool-max-ports-in-use': 'max_ports_in_use',
'max-port-blocks-used': 'max_blks_used',
'port-blocks-in-use': 'ports_blks_in_use',
'port-block-allocation-errors': 'port_blk_alloc_errs',
'port-block-mem-alloc-failure-errors': 'port_blk_mem_alloc_fail_errs',
'pool-parity-port-errors': 'parity_port_errs',
'pool-preserve-range-errors': 'preserve_range_errs',
'pool-configured-port-range': 'configured_port_range',
'pool-preserve-range-enabled': 'preserve_range_enabled',
'pool-app-port-errors': 'app_errs',
'pool-app-exceed-port-limit-errors': 'app_xcd_port_lmt_errs',
'port-block-type': 'blk_type',
'port-blocks-limit-exceeded-errors': 'port_blk_limit_exceed_errs',
'detnat-subscriber-exceeded-port-limits': 'detnat_subs_exceed_port_limits',
'pool-users': 'pool_users',
'pool-mem-alloc-errors': 'pool_mem_alloc_err',
'eif-inbound-session-count': 'eif_in_sess_cnt',
'eif-inbound-session-limit-exceed-drop': 'eif_in_sess_lmt_xcd_drop',
})
self.log('INFO', "NAT pool detail: {}".format(self.data['nat_pool']))
return self.fn_checkout(True)
def verify_nat_pool_detail(self, name=None, **kwargs):
"""Verify NAT pool detail
:param string name:
**OPTIONAL** Name of NAT pool. If name is passed, details for that NAT Pool
are verified, else details for all pools configured (saved in object)
will be verified. Default is None
:return: True if successful else False
:rtype: bool
Example::
Python:
hCgn.verify_nat_pool_detail(name='nat_pool1')
hCgn.verify_nat_pool_detail()
Robot:
hCgn.Verify NAT Pool Detail name=nat_pool1
hCgn.Verify NAT Pool Detail
"""
self.fn_checkin("Verifying NAT pool detail")
self.get_nat_pool_detail(name)
self._get_tg_port_and_config_mapping(**kwargs)
self._get_ss_from_pool()
result = True
pool_names = [name] if name is not None else self.data['nat_pool'].keys()
self.log('INFO', "Verifying details for pools: {}".format(pool_names))
for pool_name in pool_names:
# for tg_if in self.tg_cfg_map:
#_cfg_map = self.tg_cfg_map[tg_if]
self.log('INFO', "Verifying details for pool, {}".format(pool_name))
#pool_data = self.data['nat_pool'][_cfg_map['nat_pool']]
pool_data = self.data['nat_pool'][pool_name]
#_pool_sess_cnt = self.tg_sess_cnt['nat_pool'][pool_name]['tot_sess']
#sset = self.pool_map[pool_name]['sset']
#spic = self.pool_map[pool_name]['spic']
_pool_sess_cnt = self.pool_map[pool_name]['total_sess']
if re.search(r'twice.napt', pool_data['trans_type'], re.IGNORECASE):
exp_data = {}
elif re.search(r'deterministic.napt', pool_data['trans_type'], re.IGNORECASE):
                exp_data = {
                    'out_of_port_errs': 0,
                    #'ports_in_use': self.pool_map[pool_name]['tot_sess'],
                    'ports_in_use': _pool_sess_cnt, 'max_ports_in_use': _pool_sess_cnt,
                    'parity_port_errs': 0, 'preserve_range_errs': 0, 'app_errs': 0,
                    'app_xcd_port_lmt_errs': 0, 'detnat_subs_exceed_port_limits': 0,
                    'eif_in_sess_lmt_xcd_drop': 0, 'port_blk_alloc_errs': 0,
                    'pool_mem_alloc_err': 0,
                }
elif re.search(r'napt|nat64', pool_data['trans_type'], re.IGNORECASE):
exp_data = {'out_of_port_errs': 0,
#'ports_in_use': self.pool_map[pool_name]['tot_sess'],
'ports_in_use': _pool_sess_cnt, 'max_ports_in_use': _pool_sess_cnt}
elif re.search(r'dynamic', pool_data['trans_type'], re.IGNORECASE):
exp_data = {'out_of_addr_errs': 0, 'addrs_in_use': _pool_sess_cnt}
if 'addr' in self.nat_pool[pool_name]:
pool_addr = self.nat_pool[pool_name]['addr']
addr_range = iputils.get_network_ip_range(pool_addr)
exp_data['addr_range_low'], exp_data['addr_range_high'] = addr_range.split('-')
if pool_name in self.pool_map:
exp_data['spic'] = self.pool_map[pool_name]['spic']
exp_data['sset'] = self.pool_map[pool_name]['sset']
for key in kwargs:
if 'tg_sess' in key:
continue
exp_data[key] = kwargs[key]
self.log('INFO', "Verifying expected({}) and actual({}) data".format(exp_data,
pool_data))
#result &= utils.cmp_dicts(exp_data, pool_data)
if utils.cmp_dicts(exp_data, pool_data):
self.log('INFO', "Verification details for pool, {}, PASSED".format(pool_name))
else:
self.log('INFO', "Verification for pool, {}, FAILED".format(pool_name))
result = False
return self.fn_checkout(result)
def get_nat_eim_mappings(self, **kwargs):
"""Return NAT EIM mappings as dictionary
NAT eim mappings output is parsed and dictionary is returned.
An exception will be raised if there's no output.
:param string private_ip:
**OPTIONAL** Private IP to be used for filtering the mappings output
:param string public_ip:
**OPTIONAL** Public IP to be used for filtering the mappings output
:return: Dictionary containing the NAT EIM mappings data
:rtype: dict
Example::
Python:
hCgn.get_nat_eim_mappings()
Robot:
hCgn.Get NAT EIM Mappings
"""
self.fn_checkin("Retrieving NAT EIM mappings")
cmd = 'show services nat mappings endpoint-independent'
if 'private_ip' in kwargs and kwargs['private_ip'] is not None:
cmd += ' private {}'.format(kwargs['private_ip'])
if 'public_ip' in kwargs and kwargs['public_ip'] is not None:
cmd += ' public {}'.format(kwargs['public_ip'])
output = self.dh.cli(command=cmd).response()
if len(output.splitlines()) < 2:
            return self.fn_checkout(False, err_msg="No valid output found")
mapping = {}
(pcp_ip, b4_ip) = (None, None)
(spic, sset, nat_pool, int_ip, int_port) = (None, None, None, None, None)
self.data['eim_maps'] = data = self.dd()
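        # EIM mappings are keyed as data[spic][sset][nat_pool][int_ip][int_port]; for PCP
        # or DS-Lite (softwire) flows the PCP client IP or B4 IP is inserted as an extra
        # key before the internal IP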
for line in output.splitlines():
if len(line) <= 0:
continue
match = re.search(r'Interface:\s*(.*), Service set:\s*(.*)', line, re.IGNORECASE)
if match:
# Do we have any data to be stored? Store it first
if 'nat_ip' in mapping and mapping['nat_ip'] is not None:
# Do we have PCP Mapping, then PCP IP is also a key
if pcp_ip is not None:
_map_ptr = data[spic][sset][nat_pool][pcp_ip][int_ip][int_port] = {}
elif b4_ip is not None:
_map_ptr = data[spic][sset][nat_pool][b4_ip][int_ip][int_port] = {}
else:
_map_ptr = data[spic][sset][nat_pool][int_ip][int_port] = {}
for key in mapping:
_map_ptr[key] = mapping[key]
mapping = {}
pcp_ip = b4_ip = None
spic, sset = match.group(1), match.group(2)
self.log('INFO', 'Service pics:{} Service set:{}'.format(spic, sset))
continue
match = re.search(r'NAT pool:\s*(.*)', line, re.IGNORECASE)
if match:
nat_pool = match.group(1)
continue
match = re.search(r'PCP Client\s*:\s*(' + utils.get_regex_ip() +
r')\s*PCP lifetime\s*:\s*(\d+)', line, re.IGNORECASE)
if match:
# Do we have any data to be stored (Non-PCP output)
if 'nat_ip' in mapping and mapping['nat_ip'] is not None:
if pcp_ip is not None: # Do we have PCP Mapping, then PCP IP is also a key
_map_ptr = data[spic][sset][nat_pool][pcp_ip][int_ip][int_port] = {}
else:
_map_ptr = data[spic][sset][nat_pool][int_ip][int_port] = {}
for key in mapping:
_map_ptr[key] = mapping[key]
mapping = {}
pcp_ip = None
pcp_ip = match.group(1)
if iputils.is_ip_ipv6(pcp_ip):
pcp_ip = iputils.normalize_ipv6(pcp_ip)
                mapping['pcp_lifetime'] = match.group(2)  # group(2) is the lifetime; group(1) is the PCP client IP
continue
match = re.search(r'Mapping\s*:\s*(' + utils.get_regex_ip() +
r')\s*:\s*(\d+)\s*-->\s*(' + utils.get_regex_ip() +
r')\s*:\s*(\d+)', line, re.IGNORECASE)
if match:
# Do we have any data to be stored(Non-PCP output)? Store it
# first
if 'nat_ip' in mapping and mapping['nat_ip'] is not None:
# Do we have SW flow, then B4 IP is also a key
if b4_ip is not None:
_map_str = data[spic][sset][nat_pool][b4_ip][int_ip][int_port] = {}
else:
_map_str = data[spic][sset][nat_pool][int_ip][int_port] = {}
for key in mapping:
_map_str[key] = mapping[key]
mapping = {}
b4_ip = None
int_ip, int_port = match.group(1), match.group(2)
if iputils.is_ip_ipv6(int_ip):
int_ip = iputils.normalize_ipv6(int_ip)
mapping['nat_ip'] = match.group(3)
mapping['nat_port'] = match.group(4)
continue
match = re.search(r'Session Count\s*:\s*(\d+)', line, re.IGNORECASE)
if match:
mapping['sess_cnt'] = match.group(1)
continue
match = re.search(r'Mapping State\s*:\s+((\w+)\s+\((\d+)s\)|(\w+))', line,
re.IGNORECASE)
if match:
if match.group(3) is not None:
mapping['state'] = match.group(2).lower()
mapping['state_to'] = match.group(3)
else:
mapping['state'] = match.group(1).lower()
match = re.search(r'B4 Address\s+:\s+(' + utils.get_regex_ip() + ')', line,
re.IGNORECASE)
if match:
mapping['b4_ip'] = b4_ip = iputils.normalize_ipv6(match.group(1))
continue
if pcp_ip is not None:
_map_str = data[spic][sset][nat_pool][pcp_ip][int_ip][int_port] = {}
elif b4_ip is not None:
_map_str = data[spic][sset][nat_pool][b4_ip][int_ip][int_port] = {}
else:
_map_str = data[spic][sset][nat_pool][int_ip][int_port] = {}
for key in mapping:
_map_str[key] = mapping[key]
self.log('INFO', 'NAT EIM mappings dump : {}'.format(data))
self.fn_checkout()
return data
def verify_nat_eim_mappings(self, **kwargs):
"""Verify NAT EIM mappings
Fetches NAT EIM data from the output by calling get_nat_eim_mappings.
This data is verified against the data fetched from configuration and traffic generator.
Number of mappings to be verified can be limited by 'limit' or 'limit_perc'.
Random mappings, to be verified, are picked from sessions to be sent by TG.
:param int limit:
**OPTIONAL** Number of mappings to be verified.
:param int limit_perc:
**OPTIONAL** Percentage number of mappings to be verified. Default is 1
:return: True on successful verification else raises an exception
:rtype: True or exception
Example::
Python:
hCgn.verify_nat_eim_mappings()
Robot:
hCgn.Verify NAT EIM Mappings
"""
self.fn_checkin("Verifying NAT mappings EIM")
result = True
# Fetch NAT EIM output as dictionary
act_data = self.get_nat_eim_mappings()
self._get_tg_port_and_config_mapping(**kwargs)
for tg_if in self.tg_cfg_map:
_cfg_map = self.tg_cfg_map[tg_if]
# Iterate over list of random mappings indices
for sess_idx in _cfg_map['rand_sess_idx_list']:
                # We need to verify if there's a mapping for this session on the router
src_ip = self.tg_sess[tg_if]['sess_list'][sess_idx]['src_ip']
src_port = str(self.tg_sess[tg_if]['sess_list'][sess_idx]['src_port'])
flow = self._get_src_ip_port_flow_from_data(src_ip, src_port, _cfg_map, act_data)
if flow is None:
continue
if 'nat_ip' in flow and flow['nat_ip'] is None:
continue
result &= self._is_nat_ip_in_pool(flow, _cfg_map)
result &= self._is_nat_port_in_pool(flow, _cfg_map)
result &= utils.cmp_dicts(exp_data={'state': 'active'}, act_data=flow)
return self.fn_checkout(result)
def get_nat_app_mappings(self, **kwargs):
"""Return NAT APP mappings as dictionary
:param string private_ip:
**OPTIONAL** Private IP to be used for filtering the mappings output
:param string public_ip:
**OPTIONAL** Public IP to be used for filtering the mappings output
:return: Dictionary containing the NAT APP mappings data
:rtype: dict
Example::
Python:
hCgn.get_nat_app_mappings()
Robot:
hCgn.Get NAT APP Mappings
"""
self.fn_checkin("Retrieving NAT APP mappings")
cmd = 'show services nat mappings address-pooling-paired'
if 'private_ip' in kwargs and kwargs['private_ip'] is not None:
cmd += ' private {}'.format(kwargs['private_ip'])
if 'public_ip' in kwargs and kwargs['public_ip'] is not None:
cmd += ' public {}'.format(kwargs['public_ip'])
output = self.dh.cli(command=cmd).response()
if len(output.splitlines()) < 2:
return self.fn_checkout(False, "No valid output found")
mapping = {}
(spic, sset, nat_pool, int_ip, b4_ip) = (None, None, None, None, None)
self.data['app_maps'] = data = self.dd()
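        # APP mappings are keyed as data[spic][sset][nat_pool][int_ip] (no port, since
        # address-pooling-paired mappings are per internal host); DS-Lite flows add the
        # B4 IP as an extra key before the internal IP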
for line in output.splitlines():
if len(line) <= 0:
continue
match = re.search(r'Interface:\s*(.*), Service set:\s*(.*)', line, re.IGNORECASE)
if match:
if 'nat_ip' in mapping and mapping['nat_ip'] is not None:
# Do we have SW flow, then B4 IP is also a key
if b4_ip is not None:
_map_ptr = data[spic][sset][nat_pool][b4_ip][int_ip] = {}
else:
_map_ptr = data[spic][sset][nat_pool][int_ip] = {}
for key in mapping:
_map_ptr[key] = mapping[key]
mapping = {}
b4_ip = None
spic, sset = match.group(1), match.group(2)
continue
match = re.search(r'NAT pool:\s*(.*)', line, re.IGNORECASE)
if match:
if 'nat_ip' in mapping and mapping['nat_ip'] is not None:
# Do we have SW flow, then B4 IP is also a key
if b4_ip is not None:
_map_ptr = data[spic][sset][nat_pool][b4_ip][int_ip] = {}
else:
_map_ptr = data[spic][sset][nat_pool][int_ip] = {}
for key in mapping:
_map_ptr[key] = mapping[key]
mapping = {}
b4_ip = None
nat_pool = match.group(1)
continue
match = re.search(r'Mapping\s*:\s*(' + utils.get_regex_ip() + r')\s*-->\s*(' +
utils.get_regex_ip() + r')\s*', line, re.IGNORECASE)
if match:
if 'nat_ip' in mapping and mapping['nat_ip'] is not None:
# Do we have SW flow, then B4 IP is also a key
if b4_ip is not None:
_map_ptr = data[spic][sset][nat_pool][b4_ip][int_ip] = {}
else:
_map_ptr = data[spic][sset][nat_pool][int_ip] = {}
for key in mapping:
_map_ptr[key] = mapping[key]
mapping = {}
b4_ip = None
int_ip = iputils.normalize_ipv6(match.group(1)) \
if iputils.is_ip_ipv6(match.group(1)) else match.group(1)
mapping['nat_ip'] = match.group(2)
continue
match = re.search(r'Ports In Use\s*:\s*(\d+)', line, re.IGNORECASE)
if match:
mapping['ports_in_use'] = match.group(1)
continue
match = re.search(r'Session Count\s*:\s*(\d+)', line, re.IGNORECASE)
if match:
mapping['sess_cnt'] = match.group(1)
continue
match = re.search(r'Mapping State\s*:\s+((\w+)\s+\((\d+)s\)|(\w+))', line,
re.IGNORECASE)
if match:
if match.group(3) is not None:
mapping['state'] = match.group(2).lower()
mapping['state_to'] = match.group(3)
else:
mapping['state'] = match.group(1).lower()
continue
match = re.search(r'B4 Address\s+:\s+(' + utils.get_regex_ip() + r')', line,
re.IGNORECASE)
if match:
mapping['b4_ip'] = b4_ip = iputils.normalize_ipv6(match.group(1))
continue
if spic is not None:
if b4_ip is not None:
_map_ptr = data[spic][sset][nat_pool][b4_ip][int_ip] = {}
else:
_map_ptr = data[spic][sset][nat_pool][int_ip] = {}
for key in mapping:
_map_ptr[key] = mapping[key]
self.log('INFO', "NAT APP mappings dump : {}".format(data))
self.fn_checkout()
return data
def verify_nat_app_mappings(self, **kwargs):
"""Verify NAT APP mappings
Fetches NAT APP data from the output by calling get_nat_app_mappings.
This data is verified against the data fetched from configuration and traffic generator.
Number of mappings to be verified can be limited by 'limit' or 'limit_perc'.
Random mappings, to be verified, are picked from sessions to be sent by TG.
:param int limit:
**OPTIONAL** Number of mappings to be verified.
:param int limit_perc:
**OPTIONAL** Percentage number of mappings to be verified. Default is 1
:return: True on successful verification else raises an exception
:rtype: True or exception
Example::
Python:
hCgn.verify_nat_app_mappings()
Robot:
hCgn.Verify NAT APP Mappings
"""
self.fn_checkin("Verifying NAT APP mappings")
result = True
# Fetch NAT APP output as dictionary
act_data = self.get_nat_app_mappings()
self._get_tg_port_and_config_mapping(**kwargs)
for tg_if in self.tg_cfg_map:
_cfg_map = self.tg_cfg_map[tg_if]
# Iterate over list of random mappings indices
for sess_idx in _cfg_map['rand_sess_idx_list']:
                # We need to verify if there's a mapping for this session on the router
src_ip = self.tg_sess[tg_if]['sess_list'][sess_idx]['src_ip']
flow = self._get_src_ip_flow_from_data(src_ip, _cfg_map, act_data)
if flow is None:
continue
exp_app_data = {'state': 'active'}
exp_app_data['ports_in_use'] = exp_app_data['sess_cnt'] = _cfg_map['tot_sess']
result &= utils.cmp_dicts(exp_data=exp_app_data, act_data=flow)
result &= self._is_nat_ip_in_pool(flow, _cfg_map)
return self.fn_checkout(result)
# def get_nat_mappings_detail(self, **kwargs):
def get_nat_mappings_detail(self, pool_name=None):
"""Return NAT mappings detail as dictionary
:param string pool_name:
**OPTIONAL** NAT Pool name to be used for filtering the output
:return: Dictionary containing the NAT mappings detail data
:rtype: dict
Example::
Python:
hCgn.get_nat_mappings_detail()
Robot:
hCgn.Get NAT Mappings Detail
"""
self.fn_checkin("Retrieving NAT mappings detail")
cmd = 'show services nat mappings detail'
if pool_name is not None:
cmd += ' {}'.format(pool_name)
output = self.dh.cli(command=cmd).response().splitlines()
if len(output) < 2:
return self.fn_checkout(False, "No valid output found")
data = self.dd()
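        # The detail output combines per-host APP data (keyed up to the internal IP,
        # stored under 'app_*' keys) and per-session EIM data (keyed by internal IP and
        # port, stored under 'eim_*' keys)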
count = -1
for line in output:
count += 1
if len(line) <= 0:
continue
match = re.search(r'Interface:\s*(.*), Service set:\s*(\w+)', line, re.IGNORECASE)
if match:
spic, sset = match.group(1), match.group(2)
continue
match = re.search(r'NAT pool:\s*(\w+)', line, re.IGNORECASE)
if match:
nat_pool = match.group(1)
continue
match = re.search(r'Mapping\s*:\s*(' + utils.get_regex_ip() +
r')\s*:\s*(\d+)\s*-->\s*(' + utils.get_regex_ip() +
r')\s*:\s*(\d+)', line, re.IGNORECASE)
if match:
int_ip = iputils.normalize_ipv6(match.group(1)) \
if iputils.is_ip_ipv6(match.group(1)) else match.group(1)
int_port = match.group(2)
data[spic][sset][nat_pool][int_ip][int_port]['eim_nat_ip'] = match.group(3)
data[spic][sset][nat_pool][int_ip][int_port]['eim_nat_port'] = match.group(4)
match = re.search(r'Session Count\s*:\s*(\d+)', output[count + 1], re.IGNORECASE)
if match:
data[spic][sset][nat_pool][int_ip][int_port]['eim_sess_cnt'] = match.group(1)
match = re.search(r'Mapping State\s*:\s+((\w+)\s+\((\d+)s\)|(\w+)\s*)',
output[count + 2], re.IGNORECASE)
if match:
if match.group(3) is not None:
data[spic][sset][nat_pool][int_ip][int_port]['eim_state'] = \
match.group(2).lower()
data[spic][sset][nat_pool][int_ip][int_port]['eim_state_to'] = \
match.group(3)
else:
data[spic][sset][nat_pool][int_ip][int_port]['eim_state'] = \
match.group(1).lower()
match = re.search(r'Ports In Use\s*:\s*(\d+)', line, re.IGNORECASE)
if match:
data[spic][sset][nat_pool][int_ip]['app_ports_in_use'] = match.group(1)
continue
match = re.search(r'Session Count\s*:\s*(\d+)', line, re.IGNORECASE)
if match:
data[spic][sset][nat_pool][int_ip]['app_sess_cnt'] = match.group(1)
continue
match = re.search(r'Mapping State\s*:\s+((\w+)\s+\((\d+)s\)|(\w+)\s*)', line,
re.IGNORECASE)
if match:
if match.group(3) is not None:
data[spic][sset][nat_pool][int_ip]['app_state'] = match.group(2).lower()
data[spic][sset][nat_pool][int_ip]['app_state_to'] = match.group(3)
else:
data[spic][sset][nat_pool][int_ip]['app_state'] = match.group(1).lower()
continue
self.log('INFO', "NAT mappings detail data dump: {}".format(data))
self.fn_checkout()
return data
def verify_nat_mappings_detail(self, **kwargs):
"""Verify NAT Mappings detail
Fetches NAT mappings data from the output by calling get_nat_mappings_detail
This data is verified against the data fetched from configuration and traffic generator.
Number of mappings to be verified can be limited by 'limit' or 'limit_perc'.
Random mappings, to be verified, are picked from sessions to be sent by TG.
:param int limit:
**OPTIONAL** Number of mappings to be verified.
:param int limit_perc:
**OPTIONAL** Percentage number of mappings to be verified. Default is 1
:return: True on successful verification else raises an exception
:rtype: True or exception
Example::
Python:
hCgn.verify_nat_mappings_detail()
Robot:
hCgn.Verify Mappings Detail
"""
self.fn_checkin("Verifying NAT mappings detail")
result = True
# Fetch Mappings details
act_data = self.get_nat_mappings_detail()
self._get_tg_port_and_config_mapping(**kwargs)
for tg_if in self.tg_cfg_map:
_cfg_map = self.tg_cfg_map[tg_if]
for sess_idx in _cfg_map['rand_sess_idx_list']:
                # We need to verify if there's a mapping for this session on the router
src_ip = self.tg_sess[tg_if]['sess_list'][sess_idx]['src_ip']
src_port = str(self.tg_sess[tg_if]['sess_list'][sess_idx]['src_port'])
# Verify APP
flow = self._get_src_ip_flow_from_data(src_ip, _cfg_map, act_data)
exp_app_data = {'app_state': 'active'}
exp_app_data['app_ports_in_use'] = _cfg_map['tot_sess']
exp_app_data['app_sess_cnt'] = _cfg_map['tot_sess']
result &= utils.cmp_dicts(exp_data=exp_app_data, act_data=flow)
flow = self._get_src_ip_port_flow_from_data(src_ip, src_port, _cfg_map, act_data)
if flow is None:
continue
# Verify EIM
act_eim_data = {}
for key in flow:
if 'eim_' in key:
act_eim_data[key] = flow[key]
result &= self._is_nat_ip_in_pool(flow, _cfg_map)
if 'nat_port' in _cfg_map:
result &= self._is_nat_port_in_pool(flow, _cfg_map)
return self.fn_checkout(result)
def get_nat_mappings_summary(self):
"""Return NAT mappings summary data as dictionary
This will parse the mappings summary output and builds a dictionary.
        :return: dictionary on successful parsing or raises an exception if there's no output
:rtype: dict or exception
Example::
Python:
hCgn.get_nat_mappings_summary()
Robot:
hCgn.Get NAT Mappings Summary
"""
self.fn_checkin("Retrieving NAT mappings summary")
output = self.dh.cli(command='show services nat mappings summary').response().splitlines()
data = self.dd()
spic = None
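        # Illustrative result (counts are examples):
        #   {'ms-1/0/0': {'addr_map': 10, 'eim_map': 10, 'eif_map': 10}}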
for line in output:
match = re.search(r'Service Interface:\s*(\w*.\w*\/\w*\/\w*)\s*', line)
if match:
spic = match.group(1)
else:
match = re.search(r'Service Interface:\s*(\w*)\s*', line)
if match:
spic = match.group(1)
match = re.search(r'Total number of address mappings:\s*(\d+)', line)
if match:
data[spic]['addr_map'] = int(match.group(1))
match = re.search(r'Total number of endpoint independent port mappings:\s*(\d+)', line)
if match:
data[spic]['eim_map'] = int(match.group(1))
match = re.search(r'Total number of endpoint independent filters:\s*(\d+)', line)
if match:
data[spic]['eif_map'] = int(match.group(1))
self.log('DEBUG', 'NAT mappings summary : {}'.format(data))
self.fn_checkout()
return data
def verify_nat_mappings_summary(self, **kwargs):
"""Verify NAT mappings summary
This will call get_nat_mappings_summary for fetching the mappings summary output.
This data will be verified against the data from TG Sessions and configuration
data that is already saved in the object.
        :return: True if successful else False
        :rtype: bool
Example::
Python:
hCgn.verify_nat_mappings_summary()
Robot:
hCgn.Verify Mappings Summary
"""
self.fn_checkin("Verifying NAT mappings summary")
act_data = self.get_nat_mappings_summary()
result = True
self._get_tg_port_and_config_mapping(**kwargs)
for tg_if in self.tg_cfg_map:
_cfg_map = self.tg_cfg_map[tg_if]
rule_cfg = self.nat_rule[_cfg_map['nat_rule']]
trans_type = rule_cfg['trans_type']
exp_data = {}
if trans_type.startswith('dnat'):
exp_data['addr_map'] = len(self.tg_sess[tg_if]['dst_ips_list'])
else:
exp_data['addr_map'] = len(self.tg_sess[tg_if]['src_ips_list'])
if 'trans_eim' in rule_cfg and rule_cfg['trans_eim'] is not None:
exp_data['eim_map'] = _cfg_map['tot_sess']
if 'trans_eif' in rule_cfg and rule_cfg['trans_eif'] is not None:
exp_data['eif_map'] = _cfg_map['tot_sess']
result &= utils.cmp_dicts(exp_data=exp_data, act_data=act_data)
return self.fn_checkout(result)
def get_nat_statistics(self, spic=None, timeout=300):
"""Fetch NAT statistics as dictionary
:param string spic:
**REQUIRED** Service PIC interface
:param int timeout:
**OPTIONAL** Cli command timeout. Default is 300
:return: Dictionary containing the NAT statistics data
:rtype: dict
Example::
Python:
hCgn.get_nat_statistics()
Robot:
hCgn.Get NAT Statistics
"""
self.fn_checkin("Retrieving NAT statistics")
if spic is None:
raise MissingMandatoryArgument('spic')
cmd = 'show services nat statistics interface ' + spic
entry = self.get_xml_output(cmd=cmd, xpath='service-nat-statistics-information',
want_list=False, timeout=timeout)
data = self.dd()
data['spic'] = entry.pop('interface-name', None)
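        # The remaining XML tags are copied with '-' replaced by '_', e.g.
        # 'nat-total-pkts-translated' becomes 'nat_total_pkts_translated'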
for key in entry:
if 'query-unsupported-msg' in key:
continue
tmp_key = key
tmp_key = tmp_key.replace('-', '_')
data[tmp_key] = entry[key]
self.log('DEBUG', 'NAT statistics: {}'.format(data))
self.fn_checkout()
return data
def verify_nat_statistics(self, **kwargs):
"""Verify NAT statistics
        :return: True if successful else False
        :rtype: bool
Example::
Python:
hCgn.verify_nat_statistics()
Robot:
hCgn.Verify NAT Statistics
"""
self.fn_checkin("Verifying NAT statistics")
result = True
self._get_tg_port_and_config_mapping(**kwargs)
for tg_if in self.tg_cfg_map:
_cfg_map = self.tg_cfg_map[tg_if]
act_data = self.get_nat_statistics(_cfg_map['spic'], **kwargs)
exp_data = {}
exp_data['nat_total_pkts_translated'] = self.tg_sess[tg_if]['total']
exp_data['nat_map_allocation_successes'] = self.tg_sess[tg_if]['total']
result &= utils.cmp_dicts(exp_data=exp_data, act_data=act_data)
return self.fn_checkout(result)
def get_sessions_extensive(self, **kwargs):
"""Fetch session extensive as dictionary
:return: Dictionary containing the session extensive data
:rtype: dict
Example::
Python:
hCgn.get_sessions_extensive()
Robot:
hCgn.Get Sessions Extensive
"""
self.fn_checkin("Retrieving session extensive")
is_nat = None
cmd = 'show services sessions extensive'
if 'ss' in kwargs:
cmd += ' service-set {}'.format(kwargs['ss'])
if 'sp' in kwargs:
cmd += ' interface {}'.format(kwargs['sp'])
if 'app_proto' in kwargs:
cmd += ' application-protocol {}'.format(kwargs['app_proto'])
if 'src_pfx' in kwargs:
cmd += ' source-prefix {}'.format(kwargs['src_pfx'])
if 'dst_pfx' in kwargs:
cmd += ' destination-prefix {}'.format(kwargs['dst_pfx'])
if 'src_port' in kwargs:
cmd += ' source-port {}'.format(kwargs['src_port'])
if 'limit' in kwargs:
cmd += ' limit {}'.format(kwargs['limit'])
output = self.dh.cli(command=cmd).response()
# data = {}
self.data['sess_xtnsv'] = {}
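        # Parsed sessions are keyed as [spic][sset][src_ip][src_port] for port-translating
        # flows and as [spic][sset][src_ip] for address-only translations; per-flow
        # attributes are stored with 'iflow_'/'rflow_' prefixes for the initiating and
        # reverse flows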
# conv = {}
for line in output.splitlines():
match = re.search(r'(^.*-\d+\/\d+\/\d+)', line)
if match:
spic = match.group(1)
if spic not in self.data['sess_xtnsv']:
self.data['sess_xtnsv'][spic] = {}
continue
match = re.search(
r'Service Set:\s*(.*),\s*Session:\s*(\d+),\s*ALG:\s*(.*),\s*Flags:\s*(.*),\s*'
r'IP Action:\s*(.*),\s*Offload:\s*(.*),\s*Asymmetric:\s*(.*)', line, re.IGNORECASE)
if match:
(sset, sess_id, alg, flags, action, offload, assym) = match.groups()
if sset not in self.data['sess_xtnsv'][spic]:
self.data['sess_xtnsv'][spic][sset] = {}
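                # src_ip/src_port may not be defined yet if this is the first service-set
                # header seen in the output; reset them defensively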
try:
src_ip
except NameError:
src_ip = src_port = None
# if src_ip:
# src_ip = src_port = None
is_nat = False
continue
match = re.search(r'NAT Action:\s*Translation Type\s*-\s*(.*)', line)
if match:
is_nat = True
trans_type = match.group(1)
continue
match = re.search(r'^\s*NAT (\w+)\s+(' + utils.get_regex_ip()
+ r'):(\d+)\s*->\s*(' +
utils.get_regex_ip()
+ r'):\s*(\d+)', line, re.IGNORECASE)
if match:
nat_type = match.group(1)
(src_ip, src_port, nat_ip, nat_port) = match.groups()[1:]
if src_ip not in self.data['sess_xtnsv'][spic][sset]:
self.data['sess_xtnsv'][spic][sset][src_ip] = {}
if src_port not in self.data['sess_xtnsv'][spic][sset][src_ip]:
self.data['sess_xtnsv'][spic][sset][src_ip][src_port] = {}
if re.search(r'destination', nat_type, re.IGNORECASE) and \
re.search(r'nat64', trans_type, re.IGNORECASE):
# Ignore the destination flow for NAT64
continue
self.data['sess_xtnsv'][spic][sset][src_ip][src_port]['nat_ip'] = nat_ip
self.data['sess_xtnsv'][spic][sset][src_ip][src_port]['nat_port'] = nat_port
conv = self.data['sess_xtnsv'][spic][sset][src_ip][src_port]
conv['trans_type'] = trans_type.lower()
conv['nat_type'] = nat_type.lower()
conv['sess_id'] = sess_id
conv['alg'] = alg
conv['flags'] = flags
conv['ip_action'] = action
conv['offload'] = offload
conv['assym'] = assym
continue
match = re.search(r'^\s*NAT (\w+)\s+(' + utils.get_regex_ip() +
r')\s*->\s*(' + utils.get_regex_ip() + ')', line, re.IGNORECASE)
if match:
nat_type = match.group(1)
(src_ip, nat_ip) = match.groups()[1:]
if src_ip not in self.data['sess_xtnsv'][spic][sset]:
self.data['sess_xtnsv'][spic][sset][src_ip] = {}
if re.search(r'destination', nat_type, re.IGNORECASE) and \
re.search(r'nat64', trans_type, re.IGNORECASE):
# Ignore the destination flow for NAT64
continue
self.data['sess_xtnsv'][spic][sset][src_ip]['nat_ip'] = nat_ip
conv = self.data['sess_xtnsv'][spic][sset][src_ip]
trans_type = re.sub(r'\s', '-', trans_type).lower()
conv['trans_type'] = trans_type.lower()
conv['nat_type'] = nat_type.lower()
conv['sess_id'] = sess_id
conv['alg'] = alg
conv['flags'] = flags
conv['ip_action'] = action
conv['offload'] = offload
conv['assym'] = assym
continue
match = re.search(r'(\w+)\s+(' + utils.get_regex_ip() + r'):(\d+)\s*->\s*(' +
utils.get_regex_ip() +
r'):(\d+)\s* (\w+) \s*([I|O])\s*(\d+)', line, re.IGNORECASE)
if match:
flow_type = 'iflow' if match.group(7) == 'I' else 'rflow'
if not is_nat:
if not src_ip:
                        (src_ip, src_port) = match.groups()[1:3]  # flow's source IP and port (groups 2 and 3)
# Non-NAT (SFW)
if src_ip not in self.data['sess_xtnsv'][spic][sset]:
self.data['sess_xtnsv'][spic][sset][src_ip] = {}
if src_port not in self.data['sess_xtnsv'][spic][sset][src_ip]:
self.data['sess_xtnsv'][spic][sset][src_ip][src_port] = {}
self.data['sess_xtnsv'][spic][sset][src_ip][src_port][
flow_type + '_proto'] = match.group(1).lower()
conv = self.data['sess_xtnsv'][spic][sset][src_ip][src_port]
conv['sess_id'] = sess_id
conv['alg'] = alg
conv['flags'] = flags
conv['ip_action'] = action
conv['offload'] = offload
conv['assym'] = assym
conv[flow_type + '_proto'] = match.group(1).lower()
conv[flow_type + '_src_ip'] = match.group(2)
conv[flow_type + '_src_port'] = match.group(3)
conv[flow_type + '_dst_ip'] = match.group(4)
conv[flow_type + '_dst_port'] = match.group(5)
conv[flow_type + '_state'] = match.group(6)
conv[flow_type + '_dir'] = match.group(7)
conv[flow_type + '_frm_cnt'] = match.group(8)
continue
match = re.search(r'(\w+)\s+(' + utils.get_regex_ip() + r')\s*->\s*(' +
utils.get_regex_ip() +
r')\s* (\w+) \s*([I|O])\s*(\d+)', line, re.IGNORECASE)
if match:
flow_type = 'iflow' if match.group(5) == 'I' else 'rflow'
if not is_nat:
if not src_ip:
                        src_ip = match.groups()[1]  # flow's source IP (group 2)
# # Non-NAT (SFW)
if src_ip not in self.data['sess_xtnsv'][spic][sset]:
self.data['sess_xtnsv'][spic][sset][src_ip] = {}
self.data['sess_xtnsv'][spic][sset][src_ip][
flow_type + '_proto'] = match.group(1).lower()
conv = self.data['sess_xtnsv'][spic][sset][src_ip]
conv['sess_id'] = sess_id
conv['alg'] = alg
conv['flags'] = flags
conv['ip_action'] = action
conv['offload'] = offload
conv['assym'] = assym
conv[flow_type + '_proto'] = match.group(1).lower()
conv[flow_type + '_src_ip'] = match.group(2)
conv[flow_type + '_dst_ip'] = match.group(3)
conv[flow_type + '_state'] = match.group(4)
conv[flow_type + '_dir'] = match.group(5)
conv[flow_type + '_frm_cnt'] = match.group(6)
continue
match = re.search(r'Byte count:\s*(\d+)', line, re.IGNORECASE)
if match:
conv[flow_type + '_byte_count'] = match.group(1)
continue
match = re.search(r'Flow role:\s*(.*),\s*Timeout:\s*(\d+)', line, re.IGNORECASE)
if match:
conv[flow_type + '_role'] = match.group(1)
conv[flow_type + '_timeout'] = match.group(2)
continue
self.log('INFO', "session extensive dump:{}".format(self.data['sess_xtnsv']))
self.fn_checkout()
return self.data['sess_xtnsv']
def verify_sessions_extensive(self, **kwargs):
"""Verify sessions extensive
        :return: True if successful else False
        :rtype: bool
Example::
Python:
hCgn.verify_sessions_extensive()
Robot:
hCgn.Verify Sessions Extensive
"""
self.fn_checkin("Verifying session extensive")
self.get_sessions_extensive(**kwargs)
self.data = self.data['sess_xtnsv']
result = True
self._get_tg_port_and_config_mapping(**kwargs)
for tg_if in self.tg_cfg_map:
_cfg_map = self.tg_cfg_map[tg_if]
for sess_idx in _cfg_map['rand_sess_idx_list']:
                # We need to verify if there's a mapping for this session on the router
src_ip = self.tg_sess[tg_if]['sess_list'][sess_idx]['src_ip']
src_port = str(self.tg_sess[tg_if]['sess_list'][sess_idx]['src_port'])
spic = _cfg_map['spic']
sset = _cfg_map['sset']
try:
flow = self.data[spic][sset][src_ip][src_port]
except (TypeError, KeyError):
self.log('INFO', "Error while retrieving flow")
continue
if flow is None:
self.log('INFO', "Flow is none")
continue
self.log('INFO', "flow is {}".format(flow))
result &= self._is_nat_ip_in_pool(flow, _cfg_map)
result &= self._is_nat_port_in_pool(flow, _cfg_map)
ss_name = _cfg_map['sset']
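                # If syslog classes are configured on the service set, also check that the
                # corresponding syslog message (SFW rule accept, NAT rule match or session
                # open) was generated for this session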
if 'sl_class_list' in self.sset[ss_name]:
if self.sset[ss_name]['sl_class_list'] is not None:
exp_data = {}
exp_data['src_ip'] = src_ip
exp_data['dst_ip'] = self.tg_sess[tg_if]['sess_list'][sess_idx]['dst_ip']
exp_data['proto'] = self.tg_sess[tg_if]['sess_list'][sess_idx][
'protocol'].lower()
if 'stateful-firewall-logs' in self.sset[ss_name]['sl_class_list']:
msg = 'JSERVICES_SFW_RULE_ACCEPT'
if 'nat-logs' in self.sset[ss_name]['sl_class_list']:
msg = 'JSERVICES_NAT_RULE_MATCH'
if 'session-logs' in self.sset[ss_name]['sl_class_list']:
exp_data['nat_ip'] = flow['nat_ip']
exp_data['nat_port'] = flow['nat_port']
msg = 'JSERVICES_SESSION_OPEN'
result &= self._verify_syslogs(msg=msg, src_port=src_port, xtnsv=exp_data)
return self.fn_checkout(result)
def get_nat_pool_ips(self):
"""Return Pool IPs configured
:return: list of Pool IPs configured
:rtype: list
Example::
Python:
hCgn.get_nat_pool_ips()
Robot:
hCgn.Get NAT Pool IPs
"""
self.fn_checkin("Retrieving configured pool ips")
#pools = self.nat_pool.keys()
pool_ips = []
for pool in self.nat_pool:
pool_ips.append(self.nat_pool[pool]['addr'])
self.fn_checkout()
return pool_ips
def get_detnat_port_block(self, internal_ip=None):
"""Return Deterministic NAT Port Block output as dictionary
:param string internal_ip:
**OPTIONAL** Internal IP
        :returns: Dictionary containing the deterministic NAT port block data
        :rtype: dict
        Example::
            Python:
                cgn.get_detnat_port_block()
            Robot:
                Get DetNAT Port Block
"""
self.fn_checkin("Retrieving DetNAT Port Blocks output")
cmd = "show services nat deterministic-nat nat-port-block {}".format(internal_ip)
_xpath = 'service-detnat-information'
#_xpath = 'service-detnat-information'
#dinfo = self.get_xml_output(cmd, xpath=_xpath)
detnat_output = self.get_xml_output(cmd, xpath=_xpath)
data = self.data['detnat_port_blk'] = self.dd()
#int_ip = dinfo['detnat-internal-host']
#denat_output = dinfo['detnat-internal-host']
# for entry in detnat_output:
int_ip = detnat_output['detnat-internal-host']
if iputils.is_ip_ipv6(int_ip):
int_ip = iputils.normalize_ipv6(int_ip)
data[int_ip]['sset'] = detnat_output['service-set-name']
data[int_ip]['spic'] = detnat_output['interface-name']
data[int_ip]['nat_pool'] = detnat_output['pool-name']
data[int_ip]['nat_ip'] = detnat_output['detnat-nat-ip']
data[int_ip]['nat_port_low'] = detnat_output['detnat-nat-port-low']
data[int_ip]['nat_port_high'] = detnat_output['detnat-nat-port-high']
self.log('INFO', "Det NAT info: {}".format(data))
self.fn_checkout()
return data
def verify_detnat_port_block(self, **kwargs):
"""Verify DetNAT Port blocks
        :param string internal_ip:
            **MANDATORY** Internal IP whose port block is to be verified
        :returns: True or False
        :rtype: bool
        Example::
            Python:
                cgn.verify_detnat_port_block()
            Robot:
                Verify DetNAT Port Block
"""
        internal_ip = kwargs.pop('internal_ip')
        act_data = self.get_detnat_port_block(internal_ip=internal_ip)
        # Compare the expected values (remaining kwargs) against the actual values
        return utils.cmp_dicts(exp_data=kwargs, act_data=act_data[internal_ip])
################################################################
# local methods
################################################################
def _get_syslogs(self, ptrn, **kwargs):
"""Parse NAT/SFW related logs from 'show log messages' and return as dictionary """
self.fn_checkin("Retrieving NAT log messages")
if 'src_ip' in kwargs and kwargs['src_ip'] is not None:
ptrn += " | match " + str(kwargs['src_ip'])
if 'src_port' in kwargs and kwargs['src_port'] is not None:
ptrn += ":" + str(kwargs['src_port'])
cmd = 'show log messages | match ' + ptrn
data = self.dd()
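        # Each syslog variant below is matched by its own regex; results are keyed by the
        # syslog tag (e.g. JSERVICES_SESSION_OPEN), then by the source IP and, where the
        # source port is predictable, by the source port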
output = self.dh.cli(command=cmd).response()
        # Regex building blocks are kept as raw strings so they can be concatenated into
        # the larger per-message patterns below
        regex_ipaddr = utils.get_regex_ip()
        pr_ptrn1 = r'proto (\d+)\s*\((.+)\)'
        pr_ptrn = r'proto (\d+)\s*\((\w+)\)'
        ip_ptrn = r'('+regex_ipaddr + r')[:|\/](\d+)\s*->\s*('+regex_ipaddr+r')[:|\/](\d+)'
        ptrn2 = r'('+regex_ipaddr+r'):(\d+)'
        ptrn3 = r'(MSVCS_LOG_.*|JSERVICES_LOG_.*)'
reg_ex_if = utils.get_regex_if()
for line in output.splitlines():
# ALG64 Control session
match = re.search(r'' + ptrn3 + r':\s+App:(.*),\s+('+reg_ex_if+r')\s+' + ptrn2 +
r'\s*\[' + ptrn2 + r'\]\s+->\s+' + ptrn2 +
r'\s*\[('+regex_ipaddr+r')\]\s*\((.*)\)', line)
if match:
ptr = data[match.group(1)][match.group(4)][match.group(5)]
ptr['app'] = match.group(2)
ptr['intf'] = match.group(3)
ptr['nat_ip'] = match.group(6)
ptr['nat_port'] = match.group(7)
ptr['dst_ip'] = match.group(8)
ptr['dst_port'] = match.group(9)
ptr['srvr_ip'] = match.group(10)
ptr['proto'] = match.group(11).lower()
# ALG64 Data session
match = re.search(r'' + ptrn3 + r':\s+App:(.*),\s+('+reg_ex_if+r')\s+' + ptrn2 +
r'\s*\[('+regex_ipaddr+r')\]\s+->\s+' + ptrn2 +
r'\s*\[' + ptrn2 + r'\]\s*\((.*)\)', line)
if match:
ptr = data[match.group(1)][match.group(4)][match.group(5)]
ptr['app'] = match.group(2)
ptr['intf'] = match.group(3)
ptr['nat_ip'] = match.group(6)
# Nat port is same as the source port
ptr['nat_port'] = match.group(5)
ptr['dst_ip'] = match.group(7)
ptr['dst_port'] = match.group(8)
ptr['srvr_ip'] = match.group(9)
                ptr['srvr_port'] = match.group(10)  # server port from the bracketed server IP:port pair
ptr['proto'] = match.group(11).lower()
# new message
match = re.search(r'' + ptrn3 + r':\s+App:(.*),\s+('+reg_ex_if+r')\s+' + ptrn2 +
r'\s*\[' + ptrn2 + r'\]\s+->\s+' + ptrn2 + r'\s*\((.*)\)', line)
if match:
ptr = data[match.group(1)][match.group(4)][match.group(5)]
ptr['app'] = match.group(2)
ptr['intf'] = match.group(3)
ptr['nat_ip'] = match.group(6)
ptr['nat_port'] = match.group(7)
ptr['dst_ip'] = match.group(8)
ptr['dst_port'] = match.group(9)
ptr['proto'] = match.group(10).lower()
# Basic NAT
match = re.search(r'' + ptrn3 + r':\s+App:(.*),\s+('+reg_ex_if+r')\s+' + ptrn2 +
r'\s*\[('+regex_ipaddr+r')\]\s+->\s+' + ptrn2 + r'\s*\((.*)\)', line)
if match:
ptr = data[match.group(1)][match.group(4)][match.group(5)]
ptr['app'] = match.group(2)
ptr['intf'] = match.group(3)
ptr['nat_ip'] = match.group(6)
ptr['dst_ip'] = match.group(7)
ptr['dst_port'] = match.group(8)
ptr['proto'] = match.group(9).lower()
# new message Without NAT
match = re.search(r'' + ptrn3 + r':\s+App:(.*),\s+('+reg_ex_if+r')\s+' +
ptrn2 + r'\s*->\s+' + ptrn2 + r'\s*\((.*)\)', line)
if match:
ptr = data[match.group(1)][match.group(4)][match.group(5)]
ptr['app'] = match.group(2)
ptr['intf'] = match.group(3)
ptr['dst_ip'] = match.group(6)
ptr['dst_port'] = match.group(7)
ptr['proto'] = match.group(8).lower()
# Without NAT
match = re.search(r'' + ptrn3 + r':\s*' + ptrn2 +
r'\s*->\s*' + ptrn2 + r'\s*\((.*)\)', line)
if match:
ptr = data[match.group(1)][match.group(2)][match.group(3)]
ptr['dst_ip'] = match.group(4)
ptr['dst_port'] = match.group(5)
ptr['proto'] = match.group(6).lower()
# NAT44
match = re.search(r'' + ptrn3 + r':\s*' + ptrn2 +
r'\s*\[('+regex_ipaddr+r')\]\s*->\s*' + ptrn2 + r'\s*\((.*)\)', line)
if match:
ptr = data[match.group(1)][match.group(2)][match.group(3)]
ptr['nat_ip'] = match.group(4)
ptr['dst_ip'] = match.group(5)
ptr['dst_port'] = match.group(6)
ptr['proto'] = match.group(7).lower()
# Source NAT
match = re.search(r'' + ptrn3 + r':\s*' + ptrn2 +
r'\s*\[' + ptrn2 + r'\]\s*->\s*' + ptrn2 + r'\s*\((.*)\)', line)
if match:
ptr = data[match.group(1)][match.group(2)][match.group(3)]
ptr['nat_ip'] = match.group(4)
ptr['nat_port'] = match.group(5)
ptr['dst_ip'] = match.group(6)
ptr['dst_port'] = match.group(7)
ptr['proto'] = match.group(8).lower()
# Destination NAT
match = re.search(r'' + ptrn3 + r':\s*' + ptrn2 + r'\s*->\s*' + ptrn2 +
r'\s*\[('+regex_ipaddr+r')\]\s*\((.*)\)', line)
if match:
ptr = data[match.group(1)][match.group(2)][match.group(3)]
ptr['dst_ip'] = match.group(4)
ptr['dst_port'] = match.group(5)
ptr['nat_ip'] = match.group(6)
ptr['proto'] = match.group(7).lower()
match = re.search(r'(JSERVICES.*):\s*' + pr_ptrn + r'\s*application: (.*),\s*' +
ip_ptrn +
r',\s*Match (.*) rule-set:\s*(.*), rule:\s*(.*), term:\s*(\d+)', line)
if match:
ptr = data[match.group(1)][match.group(5)][match.group(6)]
ptr['proto_num'] = match.group(2)
ptr['proto'] = match.group(3).lower()
ptr['app'] = match.group(4)
ptr['dst_ip'] = match.group(7)
ptr['dst_port'] = match.group(8)
ptr['match'] = match.group(9)
ptr['ruleset_name'] = match.group(10)
ptr['rule_name'] = match.group(11)
ptr['term'] = match.group(12)
match = re.search(r'(JSERVICES.*):\s*' +
pr_ptrn + r'\s*app: (.*),\s*('+reg_ex_if+r')\s*' +
ip_ptrn +
r',\s*Match (.*) rule-set\s*(.*) rule\s*(.*) term\s*(\d+)', line)
if match:
ptr = data[match.group(1)][match.group(6)][match.group(7)]
ptr['proto_num'] = match.group(2)
ptr['proto'] = match.group(3).lower()
ptr['app'] = match.group(4)
ptr['intf'] = match.group(5)
ptr['dst_ip'] = match.group(8)
ptr['dst_port'] = match.group(9)
ptr['match'] = match.group(10)
ptr['ruleset_name'] = match.group(11)
ptr['rule_name'] = match.group(12)
ptr['term'] = match.group(13)
# ICMP64 session where source is not predictable, hence not using port
# as a KEY
match = re.search(r'(JSERVICES.*):\s+App:(.*),\s+('+reg_ex_if+r')\s+' + ptrn2 +
r'\s*\[' + ptrn2 + r'\]\s+->\s+' + ptrn2 +
r'\s*\[('+regex_ipaddr+r')\]\s*\((.*)\)', line)
if match:
ptr = data[match.group(1)][match.group(4)]
ptr['app'] = match.group(2)
ptr['intf'] = match.group(3)
ptr['nat_ip'] = match.group(6)
ptr['nat_port'] = match.group(7)
ptr['dst_ip'] = match.group(8)
ptr['dst_port'] = match.group(9)
ptr['srvr_ip'] = match.group(10)
ptr['proto'] = match.group(11).lower()
# For ICMP case where source port is not predictable
match = re.search(r'(JSERVICES.*):\s+App:(.*),\s+('+reg_ex_if+r')\s+' + ptrn2 +
r'\s*\[' + ptrn2 + r'\]\s+->\s+' + ptrn2 + r'\s*\((.*)\)', line)
if match:
ptr = data[match.group(1)][match.group(4)]
ptr['app'] = match.group(2)
ptr['intf'] = match.group(3)
ptr['nat_ip'] = match.group(6)
ptr['nat_port'] = match.group(7)
ptr['dst_ip'] = match.group(8)
ptr['dst_port'] = match.group(9)
ptr['proto'] = match.group(10).lower()
# new message for SFW44/SFW66
match = re.search(r'(JSERVICES.*):\s+App:(.*),\s+('+reg_ex_if+r')\s+' + ptrn2 +
r'\s*->\s*' + ptrn2 + r'\s*\((.*)\)', line)
if match:
ptr = data[match.group(1)][match.group(4)]
ptr['app'] = match.group(2)
ptr['intf'] = match.group(3)
ptr['dst_ip'] = match.group(6)
ptr['dst_port'] = match.group(7)
ptr['proto'] = match.group(8).lower()
# new message for SFW44/SFW66, SESSION_OPEN LOGS
match = re.search(r'(JSERVICES.*):\s+App:(.*),\s+('+reg_ex_if+r')\s+' + ptrn2 +
r'\s*->\s*' +
ptrn2 + r'\s*\((.*)\)', line)
if match:
ptr = data[match.group(1)][match.group(4)][match.group(5)]
ptr['app'] = match.group(2)
ptr['intf'] = match.group(3)
ptr['dst_ip'] = match.group(6)
ptr['dst_port'] = match.group(7)
ptr['proto'] = match.group(8).lower()
# NEW for SFW
match = re.search(r'(JSERVICES.*):\s*' + pr_ptrn1 +
r'\s*app: (.*),\s*('+reg_ex_if+r')\s*' +
ip_ptrn +
r',\s*Match (.*) rule-set\s*(.*) rule\s*(.*) term\s*(\d+)', line)
if match:
ptr = data[match.group(1)][match.group(6)]
ptr['proto_num'] = match.group(2)
ptr['proto'] = match.group(3).lower()
ptr['app'] = match.group(4)
ptr['intf'] = match.group(5)
ptr['dst_ip'] = match.group(8)
ptr['dst_port'] = match.group(9)
ptr['match'] = match.group(10)
ptr['ruleset_name'] = match.group(11)
ptr['rule_name'] = match.group(12)
ptr['term'] = match.group(13)
# NEW for NAT RULE MATCH 14.2 latest
match = re.search(r'(JSERVICES.*):\s*' + pr_ptrn1 +
r'\s*application: (.*),\s*('+reg_ex_if+'):' + ip_ptrn +
r',\s*Match (.*) rule-set:\s*(.*), rule:\s*(.*), term:\s*(.*)', line)
if match:
ptr = data[match.group(1)][match.group(6)]
ptr['proto_num'] = match.group(2)
ptr['proto'] = match.group(3).lower()
ptr['app'] = match.group(4)
ptr['intf'] = match.group(5)
ptr['dst_ip'] = match.group(8)
ptr['dst_port'] = match.group(9)
ptr['match'] = match.group(10)
ptr['ruleset_name'] = match.group(11)
ptr['rule_name'] = match.group(12)
ptr['term'] = match.group(13)
# NEW for SFW
match = re.search(r'(JSERVICES.*):\s*' + pr_ptrn1 +
r'\s*application: (.*),\s*' + ip_ptrn +
r',\s*Match (.*) rule-set:\s*(.*), rule:\s*(.*), term:\s*(\d+)', line)
if match:
ptr = data[match.group(1)][match.group(5)]
ptr['proto_num'] = match.group(2)
ptr['proto'] = match.group(3).lower()
ptr['app'] = match.group(4)
ptr['dst_ip'] = match.group(7)
ptr['dst_port'] = match.group(8)
ptr['match'] = match.group(9)
ptr['ruleset_name'] = match.group(10)
ptr['rule_name'] = match.group(11)
ptr['term'] = match.group(12)
# new for session log
match = re.search(r'' + ptrn3 + r':\s+application:(.*),\s+('+reg_ex_if+r')\s+' +
ptrn2 +
r'\s*\[' + ptrn2 + r'\]\s+->\s+\[('+regex_ipaddr+r')\]\s+' +
ptrn2 + r'\s*\((.*)\)', line)
if match:
ptr = data[match.group(1)][match.group(4)][match.group(5)]
ptr['app'] = match.group(2)
ptr['intf'] = match.group(3)
ptr['nat_ip'] = match.group(6)
ptr['nat_port'] = match.group(7)
ptr['dst_ip'] = match.group(9)
ptr['dst_port'] = match.group(10)
ptr['srvr_ip'] = match.group(8)
ptr['proto'] = match.group(11).lower()
# new for session log
match = re.search(r'(JSERVICES.*):\s*application:(.*),\s+('+reg_ex_if+r')\s*' + ptrn2 +
r'\s*\[' + ptrn2 + r'\]\s*->\s*' + ptrn2 + r'\s*\((.*)\)', line)
if match:
ptr = data[match.group(1)][match.group(4)][match.group(5)]
ptr['app'] = match.group(2)
ptr['intf'] = match.group(3)
ptr['nat_ip'] = match.group(6)
ptr['nat_port'] = match.group(7)
ptr['dst_ip'] = match.group(8)
ptr['dst_port'] = match.group(9)
ptr['srvr_ip'] = match.group(4)
ptr['proto'] = match.group(10).lower()
# new for nat rule
match = re.search(r'(JSERVICES.*):\s*' + pr_ptrn +
r'\s*application: (.*),\s*('+reg_ex_if+'):' +
ip_ptrn +
r',\s*Match (.*) rule-set:\s*(.*), rule:\s*(.*), term:\s*(.*)', line)
if match:
ptr = data[match.group(1)][match.group(6)][match.group(7)]
ptr['proto_num'] = match.group(2)
ptr['proto'] = match.group(3).lower()
ptr['app'] = match.group(4)
ptr['intf'] = match.group(5)
ptr['dst_ip'] = match.group(8)
ptr['dst_port'] = match.group(9)
ptr['match'] = match.group(10)
ptr['ruleset_name'] = match.group(11)
ptr['rule_name'] = match.group(12)
ptr['term'] = match.group(13)
# new for session log NAT64
match = re.search(r'(JSERVICES.*):\s*application:(.*),\s+('+reg_ex_if+r')\s*' + ptrn2 +
r'\s*\[' + ptrn2 + r'\]\s*->\s*\[('+regex_ipaddr+r')\]\s*' + ptrn2 +
r'\s*\((.*)\)', line)
if match:
ptr = data[match.group(1)][match.group(4)][match.group(5)]
ptr['app'] = match.group(2)
ptr['intf'] = match.group(3)
ptr['nat_ip'] = match.group(6)
ptr['nat_port'] = match.group(7)
ptr['dst_ip_v4'] = match.group(8)
ptr['dst_ip'] = match.group(9)
ptr['dst_port'] = match.group(10)
ptr['proto'] = match.group(11).lower()
self.log('INFO', 'NAT log messages: {}'.format(data))
self.fn_checkout()
return data
def _verify_syslogs(self, **kwargs):
"""Verify NAT/SFW related logs from 'show log messages' """
self.fn_checkin("Verifying NAT log messages")
result = True
if 'then_syslog' in self.nat_rule and self.nat_rule['then_syslog'] is None:
self.log('INFO', "Nothing to do as Syslog is not enabled in the configurtion.")
return False
if 'xtnsv' in kwargs and kwargs['xtnsv'] is not None:
exp_data = kwargs['xtnsv']
msg = kwargs['msg']
src_ip = exp_data['src_ip']
# src_ip = exp_data['src_ip']
# src_ip = exp_data.pop('src_ip')
# exp_data['srvr_ip'] = src_ip
src_port = kwargs['src_port']
act_value = self._get_syslogs(msg, **exp_data)
act_val = ''
if src_port is not None:
try:
act_val = act_value[msg][src_ip][src_port]
# act_val['src_ip'] = act_val.pop('srvr_ip')
except (TypeError, KeyError):
self.fn_checkout(False, "No syslog message({}) found for {} {}".format(
msg, src_ip, src_port))
else:
try:
act_val = act_value[msg][src_ip]
# act_val['src_ip'] = act_val.pop('srvr_ip')
except (TypeError, KeyError):
self.fn_checkout(False, "No syslog message({}) found for {}".format(msg,
src_ip))
# act_val['src_ip'] = act_val.pop('srvr_ip')
exp = {}
for key in exp_data:
if key == 'src_ip':
if 'srvr_ip' in act_val:
exp['srvr_ip'] = exp_data[key]
else:
exp[key] = exp_data[key]
# self.log('INFO', 'Expected data is:{} and actual data is {}'.format(exp, act_val))
result &= utils.cmp_dicts(exp_data=exp, act_data=act_val)
# self._get_intf_ss()
# for tg_if in self.tg_sess:
# path = self.topo['intf'][tg_if]['path']
# intf_list = self.topo['path_res'][self.resource][path]
# for r_if in intf_list:
# r_if = t['resources'][self.resource]['interfaces'][r_if]['pic']
# if r_if in self.intf_ss:
# sset = self.intf_ss[r_if]
# sp = self.sset[sset]['sp']
# nat_rule = self.sset[sset]['nat_rules']
# nat_rule = nat_rule[0]
# nat_pool = self.nat_rule[nat_rule]['src_pool']
# nat_ip = self.nat_pool[nat_pool]['addr']
# nat_port = self.nat_pool[nat_pool]['nat_port']
# max_sess = len(self.tg_sess[tg_if]['sess'])
# if max_sess < 100:
# limit_perc = 100
# limit_final = int(float(max_sess) * (limit_perc/100))
# if limit is not None:
# limit_final = limit
# Range = random.sample(range(max_sess), limit_final)
# for sess_idx in Range:
# sess = self.tg_sess[tg_if]['sess'][sess_idx]
# if nat_port is not None:
# ports = nat_port.split("-")
# nat_port_low, nat_port_high = ports[0] , ports[1]
# src_ips = []
# if limit is not None:
# src_ips.append(act_value[sp][sset].keys())
# else:
# src_ips.append(sess['src_ip'])
# # self.log('INFO', 'sess{} , src_ips{}'.format(pp(sess), pp(src_ips)))
# for s_ip in src_ips:
# s_prt = str(sess['src_prt'])
# # if 'sl_class_list' in self.sset[sset] or 'syslog_class' in self.sset[sset]:
# if 'sl_class_list' in self.sset[sset]:
# if self.sset[sset]['sl_class_list'] is not None:
# exp_data = {}
# exp_data['src_ip'] = s_ip
# exp_data['dst_ip'] = sess['dst_ip']
# exp_data['proto'] = sess['protocol'].lower()
# if 'stateful-firewall-logs' in self.sset[sset]['sl_class_list']:
# msg = 'JSERVICES_SFW_RULE_ACCEPT'
# act_value = self.get_syslogs(msg, src_ip=s_ip, src_port=s_prt, **kwargs)
# if 'nat-logs' in self.sset[sset]['sl_class_list']:
# msg = 'JSERVICES_NAT_RULE_MATCH'
# act_value = self.get_syslogs(msg, src_ip=s_ip, src_port=s_prt, **kwargs)
# if 'session-logs' in self.sset[sset]['sl_class_list']:
# msg = 'JSERVICES_SESSION_OPEN'
# act_value = self.get_syslogs(msg, src_ip=s_ip, src_port=s_prt, **kwargs)
# act_val = act_value[msg][s_ip][s_prt]
# act_nat_ip = act_val['nat_ip']
# if utils.is_ip_in_subnet(act_nat_ip, nat_ip) == False:
# self.log('ERROR', 'Actual NAT IP({}) is **NOT** within expected NAT
# Pool({})'.format(act_nat_ip, nat_ip))
# result = False
# continue
# else:
# self.log('INFO', ' Actual NAT IP({}) is **IS** within expected NAT \
# Pool({})'.format(act_nat_ip, nat_ip))
# if nat_port is not None:
# act_nat_port = act_val['nat_port']
# if act_nat_port < nat_port_low and act_nat_port > nat_port_high:
# self.log('ERROR', 'Actual NAT Port({}) is **NOT** within \
# expected NAT Port Range({})'.format(act_nat_port, nat_port))
# result = False
# continue
# else:
# self.log('INFO', 'Actual NAT Port({}) **IS** within\
# expected NAT Port Range({}))'.format(act_nat_port, nat_port))
# if s_prt is not None:
# try:
# act_val = act_value[msg][s_ip][s_prt]
# except:
# self.log('ERROR', 'No syslog message({}) found for {}'.format(
# msg, s_ip))
# result &= False
# continue
# else:
# try:
# act_val = act_value[msg][s_ip]
# except:
# self.log('ERROR', 'No syslog message({}) found for {}'.format(
# msg, s_ip))
# result &= False
# continue
# # self.log('INFO', '{} , {}'.format(pp(act_val), pp(act_value)))
# if act_val is None:
# self.log('ERROR', 'Unable to find data for src_ip and src_port in
# actual data')
# return False
# result &= utils.cmp_dicts(exp_data=exp_data, act_data=act_val)
return self.fn_checkout(result)
def _get_ss_from_pool(self, **kwargs):
"""Get SS/SP from pool name"""
self.fn_checkin("Building required mappings")
# Build pool name to service set mapping
for pool_name in self.nat_pool:
if pool_name not in self.pool_map:
self.pool_map[pool_name] = {}
if pool_name in self.nat_pool_rule_map['src_pool']:
_nat_rule = self.nat_pool_rule_map['src_pool'][pool_name]
elif pool_name in self.nat_pool_rule_map['dst_pool']:
_nat_rule = self.nat_pool_rule_map['dst_pool'][pool_name]
else:
#Adding print as without it continue not show as executed in unit testing coverage
print
continue
_ss_name = self.ss_map['nat_rules'][_nat_rule]
sset = self.pool_map[pool_name]['sset'] = _ss_name
spic = self.pool_map[pool_name]['spic'] = self.sset[_ss_name]['intf']
self.pool_map[pool_name]['total_sess'] = self.tg_sess_cnt[spic][sset]
self.ss_map['nat_pool'][pool_name] = _ss_name
#self._get_ss_for_intf()
self._get_tg_port_and_config_mapping(**kwargs)
self.log('INFO', "Pool-ss map: {}".format(self.pool_map))
return self.fn_checkout()
def _get_tg_port_and_config_mapping(self, **kwargs):
"""Determines sp,sset,rule,pool etc. thats going to service traffic for every tg port"""
self.fn_checkin("Mapping TG Port and config")
if self.tg_sess_cnt is None:
super()._get_tg_port_and_config_mapping(**kwargs)
#_pool_cnt = self.tg_sess_cnt['nat_pool'] = {}
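        # The base-class mapping provides, per TG port, at least 'spic', 'sset',
        # 'tot_sess' and 'rand_sess_idx_list'; this method adds the NAT rule, pool,
        # NAT IP and NAT port range that will service that port's traffic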
for tg_if in self.tg_cfg_map:
_conf_map = self.tg_cfg_map[tg_if]
nat_rule = _conf_map['nat_rules'] = self.sset[_conf_map['sset']]['nat_rules']
if nat_rule is not None:
# todo Will handle only one NAT Rule for now
nat_rule = nat_rule[0]
nat_pool = None
self.log("NAT Rule, {}: {}".format(nat_rule, self.nat_rule[nat_rule]))
# Check if NAT Pool is configured
if 'src_pool' in self.nat_rule[nat_rule]:
nat_pool = _conf_map['nat_pool'] = self.nat_rule[nat_rule]['src_pool']
#_conf_map['nat_ip'] = self.nat_pool[nat_pool]['addr']
pool_cfg = self.nat_pool[nat_pool]
if 'port_low' in pool_cfg:
_conf_map['nat_port_low'] = pool_cfg['port_low']
_conf_map['nat_port_high'] = pool_cfg['port_high']
_conf_map['nat_port'] = pool_cfg['port_low'] + '-' + pool_cfg['port_high']
elif ('port_auto' in pool_cfg and pool_cfg['port_auto']) or \
('port_auto_auto' in pool_cfg and pool_cfg['port_auto_auto']) or\
('port_auto_random' in pool_cfg and \
pool_cfg['port_auto_random']):
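                        # Automatic port allocation is assumed to use the full high
                        # port range 1024-65535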
_conf_map['nat_port_low'] = 1024
_conf_map['nat_port_high'] = 65535
_conf_map['nat_port'] = '1024-65535'
if 'dst_pool' in self.nat_rule[nat_rule]:
nat_pool = _conf_map['nat_pool'] = self.nat_rule[nat_rule]['dst_pool']
#_conf_map['nat_ip'] = self.nat_pool[nat_pool]['addr']
if nat_pool is not None:
_conf_map['nat_ip'] = self.nat_pool[nat_pool]['addr']
#if nat_pool not in _pool_cnt:
#_pool_cnt[nat_pool] = {}
#_pool_cnt[nat_pool]['tot_sess'] = self.tg_sess[tg_if]['total']
self.log('INFO', "TG Port and config mapping: {}".format(self.tg_cfg_map))
self.fn_checkout()
def _get_src_ip_flow_from_data(self, src_ip, cfg_map, data):
"""Return mapping from the actual data for the given Source ip.
Service pic, service set, NAT pool are picked from the configuration map
created by _get_tg_port_and_config_mapping
"""
spic, sset, nat_pool = [cfg_map[key] for key in ['spic', 'sset', 'nat_pool']]
_msg = "Flow(spic={}, sset={}, pool={}".format(spic, sset, nat_pool)
_msg += ", ip={})".format(src_ip)
self.fn_checkin("Finding {}".format(_msg))
self.log("Actual data: {}".format(data))
try:
_flow = data[spic][sset][nat_pool][src_ip]
except (TypeError, KeyError):
self.log('ERROR', '{} not found in actual data({})'.format(_msg, data))
return None
if _flow is None:
self.log('ERROR', '{} not found in actual data({})'.format(_msg, data))
return None
self.log("Flow: {}".format(_flow))
self.log("Actual data: {}".format(data))
self.fn_checkout()
return _flow
def _get_src_ip_port_flow_from_data(self, src_ip, src_port, cfg_map, data):
"""Return mapping from the actual data for the given Source ip and port.
Service pic, service set, NAT pool are picked from the configuration map
created by _get_tg_port_and_config_mapping
"""
spic, sset, nat_pool = [cfg_map[key] for key in ['spic', 'sset', 'nat_pool']]
_msg = "Flow(spic={}, sset={}, pool={}".format(spic, sset, nat_pool)
_msg += ", ip={}, port={})".format(src_ip, src_port)
self.fn_checkin("Finding {}".format(_msg))
self.log("Actual data: {}".format(data))
try:
_flow = data[spic][sset][nat_pool][src_ip][src_port]
except (TypeError, KeyError):
self.log('ERROR', '{} not found in actual data({})'.format(_msg, data))
return None
if _flow is None:
self.log('ERROR', '{} not found in actual data({})'.format(_msg, data))
return None
self.log("Flow: {}".format(_flow))
self.log("Actual data: {}".format(data))
self.fn_checkout()
return _flow
def _is_nat_ip_in_pool(self, flow, cfg_map):
"""Verify if actual NAT IP is with in NAT Pool IP range"""
self.fn_checkin("Checking if given nat ip is in flow: {}".format(flow))
nat_ip = None
if 'nat_ip' in flow:
nat_ip = flow['nat_ip']
if 'eim_nat_ip' in flow:
# NAT IP is saved as eim_nat_ip in nat mappings detail
nat_ip = flow['eim_nat_ip']
if nat_ip is None:
self.log("{} is not there in the flow({})".format(nat_ip, flow))
return False
act_nat_ip_str = "Actual NAT IP({})".format(nat_ip)
nat_ip_str = "within expected NAT Pool({})".format(cfg_map['nat_ip'])
if not iputils.is_ip_in_subnet(nat_ip, cfg_map['nat_ip']):
self.log('ERROR', '{} is **NOT** {}'.format(act_nat_ip_str, nat_ip_str))
return False
self.log('INFO', '{} **IS** {}'.format(act_nat_ip_str, nat_ip_str))
return self.fn_checkout(True)
def _is_nat_port_in_pool(self, flow, cfg_map):
"""Verify if actual NAT Port is with in NAT Pool port range"""
nat_port = None
if 'nat_port' not in cfg_map or cfg_map['nat_port'] is None:
self.log('ERROR', "nat_port is not there in the config({})".format(cfg_map))
#print("nat_port is not there in the config({})".format(cfg_map))
return False
if 'nat_port' in flow:
nat_port = int(flow['nat_port'])
if 'eim_nat_port' in flow:
# NAT Port is saved as eim_nat_port in nat mappings detail
nat_port = int(flow['eim_nat_port'])
if nat_port is None:
self.log('ERROR', "nat_port is not there in the flow({})".format(flow))
#print("nat_port is not there in the flow({})".format(flow))
return False
_act_port_str = "Actual NAT Port({})".format(nat_port)
_range_str = "within expected NAT Pool Port Range({})".format(cfg_map['nat_port'])
self.log('INFO', "Verifying if {} is {}".format(_act_port_str, _range_str))
if nat_port > int(cfg_map['nat_port_low']) and nat_port < int(cfg_map['nat_port_high']):
self.log('INFO', '{} **IS** {}'.format(_act_port_str, _range_str))
return True
self.log('ERROR', '{} is **NOT** {}'.format(_act_port_str, _range_str))
return False
def _get_profile_from_pool(self, sset, pool):
"""Update the service set profile pool address"""
if 'addr' in self.nat_pool[pool]:
self.ss_profile[self.nat_pool[pool]['addr']] = sset
def _get_profile_from_ss(self, sset=None):
"""Update the object with the service set profile"""
self.fn_checkin("Retrieving profile from sset")
ss_names = [sset] if sset is not None else self.sset.keys()
for ss_name in ss_names:
if 'nat_rules' in self.sset[ss_name]:
# Fetch NAT Rule/pool info
for nat_rule in self.sset[ss_name]['nat_rules']:
if 'src_pool' in self.nat_rule[nat_rule]:
self._get_profile_from_pool(ss_name, self.nat_rule[nat_rule]['src_pool'])
self.fn_checkout()
| [
"[email protected]"
] | |
15d426a7e4643fabc70e15e94441389afefc2ce9 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/network/v20200701/get_azure_firewall.py | 3e936ac33077dad9aa63947bf571cfffcc65267b | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,805 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetAzureFirewallResult',
'AwaitableGetAzureFirewallResult',
'get_azure_firewall',
]
@pulumi.output_type
class GetAzureFirewallResult:
"""
Azure Firewall resource.
"""
def __init__(__self__, additional_properties=None, application_rule_collections=None, etag=None, firewall_policy=None, hub_ip_addresses=None, id=None, ip_configurations=None, ip_groups=None, location=None, management_ip_configuration=None, name=None, nat_rule_collections=None, network_rule_collections=None, provisioning_state=None, sku=None, tags=None, threat_intel_mode=None, type=None, virtual_hub=None, zones=None):
if additional_properties and not isinstance(additional_properties, dict):
raise TypeError("Expected argument 'additional_properties' to be a dict")
pulumi.set(__self__, "additional_properties", additional_properties)
if application_rule_collections and not isinstance(application_rule_collections, list):
raise TypeError("Expected argument 'application_rule_collections' to be a list")
pulumi.set(__self__, "application_rule_collections", application_rule_collections)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if firewall_policy and not isinstance(firewall_policy, dict):
raise TypeError("Expected argument 'firewall_policy' to be a dict")
pulumi.set(__self__, "firewall_policy", firewall_policy)
if hub_ip_addresses and not isinstance(hub_ip_addresses, dict):
raise TypeError("Expected argument 'hub_ip_addresses' to be a dict")
pulumi.set(__self__, "hub_ip_addresses", hub_ip_addresses)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ip_configurations and not isinstance(ip_configurations, list):
raise TypeError("Expected argument 'ip_configurations' to be a list")
pulumi.set(__self__, "ip_configurations", ip_configurations)
if ip_groups and not isinstance(ip_groups, list):
raise TypeError("Expected argument 'ip_groups' to be a list")
pulumi.set(__self__, "ip_groups", ip_groups)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if management_ip_configuration and not isinstance(management_ip_configuration, dict):
raise TypeError("Expected argument 'management_ip_configuration' to be a dict")
pulumi.set(__self__, "management_ip_configuration", management_ip_configuration)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if nat_rule_collections and not isinstance(nat_rule_collections, list):
raise TypeError("Expected argument 'nat_rule_collections' to be a list")
pulumi.set(__self__, "nat_rule_collections", nat_rule_collections)
if network_rule_collections and not isinstance(network_rule_collections, list):
raise TypeError("Expected argument 'network_rule_collections' to be a list")
pulumi.set(__self__, "network_rule_collections", network_rule_collections)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if threat_intel_mode and not isinstance(threat_intel_mode, str):
raise TypeError("Expected argument 'threat_intel_mode' to be a str")
pulumi.set(__self__, "threat_intel_mode", threat_intel_mode)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_hub and not isinstance(virtual_hub, dict):
raise TypeError("Expected argument 'virtual_hub' to be a dict")
pulumi.set(__self__, "virtual_hub", virtual_hub)
if zones and not isinstance(zones, list):
raise TypeError("Expected argument 'zones' to be a list")
pulumi.set(__self__, "zones", zones)
@property
@pulumi.getter(name="additionalProperties")
def additional_properties(self) -> Optional[Mapping[str, str]]:
"""
The additional properties used to further config this azure firewall.
"""
return pulumi.get(self, "additional_properties")
@property
@pulumi.getter(name="applicationRuleCollections")
def application_rule_collections(self) -> Optional[Sequence['outputs.AzureFirewallApplicationRuleCollectionResponse']]:
"""
Collection of application rule collections used by Azure Firewall.
"""
return pulumi.get(self, "application_rule_collections")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="firewallPolicy")
def firewall_policy(self) -> Optional['outputs.SubResourceResponse']:
"""
The firewallPolicy associated with this azure firewall.
"""
return pulumi.get(self, "firewall_policy")
@property
@pulumi.getter(name="hubIPAddresses")
def hub_ip_addresses(self) -> Optional['outputs.HubIPAddressesResponse']:
"""
IP addresses associated with AzureFirewall.
"""
return pulumi.get(self, "hub_ip_addresses")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ipConfigurations")
def ip_configurations(self) -> Optional[Sequence['outputs.AzureFirewallIPConfigurationResponse']]:
"""
IP configuration of the Azure Firewall resource.
"""
return pulumi.get(self, "ip_configurations")
@property
@pulumi.getter(name="ipGroups")
def ip_groups(self) -> Sequence['outputs.AzureFirewallIpGroupsResponse']:
"""
IpGroups associated with AzureFirewall.
"""
return pulumi.get(self, "ip_groups")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="managementIpConfiguration")
def management_ip_configuration(self) -> Optional['outputs.AzureFirewallIPConfigurationResponse']:
"""
IP configuration of the Azure Firewall used for management traffic.
"""
return pulumi.get(self, "management_ip_configuration")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="natRuleCollections")
def nat_rule_collections(self) -> Optional[Sequence['outputs.AzureFirewallNatRuleCollectionResponse']]:
"""
Collection of NAT rule collections used by Azure Firewall.
"""
return pulumi.get(self, "nat_rule_collections")
@property
@pulumi.getter(name="networkRuleCollections")
def network_rule_collections(self) -> Optional[Sequence['outputs.AzureFirewallNetworkRuleCollectionResponse']]:
"""
Collection of network rule collections used by Azure Firewall.
"""
return pulumi.get(self, "network_rule_collections")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the Azure firewall resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.AzureFirewallSkuResponse']:
"""
The Azure Firewall Resource SKU.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="threatIntelMode")
def threat_intel_mode(self) -> Optional[str]:
"""
The operation mode for Threat Intelligence.
"""
return pulumi.get(self, "threat_intel_mode")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualHub")
def virtual_hub(self) -> Optional['outputs.SubResourceResponse']:
"""
The virtualHub to which the firewall belongs.
"""
return pulumi.get(self, "virtual_hub")
@property
@pulumi.getter
def zones(self) -> Optional[Sequence[str]]:
"""
A list of availability zones denoting where the resource needs to come from.
"""
return pulumi.get(self, "zones")
class AwaitableGetAzureFirewallResult(GetAzureFirewallResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAzureFirewallResult(
additional_properties=self.additional_properties,
application_rule_collections=self.application_rule_collections,
etag=self.etag,
firewall_policy=self.firewall_policy,
hub_ip_addresses=self.hub_ip_addresses,
id=self.id,
ip_configurations=self.ip_configurations,
ip_groups=self.ip_groups,
location=self.location,
management_ip_configuration=self.management_ip_configuration,
name=self.name,
nat_rule_collections=self.nat_rule_collections,
network_rule_collections=self.network_rule_collections,
provisioning_state=self.provisioning_state,
sku=self.sku,
tags=self.tags,
threat_intel_mode=self.threat_intel_mode,
type=self.type,
virtual_hub=self.virtual_hub,
zones=self.zones)
def get_azure_firewall(azure_firewall_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAzureFirewallResult:
"""
Azure Firewall resource.
:param str azure_firewall_name: The name of the Azure Firewall.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['azureFirewallName'] = azure_firewall_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network/v20200701:getAzureFirewall', __args__, opts=opts, typ=GetAzureFirewallResult).value
return AwaitableGetAzureFirewallResult(
additional_properties=__ret__.additional_properties,
application_rule_collections=__ret__.application_rule_collections,
etag=__ret__.etag,
firewall_policy=__ret__.firewall_policy,
hub_ip_addresses=__ret__.hub_ip_addresses,
id=__ret__.id,
ip_configurations=__ret__.ip_configurations,
ip_groups=__ret__.ip_groups,
location=__ret__.location,
management_ip_configuration=__ret__.management_ip_configuration,
name=__ret__.name,
nat_rule_collections=__ret__.nat_rule_collections,
network_rule_collections=__ret__.network_rule_collections,
provisioning_state=__ret__.provisioning_state,
sku=__ret__.sku,
tags=__ret__.tags,
threat_intel_mode=__ret__.threat_intel_mode,
type=__ret__.type,
virtual_hub=__ret__.virtual_hub,
zones=__ret__.zones)
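# Illustrative usage of the invoke above (resource names are placeholders, not from this SDK's docs):
#
#     import pulumi
#     import pulumi_azure_native as azure_native
#
#     fw = azure_native.network.v20200701.get_azure_firewall(
#         azure_firewall_name="example-firewall",
#         resource_group_name="example-rg")
#     pulumi.export("firewallProvisioningState", fw.provisioning_state)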
| [
"[email protected]"
] | |
daface475f5b0ebaca1216489715e0ecde64d1d3 | a9375ae0cecba2f70e01fe9455af7173dab6a3da | /scheduled_tasks/reddit/stocks/AutoDD.py | 96dd32db152c51dc9f3397d42fd14a80b105b866 | [
"MIT"
] | permissive | kannavue/Stocksera | ca3c2680371fb106e7850ed7a1b956f64e56c8a0 | 80fbbfb7d38cf8bf09367d67bda85bdc0c7801d4 | refs/heads/master | 2023-09-04T01:59:27.353642 | 2021-11-04T13:13:09 | 2021-11-04T13:13:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,029 | py | import sys
import os
import re
import locale
import math
import pandas as pd
import praw
from collections import Counter
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import yfinance.ticker as yf
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import sqlite3
from scheduled_tasks.reddit.stocks.fast_yahoo import *
import scheduled_tasks.reddit.config as cfg
from custom_extensions.stopwords import stopwords_list
from custom_extensions.custom_words import new_words
conn = sqlite3.connect(r"database/database.db", check_same_thread=False)
db = conn.cursor()
analyzer = SentimentIntensityAnalyzer()
analyzer.lexicon.update(new_words)
# x base points for a ticker that appears in a subreddit title or text body that fits the search criteria
base_points = 2
# x bonus points for each flair matching 'DD' or 'Catalyst' for a ticker that appears on the subreddit
bonus_points = 2
# every x upvotes on the thread counts for 1 point (rounded up)
upvote_factor = 3
# every x comments on the thread counts for 1 point (rounded up)
comments_factor = 3
# x bonus points for the sentiment of ticker
positive_sentiment = 2
neutral_sentiment = 1
negative_sentiment = 0
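# Worked example (illustrative only): a post titled "DD on $ABCD" with a 'DD' flair,
# 9 upvotes and 6 comments scores base_points (2) + bonus_points (2) + ceil(9/3) (3)
# + ceil(6/3) (2) = 9, plus 0-2 more depending on the VADER sentiment of its text.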
# rocket emoji
rocket = '🚀'
# Python regex pattern for stocks codes
pattern = r"(?<=\$)?\b[A-Z]{2,5}\b(?:\.[A-Z]{1,2})?"
# Create folder to store price chart
if not os.path.exists("static/graph_chart"):
os.mkdir("static/graph_chart")
def get_sentiment(text, increment):
vs = analyzer.polarity_scores(text)
sentiment_score = vs['compound']
if sentiment_score >= 0.05:
increment += positive_sentiment
elif sentiment_score <= -0.05:
increment += negative_sentiment
else:
increment += neutral_sentiment
return increment, sentiment_score
def get_submission_praw(n, sub, n_num):
"""
    Returns two dicts of submissions, keyed by subreddit:
    1st: current results from n hours ago until now
    2nd: previous results from 2n hours ago until n hours ago
"""
mid_interval = datetime.today() - timedelta(hours=n)
timestamp_mid = int(mid_interval.timestamp())
timestamp_start = int((mid_interval - timedelta(hours=n)).timestamp())
timestamp_end = int(datetime.today().timestamp())
reddit = praw.Reddit(client_id=cfg.API_REDDIT_CLIENT_ID,
client_secret=cfg.API_REDDIT_CLIENT_SECRET,
user_agent=cfg.API_REDDIT_USER_AGENT)
recent = {}
prev = {}
subreddit = reddit.subreddit(sub)
all_results = []
for post in subreddit.new(limit=n_num):
all_results.append([post.title, post.link_flair_text, post.selftext, post.score, post.num_comments,
post.created_utc])
# start --> mid --> end
recent[sub] = [posts for posts in all_results if timestamp_mid <= posts[5] <= timestamp_end]
prev[sub] = [posts for posts in all_results if timestamp_start <= posts[5] < timestamp_mid]
return recent, prev
def get_submission_generators(n, sub, n_num):
"""
    Returns current scores, previous scores, and total rocket/post/upvote/comment counts per ticker
"""
recent, prev = get_submission_praw(n, sub, n_num)
print("Searching for tickers in {}...".format(sub))
current_scores, current_rocket_scores, current_posts_dict, current_upvotes_dict, current_comments_dict = get_ticker_scores_praw(recent)
prev_scores, prev_rocket_scores, prev_posts_dict, prev_upvotes_dict, prev_comments_dict = get_ticker_scores_praw(prev)
total_rocket_score = Counter(current_rocket_scores) + Counter(prev_rocket_scores)
total_posts_score = Counter(current_posts_dict) + Counter(prev_posts_dict)
total_upvotes_score = Counter(current_upvotes_dict) + Counter(prev_upvotes_dict)
total_comments_score = Counter(current_comments_dict) + Counter(prev_comments_dict)
return current_scores, prev_scores, total_rocket_score, total_posts_score, total_upvotes_score, total_comments_score
def get_ticker_scores_praw(sub_gen_dict):
"""
Return two dictionaries:
    --sub_scores_dict: a dictionary of dictionaries. Its keys are the subreddits present in sub_gen_dict; the value
    paired with each subreddit key is a dictionary of scores, where each key is a ticker found in the reddit
    submissions.
    --rocket_scores_dict: a dictionary whose keys are the tickers found in reddit submissions, and whose value is the
    number of rocket emojis found for each ticker.
:param sub_gen_dict: A dictionary of generators for each subreddit, as outputted by get_submission_generators
"""
# Dictionaries containing the summaries
sub_scores_dict = {}
# Dictionaries containing the rocket count
rocket_scores_dict = {}
num_posts_dict = {}
num_upvotes_dict = {}
num_comments_dict = {}
for sub, submission_list in sub_gen_dict.items():
sub_scores_dict[sub] = {}
for submission in submission_list:
# every ticker in the title will earn this base points
increment = base_points
# search the title for the ticker/tickers
title = ' ' + submission[0].upper() + ' '
title_extracted = set(re.findall(pattern, title))
# print(submission[5], title, title_extracted)
# flair is worth bonus points
if submission[1] is not None:
flair = submission[1].lower()
if 'dd' in flair or 'catalyst' in flair or 'technical analysis' in flair:
increment += bonus_points
# search the text body for the ticker/tickers and find sentiment score
            # default the text body to empty so the rocket counting below also works for link-only posts
            self_text = ''
            self_text_extracted = set()
if submission[2] is not None:
self_text = ' ' + submission[2] + ' '
self_text_extracted = set(re.findall(pattern, self_text))
increment, sentiment_score = get_sentiment(self_text, increment)
else:
increment, sentiment_score = get_sentiment(title, increment)
# every 3 upvotes are worth 1 extra point
if upvote_factor > 0 and submission[3] is not None:
increment += math.ceil(submission[3] / upvote_factor)
                # every `comments_factor` comments are worth 1 extra point
if comments_factor > 0 and submission[4] is not None:
increment += math.ceil(submission[4] / comments_factor)
extracted_tickers = self_text_extracted.union(title_extracted)
extracted_tickers = {ticker.replace('.', '-') for ticker in extracted_tickers}
count_rocket = title.count(rocket) + self_text.count(rocket)
for ticker in extracted_tickers:
rocket_scores_dict[ticker] = rocket_scores_dict.get(ticker, 0) + count_rocket
num_posts_dict[ticker] = num_posts_dict.get(ticker, 0) + 1
num_upvotes_dict[ticker] = num_upvotes_dict.get(ticker, 0) + submission[3]
num_comments_dict[ticker] = num_comments_dict.get(ticker, 0) + submission[4]
# title_extracted is a set, duplicate tickers from the same title counted once only
sub_scores_dict[sub][ticker] = sub_scores_dict[sub].get(ticker, 0) + increment
return sub_scores_dict, rocket_scores_dict, num_posts_dict, num_upvotes_dict, num_comments_dict
def populate_df(current_scores_dict, prev_scores_dict, interval):
"""
Combine two score dictionaries, one from the current time interval, and one from the past time interval
:returns: the populated dataframe
"""
dict_result = {}
total_sub_scores = {}
for sub, current_sub_scores_dict in current_scores_dict.items():
total_sub_scores[sub] = {}
for symbol, current_score in current_sub_scores_dict.items():
if symbol in dict_result.keys():
dict_result[symbol][0] += current_score
dict_result[symbol][1] += current_score
else:
dict_result[symbol] = [current_score, current_score, 0, 0]
total_sub_scores[sub][symbol] = total_sub_scores[sub].get(symbol, 0) + current_score
for sub, prev_sub_scores_dict in prev_scores_dict.items():
for symbol, prev_score in prev_sub_scores_dict.items():
if symbol in dict_result.keys():
# total, recent, prev, change
dict_result[symbol][0] += prev_score
dict_result[symbol][2] += prev_score
dict_result[symbol][3] = ((dict_result[symbol][1] - dict_result[symbol][2]) / dict_result[symbol][2]) * 100
else:
dict_result[symbol] = [prev_score, 0, prev_score, 0]
total_sub_scores[sub][symbol] = total_sub_scores[sub].get(symbol, 0) + prev_score
columns = ['total', 'recent', 'previous', 'change']
df = pd.DataFrame.from_dict(dict_result, orient='index', columns=columns)
if len(current_scores_dict) > 1:
dtype_dict = {}
for sub, total_score_dict in total_sub_scores.items():
# add each total score dict as new column of df
df[sub] = pd.Series(total_score_dict)
dtype_dict[sub] = 'int32'
df = df.fillna(value=0).astype(dtype_dict)
return df
def filter_df(df, min_val):
"""
Filter the score dataframe
:param dataframe df: the dataframe to be filtered
:param int min_val: the minimum total score
:returns: the filtered dataframe
"""
filtered_words = stopwords_list
# compares the first column, which is the total score to the min val
df = df[df.iloc[:, 0] >= min_val]
drop_index = pd.Index(filtered_words).intersection(df.index)
df = df.drop(index=drop_index)
return df
def get_financial_stats(results_df, min_vol, min_mkt_cap, threads=True):
# dictionary of ticker summary profile information to get from yahoo
summary_profile_measures = {'industry': 'industry', 'website': 'website'}
# dictionary of ticker financial information to get from yahoo
financial_measures = {'targetMeanPrice': 'target', 'recommendationKey': 'recommend'}
# dictionary of ticker summary information to get from yahoo
summary_measures = {'previousClose': 'prev_close', 'open': 'open', 'dayLow': 'day_low', 'dayHigh': 'day_high'}
# dictionary of ticker key stats summary
key_stats_measures = {'shortPercentOfFloat': 'short_per_float'}
# mapping of yahoo module names to dictionaries containing data we want to retrieve
module_name_map = {'defaultKeyStatistics': key_stats_measures, 'summaryProfile': summary_profile_measures,
'summaryDetail': summary_measures, 'financialData': financial_measures}
# check for valid symbols and get quick stats
ticker_list = list(results_df.index.values)
quick_stats_df = get_quick_stats(ticker_list, min_vol, min_mkt_cap, threads)
valid_ticker_list = list(quick_stats_df.index.values)
summary_stats_df = download_advanced_stats(valid_ticker_list, module_name_map, threads)
summary_stats_df["website"] = summary_stats_df["website"].apply(lambda x: "https://logo.clearbit.com/" + str(x).replace("http://", "").replace("www.", "").split('/')[0])
results_df_valid = results_df.loc[valid_ticker_list]
results_df = pd.concat([results_df_valid, quick_stats_df, summary_stats_df], axis=1)
results_df.index.name = 'ticker'
return results_df
def get_quick_stats(ticker_list, min_vol, min_mkt_cap, threads=True):
quick_stats = {'regularMarketPreviousClose': 'prvCls', 'fiftyDayAverage': '50DayAvg',
'regularMarketVolume': 'volume', 'averageDailyVolume3Month': '3MonthVolAvg',
'regularMarketPrice': 'price', 'regularMarketChangePercent': '1DayChange%',
'floatShares': 'floating_shares', 'beta': 'beta', 'marketCap': 'mkt_cap'}
unprocessed_df = download_quick_stats(ticker_list, quick_stats, threads)
processed_stats_table = []
for index, row in unprocessed_df.iterrows():
symbol = index
prev_close = row['prvCls']
avg50day = row['50DayAvg']
price = row['price']
day_change = row['1DayChange%']
volume = row['volume']
stock_float = row['floating_shares']
beta = row['beta']
mkt_cap = row['mkt_cap']
valid = False
if price != "N/A" and price != 0:
valid = True
if day_change != "N/A" and day_change != 0 or (day_change == 0 and price == prev_close):
day_change = "{:.2f}".format(day_change)
if day_change != 0:
valid = True
elif prev_close != "N/A" and prev_close != 0 and price != "N/A":
day_change = ((float(price) - float(prev_close))/float(prev_close))*100
day_change = "{:.2f}".format(day_change)
if day_change != 0:
valid = True
if volume != "N/A":
if volume <= 50000:
volume_text = volume
elif 50000 < volume < 1000000:
volume_text = str(round(volume / 1000, 2)) + "K"
elif 1000000 <= volume < 1000000000:
volume_text = str(round(volume / 1000000, 2)) + "M"
else:
                    volume_text = str(round(volume / 1000000000, 2)) + "B"
valid = True
else:
volume = 0
volume_text = ""
if avg50day != "N/A":
avg50day = round(avg50day, 2)
if mkt_cap != "N/A":
if mkt_cap < 1000000000:
mkt_cap_text = str(round(mkt_cap / 1000000, 2)) + "M"
elif 1000000000 <= mkt_cap < 1000000000000:
mkt_cap_text = str(round(mkt_cap / 1000000000, 2)) + "B"
else:
mkt_cap_text = str(round(mkt_cap / 1000000000000, 2)) + "T"
valid = True
else:
mkt_cap = 0
mkt_cap_text = ""
if stock_float != "N/A":
stock_float = stock_float
valid = True
if beta != "N/A":
beta = "{:.2f}".format(beta)
valid = True
# if the ticker has any valid column, and mkt_cap is in the range, append
if valid and mkt_cap >= min_mkt_cap and volume >= min_vol:
stat_list = [symbol, price, day_change, avg50day, volume_text, mkt_cap_text, stock_float, beta]
processed_stats_table.append(stat_list)
# construct dataframe
columns = ['symbol', 'price', 'one_day_change_percent', 'fifty_day_change_percent', 'volume',
'mkt_cap', 'floating_shares', 'beta']
stats_df = pd.DataFrame(processed_stats_table, columns=columns)
stats_df['floating_shares'] = stats_df['floating_shares'].str.replace(',', '')
stats_df.set_index('symbol', inplace=True)
return stats_df
def print_df(df, filename, writesql, writecsv, subreddit):
df.reset_index(inplace=True)
df.index += 1
df.reset_index(inplace=True)
df.rename(columns={'index': 'rank'}, inplace=True)
now = datetime.utcnow()
dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
df['date_updated'] = dt_string
df['subreddit'] = subreddit
cols_to_change = ["rank", "total", "recent", "previous", "rockets", "posts", "upvotes", "comments"]
for col in cols_to_change:
df[col] = df[col].fillna(0).astype(float)
df['change'] = df['change'].apply(lambda x: round(x, 2))
df['change'] = df['change'].replace(0, "N/A")
df['industry'] = df['industry'].str.replace("—", "-")
df['recommend'] = df['recommend'].str.replace("_", " ")
df['website'] = df['website'].str.replace("alibabagroup.com", "alibaba.com")
df['website'] = df['website'].str.replace("tesla.com", "tesla.cn")
df['website'] = df['website'].str.replace("https://logo.clearbit.com/modernatx.com",
"https://g.foolcdn.com/art/companylogos/mark/mrna.png")
# Save to sql database
if writesql:
for row_num in range(len(df)):
db.execute(
"INSERT INTO {} VALUES "
"(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, NULL)".format(subreddit),
tuple(df.loc[row_num].tolist()))
conn.commit()
print("Saved to {} SQL Database successfully.".format(subreddit))
# Write to csv
if writecsv:
completeName = os.path.join(sys.path[0], filename)
completeName += '.csv'
df.to_csv(completeName, index=False, float_format='%.2f', mode='a', encoding=locale.getpreferredencoding())
print("Wrote to file successfully {}".format(completeName))
# Create past 1 month chart
print("Saving last 1 month chart now...")
chart_path = r"static/graph_chart/stocks"
top_35 = df[:35]
for index, i in top_35.iterrows():
trending_ticker = i[1]
ticker = yf.Ticker(trending_ticker)
price_df = ticker.history(interval="1d", period="1mo")["Close"]
price_list = price_df.to_list()
if price_list:
start_price = price_list[0]
end_price = price_list[-1]
if start_price > end_price:
color = "red"
else:
color = "green"
days_list = [i for i in range(len(price_list))]
plt.figure(figsize=(1, 0.5))
plt.axis("off")
plt.xticks([])
plt.yticks([])
plt.plot(days_list, price_list, color=color)
plt.savefig(os.path.join(chart_path, r"{}.svg".format(trending_ticker)), transparent=True)
plt.close()
# Remove old charts
to_delete_date = datetime.utcnow().date() - timedelta(days=15)
for img_name in os.listdir(chart_path):
img_last_modified = datetime.fromtimestamp(os.path.getmtime(os.path.join(chart_path, img_name))).date()
if img_last_modified <= to_delete_date:
os.remove(os.path.join(chart_path, img_name))
| [
"[email protected]"
] | |
e1b06b52231c9bcd3a40ec3a68471706dc7781ed | b471470126befc48d61bf3e17c8231b33e8d3e33 | /1117-mid-term-6.py | ff3fe16f7e1178ab27682245ea8b118fe673391a | [] | no_license | Xi-Plus/KUAS-DIIWS-Code | 5ccd5ff512b3aad5fcf9ca37c7ca095e796aca9d | 60e0e6c8c80847b5270d4d0f45028becabd08230 | refs/heads/master | 2021-09-03T21:48:31.755611 | 2018-01-12T08:37:51 | 2018-01-12T08:37:51 | 105,234,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | from urllib.request import urlopen
from bs4 import BeautifulSoup
url = "http://www.books.com.tw/activity/gold66_day/?loc=P_021_1_more_001"
f = urlopen(url)
html = f.read()
obj = BeautifulSoup(html, "html.parser")
days = []
for i in obj.findAll("div", {"class":"day"}):
days.append(i.get_text())
names = []
for i in obj.findAll("div", {"class":"sec_day"}):
names.append(i.findAll("a")[1].get_text())
publishs = []
prices = []
prices2 = []
for i in obj.findAll("div", {"class":"sec_day"}):
temp = i.findAll("h2")
publishs.append(temp[0].get_text())
prices.append(temp[1].get_text())
prices2.append(temp[2].get_text())
for i in range(len(days)):
print(days[i])
print(names[i])
    print(publishs[i])
print(prices[i])
print(prices2[i])
print()
| [
"[email protected]"
] | |
bc00dc511cc641182e6e0f77d4976d1b1b3a07ca | 9471259e3cf5d6772e553dd847d23a4fef7d7fe7 | /customer_phonecall_feedback/models/__init__.py | 926e3b5eea574f2f44b776d5ef1e68f8db13407a | [] | no_license | TranPhucDang/translate_module | 16e0bae287f83d174f73d2271c4a343f012e0b5e | 9135913921240594babe51362fd5f5fd37803d9f | refs/heads/master | 2020-08-03T13:36:58.899783 | 2019-12-31T15:39:28 | 2019-12-31T15:39:28 | 211,767,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | # -*- coding: utf-8 -*-
#################################################################################
#
# Odoo, Open Source Management Solution
# Copyright (C) 2017 Ascetic Business Solution <www.asceticbs.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
import crm_phonecall
import survey
| [
"[email protected]"
] | |
72fd0fbf13da481b25826bed1e553261679655c5 | c340835e4444c664fc2b261238e3738cf5bf7196 | /combination_sum.py | 58093ee3ed445d253ce8b136bea5e6b489626c29 | [] | no_license | z-o-e/LeetCode_OJ_Python | 49f2a7378eb98f707c97d7757cc19ef19622db42 | ad7f5152fe404bdd4e91710d9a719f392bec7a96 | refs/heads/master | 2021-03-12T22:39:44.898502 | 2014-10-16T05:30:25 | 2014-10-16T05:30:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | class Solution:
# @param candidates, a list of integers
# @param target, integer
# @return a list of lists of integers
def combinationSum(self, candidates, target):
candidates = sorted(candidates)
self.res = []
self.dfs(candidates, [], target)
return self.res
def dfs(self, candidates, cur, target):
if target==0:
if sorted(cur) not in self.res:
self.res.append(cur)
return
for c in candidates:
if target-c>=0:
tmp = cur[:]
tmp.append(c)
self.dfs(candidates, tmp, target-c)
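    # Illustrative run (not part of the original file):
    #   Solution().combinationSum([2, 3, 6, 7], 7) -> [[2, 2, 3], [7]]
    # Each candidate may be reused any number of times; the `sorted(cur) not in self.res`
    # check above keeps only one ordering of every combination, at the cost of a linear scan.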
| [
"[email protected]"
] | |
9b329bf9c03eb7fd5e50cebfbbc7e2fca726f262 | fbc0d5c081dbfead483a1e1e226a380446bcd56e | /TDT4117 - Informasjonsgjenfinning/exercises/2/subtask2.2.py | 88a2dba3adc75dd1a6e69012d2e012ad9648eba4 | [] | no_license | blauks/ntnu-2 | 58eec17a8f6db7424a6cb44d74e029b68256320a | 38fa0ddfaa726408f087d1792fd0e00810f9243c | refs/heads/master | 2020-08-16T11:01:10.946232 | 2019-08-08T11:28:48 | 2019-08-08T11:28:48 | 215,494,235 | 0 | 1 | null | 2019-10-16T08:15:48 | 2019-10-16T08:15:48 | null | UTF-8 | Python | false | false | 242 | py | #!/usr/bin/python3
d1 = 'an apple a day keeps the doctor away.'
d2 = 'the best doctor is the one you run to and can’t find.'
d3 = 'one rotten apple spoils the whole barrel.'
q1 = 'doctor'
q2 = 'apple orange'
q3 = 'doctor apple'
λ = 0.5
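# The exercise file stops after the data definitions above. Below is a minimal,
# illustrative sketch (not part of the original hand-in) of query-likelihood scoring,
# assuming λ is meant as the Jelinek-Mercer mixing weight:
#   P(t|d) = λ * P_ml(t|d) + (1 - λ) * P_ml(t|C)
def tokenize(text):
    # lowercase, split on whitespace, strip trailing punctuation
    return [w.strip('.,') for w in text.lower().split()]

def jm_score(query, doc, collection, lam):
    doc_tokens = tokenize(doc)
    coll_tokens = tokenize(collection)
    score = 1.0
    for term in tokenize(query):
        p_doc = doc_tokens.count(term) / len(doc_tokens)
        p_coll = coll_tokens.count(term) / len(coll_tokens)
        score *= lam * p_doc + (1 - lam) * p_coll
    return score

if __name__ == '__main__':
    collection = ' '.join([d1, d2, d3])
    for name, doc in (('d1', d1), ('d2', d2), ('d3', d3)):
        print(name, jm_score(q3, doc, collection, λ))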
| [
"[email protected]"
] | |
96cd5e74289dadeea430e3920b049093288eb0a1 | fcb628087b05031f2ffec5d6719714d210a9ebd2 | /sukonbu/json_schema_parser.py | 53b2420adf760f6b16937f06af94f854c676e54e | [
"MIT"
] | permissive | ousttrue/sukonbu | 165b4aa0dcbb416367fa51bd2cfb0724dcaa475f | aca6121c3afa1fe404e6208553070895829df780 | refs/heads/master | 2023-08-29T17:30:28.099703 | 2022-03-15T17:21:01 | 2022-03-15T17:21:01 | 224,555,772 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,741 | py | from typing import Optional, NamedTuple, List
import json
import pathlib
from .json_schema import JsonSchema
class JsonSchemaItem(NamedTuple):
key: str
item: JsonSchema
parent: Optional[JsonSchema] = None
class JsonSchemaParser:
def __init__(self, dir: Optional[pathlib.Path] = None):
self.root: Optional[JsonSchema] = None
self.path_map = {}
self.dir: Optional[pathlib.Path] = dir
def from_dict(self, root: dict) -> 'JsonSchema':
'''
        Recursively replace dicts with JsonSchema objects, depth first.
'''
def traverse(node: dict,
parent: Optional[dict] = None,
key: Optional[str] = None) -> JsonSchema:
# pass replace leaf to JsonSchema
props = node.get('properties')
if props:
node['properties'] = {
key: traverse(prop, node, key)
for key, prop in props.items()
}
items = node.get('items')
if items:
node['items'] = traverse(items, node)
additionalProperties = node.get('additionalProperties')
if additionalProperties:
node['additionalProperties'] = traverse(
additionalProperties, node)
if node.get('anyOf') and parent and key:
# enum
node['title'] = parent['title'].replace(
' ', '') + key[0:1].upper() + key[1:]
if 'properties' not in node:
node['properties'] = {}
if 'dependencies' not in node:
node['dependencies'] = []
if 'required' not in node:
node['required'] = []
return JsonSchema(**node)
return traverse(root)
def get_or_read_ref(self, dir: pathlib.Path, filename: str,
current: List[str]) -> dict:
path = dir / filename
if not path.exists():
assert(self.dir)
path = self.dir / filename
# ref = self.path_map.get(path)
# if ref:
# return ref
text = path.read_text(encoding='utf-8')
ref_parsed = json.loads(text)
ref = self.preprocess(ref_parsed, path.parent, current)
self.path_map[path] = ref
ref['path'] = path
return ref
def preprocess(self, parsed: dict, directory: pathlib.Path,
current: List[str]):
'''
        * expand `$ref` references (and similar) and merge everything into a single json
        * treat allOf as inheritance and copy the parent JsonSchema's attributes into the child
        * treat anyOf as its first listed type (glTF uses anyOf like an enum)
        * nest properties as classes
        * nest items as lists
        * nest additionalProperties as dicts
'''
if '$schema' in parsed:
del parsed['$schema']
if '$ref' in parsed:
# replace
# print(path)
ref = self.get_or_read_ref(directory, parsed['$ref'], current)
for k, v in ref.items():
parsed[k] = v
del parsed['$ref']
if 'allOf' in parsed:
# inherited
ref = self.get_or_read_ref(directory, parsed['allOf'][0]['$ref'],
current)
for k, v in ref.items():
if k in parsed:
if k == 'properties':
for pk, pv in ref[k].items():
parsed[k][pk] = pv
continue
elif k in ['title']:
continue
parsed[k] = v
del parsed['allOf']
if 'anyOf' in parsed:
for x in parsed['anyOf']:
if 'type' in x:
parsed['type'] = x['type']
break
for key in ['not']:
# skip
if key in parsed:
del parsed[key]
keys = [key for key in parsed.keys()]
for key in keys:
if key == 'properties':
for k, v in parsed[key].items():
self.preprocess(v, directory, current + [k])
elif key == 'items':
parsed[key] = self.preprocess(parsed[key], directory,
current + ['Item']) # array item
elif key == 'additionalProperties':
tmp = parsed[key]
if tmp is False:
# do nothing
continue
parsed[key] = self.preprocess(tmp, directory,
current + ['Value']) # kv value
elif key in [
'path',
'title',
'type',
'description',
'gltf_detailedDescription',
'gltf_webgl',
'gltf_uriType',
'default',
#
'enum',
#
'additionalProperties',
'minProperties',
#
'uniqueItems',
'minItems',
'maxItems',
#
'minimum',
'maximum',
'multipleOf',
'exclusiveMinimum',
'pattern',
'format',
#
'anyOf',
'oneOf',
#
'required',
'dependencies',
]:
pass
else:
raise Exception(f'unknown {key}')
if 'title' not in parsed:
parsed['title'] = '.'.join(current)
if parsed['title'] == 'Extension':
# set name to extension
if current:
parsed['title'] = '.'.join(current[0:-1] + [parsed['title']])
elif parsed['title'] == 'Extras':
# set name to extras
if current:
parsed['title'] = '.'.join(current[0:-1] + [parsed['title']])
return parsed
def process(self, entry_point: pathlib.Path):
text = entry_point.read_text()
parsed = json.loads(text)
processed = self.preprocess(parsed, entry_point.parent, [])
self.root = self.from_dict(processed)
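# Illustrative usage (the schema path is a placeholder, not from this repo):
#
#     parser = JsonSchemaParser()
#     parser.process(pathlib.Path("glTF.schema.json"))
#     root = parser.root   # JsonSchema tree with $ref/allOf already resolved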
| [
"[email protected]"
] | |
df840439989d5650df6e6a7988fa5b59caa9850d | 88994e2e840a70ec702cee09e1a13813aa6f800c | /tests/meta/upload/scout/test_scout_config_builder.py | 811298718aac85b422b13148020a410369823810 | [] | no_license | Clinical-Genomics/cg | 1e9eb0852f742d555a48e8696914ebe177f7d436 | d2ec6d25b577dd6938bbf92317aeff1d6b3c5b08 | refs/heads/master | 2023-09-01T02:04:04.229120 | 2023-08-31T13:50:31 | 2023-08-31T13:50:31 | 82,567,026 | 19 | 8 | null | 2023-09-14T15:24:13 | 2017-02-20T14:29:43 | Python | UTF-8 | Python | false | false | 7,857 | py | """Tests for the file handlers."""
import logging
from housekeeper.store.models import Version
from cg.meta.upload.scout.balsamic_config_builder import BalsamicConfigBuilder
from cg.meta.upload.scout.hk_tags import CaseTags
from cg.meta.upload.scout.mip_config_builder import MipConfigBuilder
from cg.meta.upload.scout.rnafusion_config_builder import RnafusionConfigBuilder
from cg.store.models import Analysis
from tests.mocks.limsmock import MockLimsAPI
from tests.mocks.madeline import MockMadelineAPI
from tests.mocks.mip_analysis_mock import MockMipAnalysis
def test_mip_config_builder(
hk_version: Version,
mip_dna_analysis: Analysis,
lims_api: MockLimsAPI,
mip_analysis_api: MockMipAnalysis,
madeline_api: MockMadelineAPI,
):
"""Test MIP config builder class."""
# GIVEN a MIP analysis
# WHEN instantiating
config_builder = MipConfigBuilder(
hk_version_obj=hk_version,
analysis_obj=mip_dna_analysis,
lims_api=lims_api,
mip_analysis_api=mip_analysis_api,
madeline_api=madeline_api,
)
# THEN assert that the correct case tags was used
assert isinstance(config_builder.case_tags, CaseTags)
def test_balsamic_config_builder(
hk_version: Version, balsamic_analysis_obj: Analysis, lims_api: MockLimsAPI
):
"""Test Balsamic config builder class."""
# GIVEN a balsamic file handler
# WHEN instantiating
file_handler = BalsamicConfigBuilder(
hk_version_obj=hk_version, analysis_obj=balsamic_analysis_obj, lims_api=lims_api
)
# THEN assert that the correct case tags was used
assert isinstance(file_handler.case_tags, CaseTags)
def test_rnafusion_config_builder(
hk_version: Version,
rnafusion_analysis_obj: Analysis,
lims_api: MockLimsAPI,
):
"""Test RNAfusion config builder class."""
# GIVEN a rnafusion file handler
# WHEN instantiating
file_handler = RnafusionConfigBuilder(
hk_version_obj=hk_version, analysis_obj=rnafusion_analysis_obj, lims_api=lims_api
)
# THEN assert that the correct case tags was used
assert isinstance(file_handler.case_tags, CaseTags)
def test_include_delivery_report_mip(mip_config_builder: MipConfigBuilder):
"""Test include delivery report."""
# GIVEN a config builder with data
# GIVEN a config without a delivery report
assert mip_config_builder.load_config.delivery_report is None
# WHEN including the delivery report
mip_config_builder.include_delivery_report()
# THEN assert that the delivery report was added
assert mip_config_builder.load_config.delivery_report is not None
def test_include_synopsis(mip_config_builder: MipConfigBuilder):
"""Test include synopsis."""
# GIVEN a config builder with some data
# GIVEN a config without synopsis
assert mip_config_builder.load_config.synopsis is None
# WHEN including the synopsis
mip_config_builder.build_load_config()
# THEN assert that the synopsis was added
assert mip_config_builder.load_config.synopsis
def test_include_phenotype_groups(mip_config_builder: MipConfigBuilder):
"""Test include phenotype groups."""
# GIVEN a config builder with some data
# GIVEN a config without a phenotype groups
assert mip_config_builder.load_config.phenotype_groups is None
# WHEN including the phenotype groups
mip_config_builder.include_phenotype_groups()
# THEN assert that the phenotype groups were added
assert mip_config_builder.load_config.phenotype_groups is not None
def test_include_phenotype_terms(mip_config_builder: MipConfigBuilder):
"""Test include phenotype terms."""
# GIVEN a config builder with some data
# GIVEN a config without a phenotype terms
assert mip_config_builder.load_config.phenotype_terms is None
# WHEN including the phenotype terms
mip_config_builder.include_phenotype_terms()
# THEN assert that the phenotype terms were added
assert mip_config_builder.load_config.phenotype_terms is not None
def test_include_alignment_file_individual(mip_config_builder: MipConfigBuilder, sample_id: str):
"""Test include alignment files."""
# GIVEN a mip config builder with some information
# WHEN building the scout load config
mip_config_builder.build_load_config()
# THEN assert that the alignment file was added to sample id
file_found = False
for sample in mip_config_builder.load_config.samples:
if sample.sample_id == sample_id:
assert sample.alignment_path is not None
file_found = True
assert file_found
def test_include_mip_case_files(mip_config_builder: MipConfigBuilder):
"""Test include MIP case files."""
# GIVEN a Housekeeper version bundle with MIP analysis files
# GIVEN a case load object
# GIVEN a MIP file handler
# WHEN including the case level files
mip_config_builder.build_load_config()
# THEN assert that the mandatory SNV VCF was added
assert mip_config_builder.load_config.vcf_snv
def test_include_mip_sample_files(mip_config_builder: MipConfigBuilder, sample_id: str):
"""Test include MIP sample files."""
# GIVEN a Housekeeper version bundle with MIP analysis files
# GIVEN a case load object
# GIVEN that there are no sample level mt_bam
# GIVEN a MIP file handler
# WHEN including the case level files
mip_config_builder.build_load_config()
# THEN assert that the mandatory SNV VCF was added
file_found = False
for sample in mip_config_builder.load_config.samples:
if sample.sample_id == sample_id:
assert sample.mt_bam is not None
file_found = True
assert file_found
def test_include_mip_sample_subject_id(
mip_config_builder: MipConfigBuilder, sample_id: str, caplog
):
"""Test include MIP sample subject id."""
# GIVEN subject_id on the sample
caplog.set_level(level=logging.DEBUG)
# WHEN building the config
mip_config_builder.build_load_config()
# THEN the subject_id was added to the scout sample
subject_id_found = False
for sample in mip_config_builder.load_config.samples:
if sample.sample_id == sample_id:
subject_id_found = True
assert sample.subject_id is not None
assert subject_id_found
def test_include_balsamic_case_files(balsamic_config_builder: BalsamicConfigBuilder):
"""Test include Balsamic case files."""
# GIVEN a Housekeeper version bundle with balsamic analysis files
# GIVEN a case load object
# WHEN including the case level files
balsamic_config_builder.build_load_config()
# THEN assert that the mandatory snv vcf was added
assert balsamic_config_builder.load_config.vcf_cancer
def test_include_balsamic_delivery_report(balsamic_config_builder: BalsamicConfigBuilder):
"""Test include Balsamic delivery report."""
# GIVEN a Housekeeper version bundle with balsamic analysis files
# GIVEN a case load object
# WHEN including the case level files
balsamic_config_builder.build_load_config()
# THEN assert that the delivery_report exists
assert balsamic_config_builder.load_config.delivery_report
def test_extract_generic_filepath(mip_config_builder: MipConfigBuilder):
"""Test that parsing of file path."""
# GIVEN files paths ending with
file_path1 = "/some/path/gatkcomb_rhocall_vt_af_chromograph_sites_X.png"
file_path2 = "/some/path/gatkcomb_rhocall_vt_af_chromograph_sites_12.png"
# THEN calling extracting the generic path will remove numeric id and fuffix
generic_path = "/some/path/gatkcomb_rhocall_vt_af_chromograph_sites_"
# THEN
assert mip_config_builder.extract_generic_filepath(file_path1) == generic_path
assert mip_config_builder.extract_generic_filepath(file_path2) == generic_path
| [
"[email protected]"
] | |
1461437e9ebeb4c81603608e67f1504f0e628c17 | ad5b72656f0da99443003984c1e646cb6b3e67ea | /tools/mo/openvino/tools/mo/back/offline_transformations.py | d4615aeb524e518a1b292b6c45fd267a8d1ac306 | [
"Apache-2.0"
] | permissive | novakale/openvino | 9dfc89f2bc7ee0c9b4d899b4086d262f9205c4ae | 544c1acd2be086c35e9f84a7b4359439515a0892 | refs/heads/master | 2022-12-31T08:04:48.124183 | 2022-12-16T09:05:34 | 2022-12-16T09:05:34 | 569,671,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,184 | py | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import argparse
from typing import List
from openvino.tools.mo.front.extractor import create_params_with_custom_types
from openvino.tools.mo.utils.cli_parser import parse_transform
from openvino.tools.mo.utils.error import Error
from openvino.runtime import Model
def get_available_transformations():
try:
from openvino._offline_transformations import apply_low_latency_transformation # pylint: disable=import-error,no-name-in-module
from openvino._offline_transformations import apply_make_stateful_transformation # pylint: disable=import-error,no-name-in-module
from openvino._offline_transformations import apply_pruning_transformation # pylint: disable=import-error,no-name-in-module
return {
'MakeStateful': apply_make_stateful_transformation,
'LowLatency2': apply_low_latency_transformation,
'Pruning': apply_pruning_transformation,
}
except Exception as e:
return {}
# net should be openvino.inference_engine.IENetwork type, but IE Engine is still optional dependency
def apply_user_transformations(func: object, transforms: list):
available_transformations = get_available_transformations()
for name, args in transforms:
if name not in available_transformations.keys():
raise Error("Transformation {} is not available.".format(name))
available_transformations[name](func, **args)
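# Note: as consumed above, each element of `transforms` is a (name, kwargs) pair,
# e.g. ("LowLatency2", {}); the kwargs are forwarded verbatim to the selected
# offline transformation.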
def apply_moc_transformations(func: object):
from openvino._offline_transformations import apply_moc_transformations # pylint: disable=import-error,no-name-in-module
apply_moc_transformations(func, cf=False, smart_reshape=True)
def apply_moc_legacy_transformations(func: object, params_with_custom_types: List[str]):
from openvino._offline_transformations import apply_moc_legacy_transformations # pylint: disable=import-error,no-name-in-module
apply_moc_legacy_transformations(func, params_with_custom_types)
def compress_model(func: object):
from openvino._offline_transformations import compress_model_transformation # pylint: disable=import-error,no-name-in-module
compress_model_transformation(func)
def apply_fused_names_cleanup(func: object):
from openvino._offline_transformations import apply_fused_names_cleanup # pylint: disable=import-error,no-name-in-module
apply_fused_names_cleanup(func)
def apply_offline_transformations(func: Model, argv: argparse.Namespace):
from openvino.tools.mo.back.preprocessing import apply_preprocessing # pylint: disable=no-name-in-module,import-error
# Apply preprocessing (mean/scale/reverse_channels/convert_layout/etc)
apply_preprocessing(ov_function=func, argv=argv)
apply_moc_transformations(func)
params_with_custom_types = create_params_with_custom_types(argv.packed_user_shapes)
apply_moc_legacy_transformations(func, params_with_custom_types)
apply_user_transformations(func, parse_transform(argv.transform))
if "compress_fp16" in argv and argv.compress_fp16:
compress_model(func)
apply_fused_names_cleanup(func)
return func
| [
"[email protected]"
] | |
359cf7c3dac736613c7ebff4364f35e0721ed001 | a1bffcd8854e1843e56bb812d4d83b3161a5211e | /tests/unit/modules/network/fortios/test_fortios_firewall_address6_template.py | cad670fda6582598f43a5c2e43e0574e5e04c14d | [] | no_license | goneri/ansible.community | 1a71f9d98c164b77f8ed2ed7f558b4963005ff8f | f26f612dd0a3154050d90b51a75502018c95f6e4 | refs/heads/master | 2020-12-29T07:47:35.353515 | 2020-01-22T17:43:18 | 2020-01-22T17:43:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,805 | py | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible_collections.ansible.community.plugins.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible_collections.ansible.community.plugins.modules import fortios_firewall_address6_template
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible_collections.ansible.community.plugins.modules.fortios_firewall_address6_template.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_firewall_address6_template_creation(mocker):
schema_method_mock = mocker.patch('ansible_collections.ansible.community.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible_collections.ansible.community.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_address6_template': {
'ip6': 'test_value_3',
'name': 'default_name_4',
'subnet_segment_count': '5'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_address6_template.fortios_firewall(input_data, fos_instance)
expected_data = {
'ip6': 'test_value_3',
'name': 'default_name_4',
'subnet-segment-count': '5'
}
set_method_mock.assert_called_with('firewall', 'address6-template', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_firewall_address6_template_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible_collections.ansible.community.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible_collections.ansible.community.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_address6_template': {
'ip6': 'test_value_3',
'name': 'default_name_4',
'subnet_segment_count': '5'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_address6_template.fortios_firewall(input_data, fos_instance)
expected_data = {
'ip6': 'test_value_3',
'name': 'default_name_4',
'subnet-segment-count': '5'
}
set_method_mock.assert_called_with('firewall', 'address6-template', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_firewall_address6_template_removal(mocker):
schema_method_mock = mocker.patch('ansible_collections.ansible.community.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible_collections.ansible.community.plugins.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'firewall_address6_template': {
'ip6': 'test_value_3',
'name': 'default_name_4',
'subnet_segment_count': '5'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_address6_template.fortios_firewall(input_data, fos_instance)
delete_method_mock.assert_called_with('firewall', 'address6-template', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_firewall_address6_template_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible_collections.ansible.community.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible_collections.ansible.community.plugins.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'firewall_address6_template': {
'ip6': 'test_value_3',
'name': 'default_name_4',
'subnet_segment_count': '5'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_address6_template.fortios_firewall(input_data, fos_instance)
delete_method_mock.assert_called_with('firewall', 'address6-template', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_firewall_address6_template_idempotent(mocker):
schema_method_mock = mocker.patch('ansible_collections.ansible.community.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible_collections.ansible.community.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_address6_template': {
'ip6': 'test_value_3',
'name': 'default_name_4',
'subnet_segment_count': '5'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_address6_template.fortios_firewall(input_data, fos_instance)
expected_data = {
'ip6': 'test_value_3',
'name': 'default_name_4',
'subnet-segment-count': '5'
}
set_method_mock.assert_called_with('firewall', 'address6-template', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_firewall_address6_template_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible_collections.ansible.community.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible_collections.ansible.community.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_address6_template': {
'random_attribute_not_valid': 'tag',
'ip6': 'test_value_3',
'name': 'default_name_4',
'subnet_segment_count': '5'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_address6_template.fortios_firewall(input_data, fos_instance)
expected_data = {
'ip6': 'test_value_3',
'name': 'default_name_4',
'subnet-segment-count': '5'
}
set_method_mock.assert_called_with('firewall', 'address6-template', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| [
"[email protected]"
] | |
c2fb539bc071787bca42c1d5bdc7550f71769d0f | 7def22f9e61a125a8a02d85018fdc3fa34f4d060 | /superlists/urls.py | 7ba2fb600869500c7397a9c0af482c74b3afb792 | [] | no_license | uglyboxer/superlists | 188e7c659f97e77ebddeba3b07dc1b5bc03c928a | e0cf2e828991f04c4050170c13f9c4b6cc2be0e8 | refs/heads/master | 2021-01-10T02:24:48.031207 | 2015-11-25T00:24:23 | 2015-11-25T00:24:23 | 46,455,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | """superlists URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from lists import views
urlpatterns = [
url(r'^$', views.home_page, name='home'),
url(r'^lists/the-only-list-in-the-world/$', views.view_list, name='view_list'),
url(r'^lists/new$', views.new_list, name='new_list'),
# url(r'^admin/', include(admin.site.urls)),
]
| [
"[email protected]"
] | |
0a9e7a6f204293778dc37164aff561a6817fcb7d | bbae6df729e03314064301f791af50d7f6ebcaa8 | /density_to_files.py | ced429cd23619ed1594d42c890cfbfae85494a64 | [
"BSD-2-Clause"
] | permissive | mbrothers18/lmatools | b240a4890ee7be23aace4b43e4e5e3ddf99470ed | 084137e265c8520322d5bbbd067ffab3e95801f3 | refs/heads/master | 2021-01-18T06:51:33.945343 | 2016-03-07T18:23:07 | 2016-03-07T18:23:07 | 50,527,487 | 0 | 0 | null | 2016-01-27T18:12:27 | 2016-01-27T18:12:26 | null | UTF-8 | Python | false | false | 13,707 | py | import glob
import gc
import numpy as np
from density_tools import unique_vectors
# --------------------------------------------------------------------------
# ----- This section could be replaced with stormdrain.pipeline imports ----
# --------------------------------------------------------------------------
def coroutine(func):
def start(*args,**kwargs):
cr = func(*args,**kwargs)
cr.next()
return cr
return start
@coroutine
def broadcast(targets):
while True:
stuff = (yield)
for target in targets:
target.send(stuff)
del stuff
class Branchpoint(object):
""" Class-based version useful for tracking a changing state or adjusting targets
at a later time. Some '.dot' access overhead this way, of course.
>>> brancher = Branchpoint( [target1, target2, ...] )
Allows for flexible branching by maintaining a set (in the formal sense) of targets.
brancher.targets.append(newtarget)
brancher.targets.remove(existingtarget)
"""
def __init__(self, targets):
""" Accepts a sequence of targets """
self.targets = set(targets) # this perhaps should be a set and not a list, so it remains unique
@coroutine
def broadcast(self):
while True:
stuff = (yield)
for target in self.targets:
target.send(stuff)
del stuff
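# Minimal usage sketch of the coroutine pipeline pattern above (illustrative only;
# the printer sink below is not part of this module):
#
#     @coroutine
#     def printer():
#         while True:
#             item = (yield)
#             print item
#
#     fanout = broadcast([printer(), printer()])
#     fanout.send('some payload')   # both sinks receive the payload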
# class map_projector(object):
# def __init__(self, ctr_lat, ctr_lon, proj_name='eqc'):
# self.mapProj = MapProjection(projection=proj_name, ctrLat=ctr_lat, ctrLon=ctr_lon, lat_ts=ctr_lat, lon_0=ctr_lon)
# self.geoProj = GeographicSystem()
#
# def __call__(self, lon, lat, alt):
# x,y,z = self.mapProj.fromECEF(
# *self.geoProj.toECEF(lon, lat, alt)
# )
# return x, y, z
#
# @coroutine
# def map_projector(ctr_lat, ctr_lon, target, proj_name='eqc'):
# mapProj = MapProjection(projection=proj_name, ctrLat=ctr_lat, ctrLon=ctr_lon, lat_ts=ctr_lat, lon_0=ctr_lon)
# geoProj = GeographicSystem()
# while True:
# lon, lat, alt = (yield)
# x,y,z = self.mapProj.fromECEF(
# *self.geoProj.toECEF(lon, lat, alt)
# )
# target.send((x,y,z))
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
@coroutine
def flash_count_log(logfile, format_string="%s flashes in frame starting at %s"):
""" Write flash count for some frame to a file-like object. File open/close should be handled
by the calling routine."""
# Track flash count for each frame
frame_times = {}
try:
while True:
# Receive list of flashes, frame start time
flashes, frame_start_time = (yield)
n_flashes = len(flashes)
try:
frame_times[frame_start_time] += n_flashes
except KeyError:
# Key doesn't exist, so can't increment flash count
frame_times[frame_start_time] = n_flashes
except GeneratorExit:
all_times = frame_times.keys()
all_times.sort()
for frame_start_time in all_times:
flash_count_status = format_string % (frame_times[frame_start_time], frame_start_time)
logfile.write(flash_count_status+'\n')
@coroutine
def filter_flash(target, min_points=10):
""" Filters flash by minimum number of points.
"""
while True:
evs, flash = (yield) # Receive a flash
if (flash['n_points'] >= 10):
target.send((evs, flash))
del evs, flash
@coroutine
def flashes_to_frames(time_edges, targets, time_key='start', time_edges_datetime=None, flash_counter=None):
""" time_edges_datetime is same len as time_edges but with datetime objects instead of floats.
allows
"""
if time_edges_datetime is None:
# print "Datetime-style time edges not found, using time edges in seconds for flash count label"
time_edges_datetime = time_edges
flash_count_messages = []
assert len(time_edges) == (len(time_edges_datetime))
assert len(time_edges) == (len(targets)+1)
while True:
events, flashes = (yield)
start_times = flashes[time_key]
sort_idx = np.argsort(start_times) #, order=[time_key])
idx = np.searchsorted(start_times[sort_idx], time_edges)
slices = [slice(*i) for i in zip(idx[0:-1], idx[1:])]
for target, s, frame_start_time in zip(targets, slices, time_edges_datetime[:-1]):
these_flashes = flashes[sort_idx][s]
if flash_counter is not None:
flash_counter.send((these_flashes, frame_start_time))
# flash_count_status = "Sending %s flashes to frame starting at %s" % (len(these_flashes), frame_start_time)
# flash_count_messages += flash_count_status
# print flash_count_status
target.send((events, these_flashes))
del events, flashes, start_times, sort_idx, idx, slices
print flash_count_messages
def event_yielder(evs, fls):
for fl in fls:
these_events = evs[evs['flash_id'] == fl['flash_id']]
# if len(these_events) <> fl['n_points']:
# print 'not giving all ', fl['n_points'], ' events? ', these_events.shape
for an_ev in these_events:
yield an_ev
@coroutine
def extract_events_for_flashes(target, flashID_key='flash_id'):
""" Takes a large table of events and grabs only the events belonging to the flashes.
"""
while True:
evs, fls = (yield)
# print 'extracting events'
# event_dtype = evs[0].dtype
event_dtype = evs.dtype
events = np.fromiter( (event_yielder(evs, fls)) , dtype=event_dtype)
# The line below (maybe maybe maybe)
# events = np.fromiter((evs[evs['flash_id'] == fl['flash_id']] for fl in fls), dtype=event_dtype)
# does the same thing as the two following lines, but is 10x slower.
# The 'mapper' could actually be optimized further by calculating it globally, once per events table,
# but this is fast enough and saves having to pass through another variable.
# mapper = dict(zip(evs['flash_id'],evs))
# events = np.fromiter( (mapper[fl['flash_id']] for fl in fls), dtype=event_dtype)
target.send((events, fls))
del events, evs, fls
# @coroutine
# def extract_events(target, flashID_key='flash_id'):
# """ Takes a large table of events and grabs only the events belonging to the flash.
# This is useful if you filter out a bunch of flashes before going to the trouble of
# reading the flashes in.
# """
# while True:
# evs, flash = (yield)
# flash_id = flash[flashID_key]
# event_dtype = evs[0].dtype
# # events = [ev[:] for ev in evs if ev[flashID_key] == flash_id]
# # events = np.asarray(events, dtype=event_dtype)
# # events = evs[:]
# events = evs[evs[flashID_key]==flash_id]
# # events = np.fromiter((ev[:] for ev in evs if ev[flashID_key] == flash_id), dtype=event_dtype)
# target.send((events, flash))
@coroutine
def no_projection(x_coord, y_coord, z_coord, target, use_flashes=False):
while True:
events, flashes = (yield)
if use_flashes==True:
points = flashes
else:
points = events
x,y,z = points[x_coord], points[y_coord], points[z_coord]
target.send((events, flashes, x,y,z))
del events, flashes, x,y,z, points
@coroutine
def project(x_coord, y_coord, z_coord, mapProj, geoProj, target, use_flashes=False):
""" Adds projected coordinates to the flash and events stream"""
while True:
events, flashes = (yield)
if use_flashes==True:
points = flashes
else:
points = events
x,y,z = mapProj.fromECEF(
*geoProj.toECEF(points[x_coord], points[y_coord], points[z_coord])
)
target.send((events, flashes, x,y,z))
del events, flashes, x,y,z, points
@coroutine
def footprint_mean(flash_id_key='flash_id', area_key='area'):
""" Takes x, y, z flash locations and gets
Extent density unique pixels, average all flashes
"""
while True:
events, flash, x,y,z = (yield)
# print 'Doing extent density',
x_i = np.floor( (x-x0)/dx ).astype('int32')
y_i = np.floor( (y-y0)/dy ).astype('int32')
if len(x_i) > 0:
footprints = dict(zip(flash[flash_id_key], flash[area_key]))
# print 'with points numbering', len(x_i)
unq_idx = unique_vectors(x_i, y_i, events['flash_id'])
# if x[unq_idx].shape[0] > 1:
fl_id = events['flash_id'][unq_idx]
areas = [footprints[fi] for fi in fl_id] #puts areas in same order as x[unq_idx], y[unq_idx]
# counts normalized by areas
target.send((x[unq_idx],y[unq_idx],areas))
del footprints, unq_idx, fl_id, areas
# else:
# print ''
del events, flash, x, y, z, x_i, y_i
@coroutine
def point_density(target):
""" Sends event x, y, z location directly
"""
while True:
events, flash, x, y, z = (yield)
# print 'Doing point density',
if len(x) > 0:
print 'with points numbering', len(x)
target.send((x, y, None))
del events, flash ,x,y,z
# else:
# print ''
@coroutine
def extent_density(x0, y0, dx, dy, target, flash_id_key='flash_id', weight_key=None):
""" This function assumes a regular grid in x and y with spacing dx, dy
x0, y0 is the x coordinate of the lower left corner of the lower-left grid cell,
i.e., the lower left node of the grid mesh in cartesian space
Eliminates duplicate points in gridded space and sends the reduced
set of points to the target.
"""
while True:
# assumes x,y,z are in same order as events
events, flash, x,y,z = (yield)
# print 'Doing extent density',
x_i = np.floor( (x-x0)/dx ).astype('int32')
y_i = np.floor( (y-y0)/dy ).astype('int32')
if len(x_i) > 0:
print 'extent with points numbering', len(x_i), ' with weights', weight_key
unq_idx = unique_vectors(x_i, y_i, events[flash_id_key])
# if x[unq_idx].shape[0] > 1:
if weight_key <> None:
weight_lookup = dict(zip(flash[flash_id_key], flash[weight_key]))
weights = [weight_lookup[fi] for fi in events[unq_idx]['flash_id']] #puts weights in same order as x[unq_idx], y[unq_idx]
del weight_lookup
else:
weights = None
target.send((x[unq_idx], y[unq_idx], weights))
del weights, unq_idx
# else:
# print ''
del events, flash, x, y, z, x_i, y_i
@coroutine
def accumulate_points_on_grid(grid, xedge, yedge, out=None, label=''):
assert xedge.shape[0] == grid.shape[0]+1
assert yedge.shape[0] == grid.shape[1]+1
if out == None:
out = {}
# grid = None
try:
while True:
x, y, weights = (yield)
if len(x) > 0:
x = np.atleast_1d(x)
y = np.atleast_1d(y)
print 'accumulating ', len(x), 'points for ', label
count, edges = np.histogramdd((x,y), bins=(xedge, yedge), weights=None, normed=False)
if weights <> None:
# histogramdd sums up weights in each bin for normed=False
total, edges = np.histogramdd((x,y), bins=(xedge, yedge), weights=weights, normed=False)
# return the mean of the weights in each bin
bad = (count <= 0)
count = np.asarray(total, dtype='float32')/count
count[bad] = 0.0
del total, edges, bad
# try:
# count, edges = np.histogramdd((x,y), bins=(xedge, yedge), weights=weights)
# except AttributeError:
# # if x,y are each scalars, need to make 1D arrays
# x = np.asarray((x,))
# y = np.asarray((y,))
# count, edges = np.histogramdd((x,y), bins=(xedge, yedge), weights=weights)
# using += (as opposed to grid = grid + count) is essential
# so that the user can retain a reference to the grid object
# outside this routine.
if grid == None:
grid = count
out['out'] = grid
else:
grid += count.astype(grid.dtype)
del count
del x, y, weights
gc.collect()
except GeneratorExit:
out['out'] = grid
# if __name__ == '__main__':
# do_profile=False
# if do_profile:
# import hotshot
# from hotshot import stats
# prof = hotshot.Profile("density_test_profile")
# prof.runcall(example)
# prof.close()
# s=stats.load("density_test_profile")
# s.sort_stats("time").print_stats()
# else:
# x_coord, y_coord, lons, lats, test_grid = example()
| [
"[email protected]"
] | |
8384fef2b16fdeea0adf1819f3c0122330a5d974 | e828c22a75dfaa7f3643352d4f1b08559a12edca | /ScrapyProject/TenxunSpider/TenxunSpider/pipelines.py | 8c9505521ec8a1ee4c05f3cdfcfa153491efb9f5 | [] | no_license | csgvsjay1000/spider | 9c545fac9c63f89e7503a7c045ce2b83df044e49 | 10f584440d23b0b17a3486cde6cbc39c9d13692e | refs/heads/master | 2022-02-16T20:29:53.384245 | 2019-08-12T03:06:37 | 2019-08-12T03:06:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 551 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
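# e.g. in the project's settings.py (module path assumed from this repo's layout):
#     ITEM_PIPELINES = {'TenxunSpider.pipelines.TenxunspiderPipeline': 300}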
import json,codecs
class TenxunspiderPipeline(object):
def __init__(self):
self.file=codecs.open('duty_file.json','w',encoding='utf-8')
def process_item(self, item, spider):
line=json.dumps(dict(item),ensure_ascii=False)+'\n'
self.file.write(line)
return item
    def close_spider(self, spider):  # hook that Scrapy calls when the spider finishes
self.file.close() | [
"[email protected]"
] | |
4093735ca3af4a42d6bb8b85700aa046e5c1677c | 52243c4a05a296e7c042663b5942faa47eb66aee | /common_nlp/classifier_legal_phrases_regex.py | e2b84d6ec884fb5abb3de73d4361f5b499d1dbe2 | [
"MIT"
] | permissive | joaoppadua/Pesquisas | fbe0311b59340c041732d6d1f7f4862fa6c53198 | 808d8b0ef9e432e05a4f284ce18778ed8b3acd96 | refs/heads/master | 2023-07-16T02:50:30.846205 | 2021-09-03T13:34:54 | 2021-09-03T13:34:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,048 | py | import re
from regex_classifier_legal_phrases import palavras_interesse
def break_sentences(text, nlp):
# return re.split(r'\w\.\s',text)
text = re.sub(r"\s+", " ", text)
text = re.sub(r"art\.", "art ", text)
text = re.sub(r"fls?\.", "fls ", text)
text = re.sub(r"inc\.", "inc ", text)
doc = nlp(text)
return [sent.text for sent in doc.sents]
def dicionario_frases_tipos():
dic_tipos_frases = {}
for frase, tipo in palavras_interesse.items():
if tipo not in dic_tipos_frases:
dic_tipos_frases[tipo] = []
dic_tipos_frases[tipo].append(r"{}".format(frase))
return dic_tipos_frases
def classifier_legal_phrases_regex(phrase, dic_tipos_frases):
    # "decisao" patterns take precedence over every other class
    for exp in dic_tipos_frases["decisao"]:
        if re.search(exp, phrase, re.I):
            return "decisao"
    for tipo, expressoes in dic_tipos_frases.items():
        for exp in expressoes:
            if re.search(exp, phrase, re.I):
                return tipo
return "argumento"
| [
"[email protected]"
] | |
8805a885442eb234a6deec918fabf62a57b7f574 | 2d1649a7a00d49b72ed7e53afa4abb3c9281ce03 | /.history/ParticleFilter/go_to_goal_20190421184527.py | f8de310580a16f067b5c6a35f5dd7e7b18b58cac | [] | no_license | joshzhang5/CS3630Lab6 | 9547dc6c89198e9bb4aebd8359d4feb974082d20 | 69e6df12829e18a211ae850236d74b4d728046ef | refs/heads/master | 2020-05-15T13:59:51.906195 | 2019-04-22T18:21:42 | 2019-04-22T18:21:42 | 182,317,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,731 | py | # Jiaxi Zhang
# George McAlear
try:
import matplotlib
matplotlib.use('TkAgg')
except ImportError:
pass
from skimage import color
import numpy as np
from numpy.linalg import inv
import threading
import time
import sys
import asyncio
from PIL import Image
from markers import detect, annotator
from grid import CozGrid
from gui import GUIWindow
from particle import Particle, Robot
from setting import *
from particle_filter import *
from utils import *
from time import sleep
import time
import cozmo
from cozmo.util import distance_mm, degrees, speed_mmps, Pose
#particle filter functionality
class ParticleFilter:
def __init__(self, grid):
self.particles = Particle.create_random(PARTICLE_COUNT, grid)
self.grid = grid
def update(self, odom, r_marker_list):
# ---------- Motion model update ----------
self.particles = motion_update(self.particles, odom)
# ---------- Sensor (markers) model update ----------
self.particles = measurement_update(self.particles, r_marker_list, self.grid)
# ---------- Show current state ----------
# Try to find current best estimate for display
m_x, m_y, m_h, m_confident = compute_mean_pose(self.particles)
return (m_x, m_y, m_h, m_confident)
class CozmoWarehouseWorker:
def __init__(self, robot: cozmo.robot.Robot):
self.current_arena_pose = None
self.last_robot_pose = robot.pose
self.robot = robot
# start streaming
        robot.set_head_angle(degrees(3))  # __init__ cannot await; start the head-angle action without waiting
robot.camera.image_stream_enabled = True
robot.camera.color_image_enabled = False
robot.camera.enable_auto_exposure()
# Obtain the camera intrinsics matrix
fx, fy = robot.camera.config.focal_length.x_y
cx, cy = robot.camera.config.center.x_y
self.camera_settings = np.array([
[fx, 0, cx],
[ 0, fy, cy],
[ 0, 0, 1]
], dtype=np.float)
self.grid = CozGrid("map_arena.json")
self.pf = ParticleFilter(self.grid)
self.gui = GUIWindow(self.grid, show_camera=True)
        self.pick_up_pose = Pose(x=4.5, y=20, z=0, angle_z=degrees(90))
        self.drop_off_pose = Pose(x=21.75, y=13.75, z=0, angle_z=degrees(90))
        self.drop_off_directions = [Pose(x=3, y=4.5, z=0, angle_z=degrees(0)), Pose(x=21.75, y=4.5, z=0, angle_z=degrees(90)), self.drop_off_pose]
        self.pick_up_directions = [Pose(x=21.75, y=4.5, z=0, angle_z=degrees(90)), Pose(x=3, y=4.5, z=0, angle_z=degrees(0)), self.pick_up_pose]
    async def drive_to(self, pose):
print("-" * 20)
print("We are at ", self.current_arena_pose, " and we are driving to ", pose)
directions = pose - self.current_arena_pose
print("We will follow these directions: ", directions)
await self.__execute_directions(directions)
print("Directions followed!")
print("-" * 20)
def update_current_arena_pose(self):
local_to_arena_angle_diff = diff_heading_deg(self.last_robot_pose.rotation.degrees, self.current_arena_pose.rotation.degrees)
arena_initial_pose_mm = rotate_point(self.last_robot_pose.position.x, self.last_robot_pose.position.y, local_to_arena_angle_diff)
arena_final_pose_mm = rotate_point(self.robot.pose.position.x, self.robot.pose.position.y )
        self.current_arena_pose = self.current_arena_pose + convertPoseFromMmToInches(rotate_point(self.robot.pose, -self.last_robot_pose))
async def __execute_directions(self, directions):
print("Robot is at: ", self.robot.pose)
await self.robot.turn_in_place(angle=directions.rotation.angle_z).wait_for_completed()
print("ROBOT is at AFTER TURNING to be parallel to X: ", self.robot.pose)
        await self.robot.drive_straight(distance=distance_mm(directions.position.x * self.grid.scale), speed=speed_mmps(80)).wait_for_completed()
print("ROBOT is at AFTER DRIVING in the X direction: ", self.robot.pose)
await self.robot.turn_in_place(angle=degrees(90)).wait_for_completed()
print("ROBOT is at AFTER TURNING to be parallel to Y: ", self.robot.pose)
        await self.robot.drive_straight(distance=distance_mm(directions.position.y * self.grid.scale), speed=speed_mmps(80)).wait_for_completed()
print("ROBOT is at AFTER DRIVING in the Y direction: ", self.robot.pose)
async def localize(self):
# reset our location estimates
conf = False
self.current_arena_pose = Pose(0,0,0,angle_z=degrees(0))
        self.pf = ParticleFilter(self.grid)
# reset lift and head
await self.robot.set_lift_height(0.0).wait_for_completed()
await self.robot.set_head_angle(degrees(3)).wait_for_completed()
while not conf:
# move a little
self.last_robot_pose = self.robot.pose
await self.robot.turn_in_place(angle=degrees(20)).wait_for_completed()
odometry = self.__compute_odometry()
detected_markers, camera_image = await self.__marker_processing()
# update, motion, and measurment with the odometry and marker data
            curr_x, curr_y, curr_h, conf = self.pf.update(odometry, detected_markers)
# update gui
self.gui.show_particles(self.pf.particles)
self.gui.show_mean(curr_x, curr_y, curr_h)
self.gui.show_camera_image(camera_image)
self.gui.updated.set()
self.current_arena_pose = Pose(curr_x , curr_y, 0, angle_z=degrees(curr_h))
def __compute_odometry(self, cvt_inch=True):
'''
Compute the odometry given the current pose of the robot (use robot.pose)
Input:
- curr_pose: a cozmo.robot.Pose representing the robot's current location
- cvt_inch: converts the odometry into grid units
Returns:
- 3-tuple (dx, dy, dh) representing the odometry
'''
last_x, last_y, last_h = self.last_robot_pose.position.x, self.last_robot_pose.position.y, \
self.last_robot_pose.rotation.angle_z.degrees
curr_x, curr_y, curr_h = self.robot.pose.position.x, self.robot.pose.position.y, \
self.robot.pose.rotation.angle_z.degrees
dx, dy = rotate_point(curr_x-last_x, curr_y-last_y, -last_h)
if cvt_inch:
dx, dy = dx / grid.scale, dy / grid.scale
return (dx, dy, diff_heading_deg(curr_h, last_h))
async def __marker_processing(self, show_diagnostic_image=False):
'''
Obtain the visible markers from the current frame from Cozmo's camera.
Since this is an async function, it must be called using await, for example:
markers, camera_image = await marker_processing(robot, camera_settings, show_diagnostic_image=False)
Input:
- robot: cozmo.robot.Robot object
- camera_settings: 3x3 matrix representing the camera calibration settings
- show_diagnostic_image: if True, shows what the marker detector sees after processing
Returns:
- a list of detected markers, each being a 3-tuple (rx, ry, rh)
(as expected by the particle filter's measurement update)
- a PIL Image of what Cozmo's camera sees with marker annotations
'''
# Wait for the latest image from Cozmo
image_event = await self.robot.world.wait_for(cozmo.camera.EvtNewRawCameraImage, timeout=30)
# Convert the image to grayscale
image = np.array(image_event.image)
image = color.rgb2gray(image)
# Detect the markers
markers, diag = detect.detect_markers(image, self.camera_settings, include_diagnostics=True)
# Measured marker list for the particle filter, scaled by the grid scale
marker_list = [marker['xyh'] for marker in markers]
marker_list = [(x/self.grid.scale, y/self.grid.scale, h) for x,y,h in marker_list]
# Annotate the camera image with the markers
if not show_diagnostic_image:
annotated_image = image_event.image.resize((image.shape[1] * 2, image.shape[0] * 2))
annotator.annotate_markers(annotated_image, markers, scale=2)
else:
diag_image = color.gray2rgb(diag['filtered_image'])
diag_image = Image.fromarray(np.uint8(diag_image * 255)).resize((image.shape[1] * 2, image.shape[0] * 2))
annotator.annotate_markers(diag_image, markers, scale=2)
annotated_image = diag_image
return marker_list, annotated_image
async def run(robot: cozmo.robot.Robot):
    cosimo = CozmoWarehouseWorker(robot)
    await cosimo.localize()
    await cosimo.drive_to(cosimo.pick_up_pose)
directions = goal_pose - last_pose
current_pose = last_pose
last_robot_pose = robot.pose
print("SETTING LAST ROBOT POSE TO: ", last_robot_pose)
print("SO WE GOING TO FOLLOW THIS TO PICKUP ZONE:", directions)
await execute_directions(robot, directions)
await robot.turn_in_place(angle=degrees(45)).wait_for_completed()
print("LAST ROBOT POSE IS: ", last_robot_pose)
print("CURRENT POSE IS:", robot.pose)
print("WE THINK WE MOVED THIS MUCH TO GO TO PICKUP ZONE: ", convertPoseToInches(robot.pose - last_robot_pose))
last_robot_pose = robot.pose
print("COZMO THINKS IT IS AT AFTER DRIVING TO PICKUPZONE: ", current_pose)
# await robot.say_text('Ready for pick up!').wait_for_completed()
while True:
cube = await robot.world.wait_for_observed_light_cube(timeout=30)
print("Found cube: %s" % cube)
await robot.pickup_object(cube, num_retries=5).wait_for_completed()
current_pose = current_pose + convertPoseToInches(robot.pose - last_robot_pose)
print("WE THINK WE MOVED THIS MUCH TO PICK UP CUBE: ", convertPoseToInches(robot.pose - last_robot_pose))
last_robot_pose = robot.pose
#cosimo.update_pose()
print("COZMO THINKS IT IS AT AFTER PICKING UP CUBE: ", current_pose)
#await look_around_until_converge(robot)
# intialize an explorer after localized
#cosimo = CozmoExplorer(robot, x_0=last_pose.position.x, y_0=last_pose.position.y, theta_0=last_pose.rotation.angle_z.radians)
# move robot to pickup zone once localized
#print("COZMO CONVERTED THAT TO A START AT:", cosimo.last_arena_pose)
#current_pose = last_pose
# rrt to drop zone and drop off cube
for destination in drop_off_directions:
directions = convertInchesToPose(destination) - current_pose
await execute_directions(robot,directions)
current_pose = current_pose + convertPoseToInches(robot.pose - last_robot_pose)
print("WE THINK WE MOVED THIS MUCH TO FOLLOW DIRECTIONS: ", convertPoseToInches(robot.pose - last_robot_pose))
last_robot_pose = robot.pose
print("COZMO THINKS IT IS AT AFTER FOLLOWING DIRECTIONS: ", current_pose)
#await cosimo.go_to_goal(goal_node=dropoff_node)
await robot.set_lift_height(0.0).wait_for_completed()
# rrt to just in front of pick up zone
# await cosimo.go_to_goal(goal_node=pickup_node)
class CozmoThread(threading.Thread):
def __init__(self):
threading.Thread.__init__(self, daemon=False)
def run(self):
cozmo.robot.Robot.drive_off_charger_on_connect = False # Cozmo can stay on his charger
cozmo.run_program(run, use_viewer=False)
if __name__ == '__main__':
# cozmo thread
cozmo_thread = CozmoThread()
cozmo_thread.start()
# init
gui.show_particles(pf.particles)
gui.show_mean(0, 0, 0)
gui.start() | [
"[email protected]"
] | |
0624acb274bdaacc13d24078b701c3efd0584ce4 | 43ab8c000781c073e6723b3e93013e5f509b84ea | /attractors/3D/Pickover.py | b1953fcdf4020eec7d0559a9c3155b2e2c0695d3 | [
"MIT"
] | permissive | tisnik/fractals | e6e25dbbf5675be5a78f15a2504b25f7de504cf6 | 2e852489cb473394e4dd9103d12d717fed53a51d | refs/heads/master | 2023-04-29T03:17:28.897352 | 2023-04-17T14:01:59 | 2023-04-17T14:01:59 | 202,321,272 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,098 | py | """Výpočet a vykreslení Pickoverova podivného atraktoru v 3D."""
# MIT License
#
# Copyright (c) 2020 Pavel Tišnovský
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Pickover 3D attractor
# In[1]:
# import all required libraries - NumPy and Matplotlib
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
from math import sin, cos
# In[2]:
def pickover(x, y, z, a=2.24, b=0.43, c=-0.65, d=-2.43, e=1.0):
"""Výpočet dalšího bodu Pickoverova atraktoru."""
x_dot = sin(a * y) - z * cos(b * x)
y_dot = z * sin(c * x) - cos(d * y)
z_dot = e * sin(x)
return x_dot, y_dot, z_dot
# total number of points computed for the attractor
n = 100000
# arrays, empty for now, prepared for the computation
x = np.zeros((n,))
y = np.zeros((n,))
z = np.zeros((n,))
# initial values
x[0], y[0], z[0] = (0.0, 0.0, 0.0)
# the actual attractor computation
for i in range(n - 1):
x_dot, y_dot, z_dot = pickover(x[i], y[i], z[i], 2.24, 0.43, -0.65, -2.43, 0.8)
x[i + 1] = x_dot
y[i + 1] = y_dot
z[i + 1] = z_dot
fig = plt.figure()
ax = fig.gca(projection="3d")
# draw the plot
ax.plot(x, y, z, "o", markersize=0.1)
# display the plot
plt.tight_layout()
# plt.show()
ch_3d = np.stack((x, y, z))
lim_xyz = [(np.min(ch_3d[ii]), np.max(ch_3d[ii])) for ii in range(3)]
fig2 = plt.figure("3D Coordinates")
plt.subplot(2, 2, 1)
plt.plot(y, x, "o", linewidth=0.75, markersize=0.1)
plt.grid()
plt.xlabel("X")
plt.ylabel("Y")
plt.xlim(lim_xyz[1])
plt.ylim(lim_xyz[0])
plt.subplot(2, 2, 2)
plt.plot(y, z, "o", linewidth=0.75, markersize=0.1)
plt.grid()
plt.xlabel("Z")
plt.ylabel("Y")
plt.xlim(lim_xyz[1])
plt.ylim(lim_xyz[2])
plt.subplot(2, 2, 3)
plt.plot(z, x, "o", linewidth=0.75, markersize=0.1)
plt.grid()
plt.xlabel("X")
plt.ylabel("Z")
plt.xlim(lim_xyz[2])
plt.ylim(lim_xyz[0])
ax = fig2.add_subplot(2, 2, 4, projection="3d")
ax.plot(x, y, z, "o", linewidth=0.7, markersize=0.1)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
plt.tight_layout()
plt.tight_layout()
plt.show()
| [
"[email protected]"
] | |
d6612826db3167305287e00458171da010bc260d | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /3XtrKPMbxAf86QjjS_10.py | e7446c6b7139fcd63536aa540e62ca83c04cc65e | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 81 | py |
def same_case(txt):
return True if txt.isupper() or txt.islower() else False
| [
"[email protected]"
] | |
c69b864f21b100901da859214899a8f7346b41a7 | 1796043fc26c958b8fc45d9c058e382473c4f3af | /Fabio 01 Parte 02/f1_p2_q2_metro_km.py | 9fa23256bbcf9cb486f2779b637e721651f19242 | [] | no_license | Lucakurotaki/ifpi-ads-algoritmos2020 | a69adec27dbb10aceab1bc7038a0b56a760f99d1 | 34d5fedd5825a85404cf9340e42be618981679c1 | refs/heads/master | 2022-03-22T04:44:14.211359 | 2022-02-19T18:48:36 | 2022-02-19T18:48:36 | 246,585,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py | #Entrada
m = int(input("Digite a medida em metro: "))
#Processamento
km = m/1000
#Saída
print("O equivalente de {} m em kilômetros é: {} km.".format(m,km))
| [
"[email protected]"
] | |
48d254577be2dfe57ef8a4d19ba7c41709aced4e | 007ae03cfe5abf41a0ad864eade451141c267cca | /auto-docs/executables/python/legend_style.py | 6f09c9c36aef49e5259e1ae46fe2bd16241aee10 | [] | no_license | VukDukic/documentation | ca96eb1994eeb532fe60c542960b017354bcede1 | 8e5aefdc38788956cfe31d8fe8b4b77cdf790e57 | refs/heads/master | 2021-01-18T09:02:27.034396 | 2015-01-20T23:46:58 | 2015-01-20T23:46:58 | 30,007,728 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | # Learn about API authentication here: {{BASE_URL}}/python/getting-started
# Find your api_key here: {{BASE_URL}}/settings/api
import plotly.plotly as py
from plotly.graph_objs import *
py.sign_in('TestBot', 'r1neazxo9w')
trace1 = Scatter(
x=[0, 1, 2, 3, 4, 5, 6, 7, 8],
y=[0, 3, 6, 4, 5, 2, 3, 5, 4]
)
trace2 = Scatter(
x=[0, 1, 2, 3, 4, 5, 6, 7, 8],
y=[0, 4, 7, 8, 3, 6, 3, 3, 4]
)
data = Data([trace1, trace2])
layout = Layout(
legend=Legend(
x=0,
y=1,
traceorder='normal',
font=Font(
family='sans-serif',
size=12,
color='#000'
),
bgcolor='#E2E2E2',
bordercolor='#FFFFFF',
borderwidth=2
)
)
fig = Figure(data=data, layout=layout)
plot_url = py.plot(fig, filename='legend-style', auto_open=False)
| [
"[email protected]"
] | |
7041bb1082d5baec910b892c4c325173285c5c89 | 572c828b5ef8c17f48cceada08f7a373c2d31e91 | /DES_1_2020/quizG.py | 87ebfa5c0eceb5a8ccb76bec213b13d12a69e9da | [
"MIT"
] | permissive | refeed/PAlgoritmaTRPLA | 4262387011a4942e137674f92c5606eacfec4c1e | e0c79c1d57bee0869e2344651718e8cf053c035f | refs/heads/master | 2023-02-03T19:19:43.210447 | 2020-12-17T03:46:13 | 2020-12-17T03:46:13 | 297,596,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,430 | py | '''
PROBLEM DESCRIPTION
This time Adi is taking part in marching drill. There are N participants. As usual, the participants must line up in order of height (the tallest at the back). Once lined up in order, each participant is given a number: number 1 for the shortest participant and number N for the tallest. Adi's height is 165 cm. Determine the number given to Adi (assume no two participants have the same height).
INPUT FORMAT
The first line is an integer N giving the number of participants.
The next N lines are the heights of each participant. Adi's height is 165 cm.
OUTPUT FORMAT
A number between 1 and N giving Adi's participant number.
SAMPLE INPUT
4
170
158
165
168
SAMPLE OUTPUT
2
Adi is given number 2 because his height is the 2nd counted from the shortest.
'''
import sys
def merge_sort(list_awal):
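    # In-place merge sort: split list_awal in half, sort each half recursively, then merge.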
list_awal_length = len(list_awal)
if list_awal_length == 1:
return
# Split
tengah = list_awal_length // 2
list_kiri = list_awal[:tengah]
list_kanan = list_awal[tengah:]
merge_sort(list_kiri)
merge_sort(list_kanan)
# Merge
    i = 0  # Iterator for the left list
    j = 0  # Iterator for the right list
    k = 0  # Iterator for the original list
while i < len(list_kiri) and j < len(list_kanan):
if list_kiri[i] < list_kanan[j]:
list_awal[k] = list_kiri[i]
i += 1
else:
list_awal[k] = list_kanan[j]
j += 1
k += 1
while i < len(list_kiri):
list_awal[k] = list_kiri[i]
i += 1
k += 1
while j < len(list_kanan):
list_awal[k] = list_kanan[j]
j += 1
k += 1
def binary_search(list_awal, wanted_value, lo, hi):
    # Expects list_awal sorted ascending; lo and hi are inclusive indices.
    if lo > hi:
        return -1  # Not found
    mid_index = (lo + hi) // 2
    mid_value = list_awal[mid_index]
    if mid_value == wanted_value:
        return mid_index
    elif wanted_value > mid_value:
        return binary_search(list_awal, wanted_value, mid_index + 1, hi)
    else:
        return binary_search(list_awal, wanted_value, lo, mid_index - 1)
if __name__ == "__main__":
input_list = []
list_length = int(input())
for _ in range(list_length):
input_list.append(int(input()))
merge_sort(input_list)
sys.stdout.write(str(binary_search(input_list, 165, 0, list_length-1) + 1))
| [
"[email protected]"
] | |
e882be5c62481dfe01e05dc2076dc494788b242a | 577ab02be20b264023c86af0b1f7598611b1d3bc | /mysite/urls.py | 98af82e60031a42f6647e718f392a3be3cbfc95f | [] | no_license | yoongyo/festigo | 323316d79796e4fc5a6ad42f26c0c8f181100e1e | 73c3f6c619acb70d8031efb62a90fb8d60acbc66 | refs/heads/master | 2020-05-18T09:49:35.673560 | 2019-04-30T21:56:49 | 2019-04-30T21:56:49 | 184,336,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,252 | py | from django.contrib import admin
from django.urls import re_path, include, path
from . import views
from django.conf import settings
from django.conf.urls.static import static
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
urlpatterns = [
re_path(r'^$', views.main, name='main'),
re_path(r'^admin/', admin.site.urls),
path('summernote/', include('django_summernote.urls')),
path('accounts/', include(('accounts.urls', 'accounts'), namespace='accounts')),
re_path(r'^festival/', include(('festival.urls', 'festival'), namespace='festival')),
]
urlpatterns += staticfiles_urlpatterns()
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
from django.views import static  # shadows the static() URL helper imported above (already used while building urlpatterns)
static_list = [
(settings.STATIC_URL, settings.STATIC_ROOT),
(settings.MEDIA_URL, settings.MEDIA_ROOT),
]
for (prefix_url, root) in static_list:
    if '://' not in prefix_url:  # i.e. not served by an external server
prefix_url = prefix_url.lstrip('/')
url_pattern = r'^' + prefix_url + r'(?P<path>.+)'
pattern = re_path(url_pattern, static.serve, kwargs={'document_root': root})
urlpatterns.append(pattern) | [
"[email protected]"
] | |
6ad8c2e578a0cb6ab15070cb8096f5f080fdf4e8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03071/s708387820.py | 520d43f8cf4e6541711b3aeb0e5432df8395660d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 99 | py | c,d=map(int,input().split())
a=max(c,d)
b=min(c,d)
if a-1>=b:
print(a+a-1)
else:
print(a+b) | [
"[email protected]"
] | |
34cd5538526fee07eb981f2d6937354f1f70c30c | 967a5ec427a3332c289659fb3a529a0a0b353dc6 | /sbc_ngs/__init__.py | 78d9b9c0c571e83ad0d46d233c43cacd3ebf84de | [
"MIT"
] | permissive | neilswainston/SequenceGenie | 7807cd29dd3504ee33d98b704a9f57e8cf657997 | 65fce1df487afd2de32e9d3ebc487874e71436bc | refs/heads/master | 2020-05-16T11:36:32.206010 | 2020-03-09T12:15:45 | 2020-03-09T12:15:45 | 183,021,243 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | '''
sbc-ngs (c) University of Manchester 2019
All rights reserved.
@author: neilswainston
'''
| [
"[email protected]"
] | |
e6977d6dd44bf82a13a5f184e4f035a11668c00b | 40c578693ed88b52db0b5d45b581d72507596b76 | /03-Sentiment/CMP462 HW03 Data/python/NaiveBayes.py | 64a8d809395e59f3f32afa240d5ca1963db89188 | [] | no_license | Anwarvic/Dan-Jurafsky--Chris-Manning--NLP | 6eafaee56363007f15f15ca09e4441330b055e18 | 64c4870f9d89952f252eee9a238397220bb43061 | refs/heads/master | 2023-05-10T19:53:19.507959 | 2023-05-02T01:07:06 | 2023-05-02T01:07:06 | 114,249,702 | 44 | 25 | null | null | null | null | UTF-8 | Python | false | false | 9,290 | py | # NLP Programming Assignment #3
# NaiveBayes
# 2012
#
# The area for you to implement is marked with TODO!
# Generally, you should not need to touch things *not* marked TODO
#
# Remember that when you submit your code, it is not run from the command line
# and your main() will *not* be run. To be safest, restrict your changes to
# addExample() and classify() and anything you further invoke from there.
#
from collections import Counter
import os
import math
class NaiveBayes:
class Example:
"""Represents a document with a label. klass is 'pos' or 'neg' by convention.
words is a list of strings.
"""
def __init__(self):
self.klass = ''
self.words = []
class TrainSplit:
"""Represents a set of training/testing data. self.train is a list of Examples, as is self.test.
"""
def __init__(self):
self.train = []
self.test = []
def __init__(self):
"""NaiveBayes initialization"""
self.FILTER_STOP_WORDS = False
self.BOOLEAN = False
self.stopList = set(self.readFile('../data/english.stop'))
self.numFolds = 10
#The following two Counter objects are used to save the words in the positive reviews
#and the negative ones respectively
self.posDict = Counter()
self.negDict = Counter()
#############################################################################
# TODO TODO TODO TODO TODO
def addExample(self, klass, words):
"""
* TODO
* Train your model on an example document with label klass ('pos' or 'neg') and
* words, a list of strings.
* You should store whatever data structures you use for your classifier
* in the NaiveBayes class.
* Returns nothing
"""
if self.BOOLEAN:
for w in set(words):
if klass == 'pos':
self.posDict[w] += 1
elif klass == 'neg':
self.negDict[w] += 1
else:
for w in words:
if klass == 'pos':
self.posDict[w] += 1
elif klass == 'neg':
self.negDict[w] += 1
def classify(self, words):
""" TODO
'words' is a list of words to classify. Return 'pos' or 'neg' classification.
"""
V = len(set(self.posDict.keys() + self.negDict.keys()))
posLen = sum(self.posDict.values())
negLen = sum(self.negDict.values())
#We used 0.5, because we are using 10 fold out of 1000 positive reviews which will be 900 for train and 100 for test.
#And the same with negative reviews.
#So, we have 900 positive reviews and 900 negative reviews which makes the probability of positive reviews is 0.5 of the
#whole corpus. And the same happens with negative reviews
posScore, negScore = math.log(0.5), math.log(0.5)
for w in words:
posScore += math.log( (self.posDict[w]+1.) / (posLen + V) )
negScore += math.log( (self.negDict[w]+1.) / (negLen + V) )
# print posScore, negScore
if posScore > negScore:
return 'pos'
else:
return 'neg'
def filterStopWords(self, words):
"""
* TODO
* Filters stop words found in self.stopList.
"""
output = []
for i in words:
if i in self.stopList or i.strip() == '':
pass
else:
output.append(i)
return output
# TODO TODO TODO TODO TODO
#############################################################################
def readFile(self, fileName):
"""
* Code for reading a file. you probably don't want to modify anything here,
* unless you don't like the way we segment files.
"""
contents = []
f = open(fileName)
for line in f:
contents.append(line)
f.close()
result = self.segmentWords('\n'.join(contents))
return result
def segmentWords(self, s):
"""
* Splits lines on whitespace for file reading
"""
return s.split()
def trainSplit(self, trainDir):
"""Takes in a trainDir, returns one TrainSplit with train set."""
split = self.TrainSplit()
posTrainFileNames = os.listdir('%s/pos/' % trainDir)
negTrainFileNames = os.listdir('%s/neg/' % trainDir)
for fileName in posTrainFileNames:
example = self.Example()
example.words = self.readFile('%s/pos/%s' % (trainDir, fileName))
example.klass = 'pos'
split.train.append(example)
for fileName in negTrainFileNames:
example = self.Example()
example.words = self.readFile('%s/neg/%s' % (trainDir, fileName))
example.klass = 'neg'
split.train.append(example)
return split
def train(self, split):
for example in split.train:
words = example.words
if self.FILTER_STOP_WORDS:
words = self.filterStopWords(words)
self.addExample(example.klass, words)
def buildSplits(self):
"""Builds the splits for training/testing"""
trainData = []
testData = []
splits = []
trainDir = '../data/imdb1'
print '[INFO]\tPerforming %d-fold cross-validation on data set:\t%s' % (self.numFolds, trainDir)
posTrainFileNames = os.listdir('%s/pos/' % trainDir)
negTrainFileNames = os.listdir('%s/neg/' % trainDir)
for fold in range(0, self.numFolds):
split = self.TrainSplit()
for fileName in posTrainFileNames:
example = self.Example()
example.words = self.readFile('%s/pos/%s' % (trainDir, fileName))
example.klass = 'pos'
if fileName[2] == str(fold):
split.test.append(example)
else:
split.train.append(example)
for fileName in negTrainFileNames:
example = self.Example()
example.words = self.readFile('%s/neg/%s' % (trainDir, fileName))
example.klass = 'neg'
if fileName[2] == str(fold):
split.test.append(example)
else:
split.train.append(example)
splits.append(split)
return splits
def main():
nB = NaiveBayes()
splits = nB.buildSplits()
for _ in range(2):
for _ in range(2):
#FILTER_STOP_WORDS and BOOLEAN is False by default
if nB.FILTER_STOP_WORDS:
if nB.BOOLEAN:
print "[INFO]\tTraining Boolean classifier with filtering STOP WORDS:"
else:
print "[INFO]\tTraining classifier with filtering STOP WORDS:"
else:
if nB.BOOLEAN:
print "[INFO]\tTraining Boolean classifier without filtering STOP WORDS:"
else:
print "[INFO]\tTraining classifier without filtering STOP WORDS:"
avgAccuracy = 0.0
fold = 0
for split in splits:
classifier = NaiveBayes()
accuracy = 0.0
for example in split.train:
words = example.words
if nB.FILTER_STOP_WORDS:
words = classifier.filterStopWords(words)
classifier.addExample(example.klass, words)
for example in split.test:
words = example.words
if nB.FILTER_STOP_WORDS:
words = classifier.filterStopWords(words)
guess = classifier.classify(words)
if example.klass == guess:
accuracy += 1.0
accuracy = accuracy / len(split.test)
avgAccuracy += accuracy
print '\t[INFO]\tFold %d Accuracy: %f' % (fold, accuracy)
fold += 1
avgAccuracy = avgAccuracy / fold
print '\t[INFO]\tAccuracy: %f' % avgAccuracy
nB.FILTER_STOP_WORDS = True
nB.BOOLEAN = True
nB.FILTER_STOP_WORDS = False
if __name__ == "__main__":
main()
"""
The output would be:
[INFO] Performing 10-fold cross-validation on data set: ../data/imdb1
[INFO] Training classifier without filtering STOP WORDS:
[INFO] Fold 0 Accuracy: 0.765000
[INFO] Fold 1 Accuracy: 0.850000
[INFO] Fold 2 Accuracy: 0.835000
[INFO] Fold 3 Accuracy: 0.825000
[INFO] Fold 4 Accuracy: 0.815000
[INFO] Fold 5 Accuracy: 0.820000
[INFO] Fold 6 Accuracy: 0.835000
[INFO] Fold 7 Accuracy: 0.825000
[INFO] Fold 8 Accuracy: 0.755000
[INFO] Fold 9 Accuracy: 0.840000
[INFO] Accuracy: 0.816500
[INFO] Training classifier with filtering STOP WORDS:
[INFO] Fold 0 Accuracy: 0.760000
[INFO] Fold 1 Accuracy: 0.825000
[INFO] Fold 2 Accuracy: 0.825000
[INFO] Fold 3 Accuracy: 0.830000
[INFO] Fold 4 Accuracy: 0.800000
[INFO] Fold 5 Accuracy: 0.830000
[INFO] Fold 6 Accuracy: 0.830000
[INFO] Fold 7 Accuracy: 0.835000
[INFO] Fold 8 Accuracy: 0.755000
[INFO] Fold 9 Accuracy: 0.820000
[INFO] Accuracy: 0.811000
[INFO] Training Boolean classifier without filtering STOP WORDS:
[INFO] Fold 0 Accuracy: 0.765000
[INFO] Fold 1 Accuracy: 0.850000
[INFO] Fold 2 Accuracy: 0.835000
[INFO] Fold 3 Accuracy: 0.825000
[INFO] Fold 4 Accuracy: 0.815000
[INFO] Fold 5 Accuracy: 0.820000
[INFO] Fold 6 Accuracy: 0.835000
[INFO] Fold 7 Accuracy: 0.825000
[INFO] Fold 8 Accuracy: 0.755000
[INFO] Fold 9 Accuracy: 0.840000
[INFO] Accuracy: 0.816500
[INFO] Training Boolean classifier with filtering STOP WORDS:
[INFO] Fold 0 Accuracy: 0.760000
[INFO] Fold 1 Accuracy: 0.825000
[INFO] Fold 2 Accuracy: 0.825000
[INFO] Fold 3 Accuracy: 0.830000
[INFO] Fold 4 Accuracy: 0.800000
[INFO] Fold 5 Accuracy: 0.830000
[INFO] Fold 6 Accuracy: 0.830000
[INFO] Fold 7 Accuracy: 0.835000
[INFO] Fold 8 Accuracy: 0.755000
[INFO] Fold 9 Accuracy: 0.820000
[INFO] Accuracy: 0.811000
""" | [
"[email protected]"
] | |
00b1cbab25918390731f619547ab4a86739d7d02 | b212ec9d705fb77cac102dceb12eb668099fd1ae | /oop/inheritance_exercise/zoo/project/mammal.py | 9c5f6fb59f2fe47998752247f7a8b1a440eca991 | [] | no_license | xpucko/Software-University-SoftUni | 20ef91a0be91a8a09a56d9fdc15888f91409de2f | a1fc1781424f025954948299be7f75d317e32dc1 | refs/heads/master | 2023-02-04T11:58:33.068431 | 2020-12-24T00:39:11 | 2020-12-24T00:39:11 | 280,227,310 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | from project.animal import Animal
class Mammal(Animal):
def __init__(self, name):
super().__init__(name) | [
"[email protected]"
] | |
5dbfdb24f29942128a5169a17742ce17ac9fd1e3 | 89de54a5ee1628bceb08d804407e4d43434fa2e0 | /backend/home/migrations/0002_load_initial_data.py | 83735bcf6368729c0f275f5250287e0dd41ff7e4 | [] | no_license | crowdbotics-apps/tkabs-podcast-20287 | 430f4896c2b5ad2ec2c65ad145151569304469d6 | a3980e01490f67f3bcd2af18e970ba949780c3bb | refs/heads/master | 2022-12-13T02:10:38.199166 | 2020-09-15T15:25:22 | 2020-09-15T15:25:22 | 295,769,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,312 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "TKABS - Podcast"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">TKABS - Podcast</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "tkabs-podcast-20287.botics.co"
site_params = {
"name": "TKABS - Podcast",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"[email protected]"
] | |
b366a560cc962b8a35af64cdeb311111005dbad4 | 4dade4f29881e99d8602144744e09ed870bd1034 | /Python/path_algorithms/greedy_best_first.py | d4296e908f40cc690cd6ffa05663905c49b40e48 | [] | no_license | alexbaryzhikov/codebase-archive | 9795347c19a82c098983c6d0fe4959c3162ca868 | c78c189002a26296a552f30078578cc0cf72e426 | refs/heads/master | 2023-02-19T21:54:21.310865 | 2021-01-11T15:47:50 | 2021-01-11T15:47:50 | 106,846,461 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,406 | py | from tkinter import *
import numpy as np
from queue import PriorityQueue
G = {}
MOVE_COST = 10
MOVE_COST_DIAG = 14
class MyCanvas(Canvas):
def __init__(self, master, shape):
self.cwidth = 50
self.shape = shape
Canvas.__init__(self, master, width=shape[0]*self.cwidth, height=shape[1]*self.cwidth)
self.pack()
self.bind("<Button-1>", self.on_mclick)
self.bind("<Button-2>", self.on_mclick)
self.bind("<Button-3>", self.on_mclick)
self.tiles = {}
self.labels = {}
for y in range(0, self.shape[1]):
for x in range(0, self.shape[0]):
x_, y_ = x*self.cwidth, y*self.cwidth
tile = self.create_rectangle(x_+1, y_+1, x_+self.cwidth, y_+self.cwidth, \
fill='white', outline='')
self.tiles[(x, y)] = tile
label = self.create_text((x_+self.cwidth//2, y_+self.cwidth//2), \
fill='black', text='')
self.labels[(x, y)] = label
for node in G['grid'].walls:
self.itemconfig(self.tiles[node], fill = 'gray')
def on_mclick(self, event):
start, goal = G['pathf'].start, G['pathf'].goal
G['pathf'].reset()
self.reset()
x, y = event.x//self.cwidth, event.y//self.cwidth
if event.num == 1:
G['pathf'].set_start((x, y))
if goal:
G['pathf'].set_goal(goal)
update()
elif event.num == 3:
G['pathf'].set_goal((x, y))
if start:
G['pathf'].set_start(start)
update()
elif event.num == 2:
if (x, y) in G['grid'].walls:
G['grid'].walls.remove((x, y))
else:
G['grid'].walls.append((x, y))
self.reset()
def reset(self):
for y in range(0, self.shape[1]):
for x in range(0, self.shape[0]):
self.itemconfig(self.tiles[(x, y)], fill = 'white')
self.itemconfig(self.labels[(x, y)], text = '')
for node in G['grid'].walls:
self.itemconfig(self.tiles[node], fill = 'gray')
class Grid:
def __init__(self, x, y):
self.nodes = np.zeros((x, y), dtype = np.int)
self.walls = []
for i in range(10):
self.walls.append((i, 4))
def neighbors(self, node):
res = [(node[0]+x, node[1]+y) for x in range(-1, 2) for y in range(-1,2) \
if (node[0]+x >= 0) and (node[0]+x < self.nodes.shape[0]) \
and (node[1]+y >= 0) and (node[1]+y < self.nodes.shape[1]) \
and (x != 0 or y != 0)]
res = [node for node in res if node not in self.walls]
return res
class Pathfinder:
def __init__(self):
self.reset()
def reset(self):
self.start = None
self.goal = None
self.frontier = PriorityQueue()
self.came_from = {}
self.done = False
def set_start(self, node):
self.start = node
self.frontier.put((0, node))
self.came_from[node] = None
G['c'].itemconfig(G['c'].tiles[node], fill='#ef7373')
def set_goal(self, node):
self.goal = node
G['c'].itemconfig(G['c'].tiles[node], fill='#785bef')
def expand(self):
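        # Greedy best-first search: the frontier is ordered by the heuristic distance
        # to the goal only (no accumulated path cost, unlike A*).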
if self.frontier.empty(): return # there is no path
current = self.frontier.get()[1]
if current == self.goal: self.get_path(); return
for next_node in G['grid'].neighbors(current):
if next_node not in self.came_from:
priority = manhattan_dist(next_node, self.goal)
self.frontier.put((priority, next_node))
self.came_from[next_node] = current
text = '{}'.format(priority)
G['c'].itemconfig(G['c'].labels[next_node], text=text)
## coloring pass
for x in range(G['grid'].nodes.shape[0]):
for y in range(G['grid'].nodes.shape[1]):
if (x, y) == self.start or (x, y) == self.goal: pass
elif (x, y) in [item[1] for item in self.frontier.queue]:
G['c'].itemconfig(G['c'].tiles[(x, y)], fill='#62aac9')
elif (x, y) in self.came_from.keys():
G['c'].itemconfig(G['c'].tiles[(x, y)], fill='#bee2c8')
def get_path(self):
current = self.goal
path = []
while current != self.start:
path.append(current)
current = self.came_from[current]
path.reverse()
## coloring pass
for node in path[:-1]:
G['c'].itemconfig(G['c'].tiles[node], fill='#82ef5b')
self.done = True
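# Despite the name, this is the diagonal ("octile") grid distance:
# MOVE_COST_DIAG per diagonal step plus MOVE_COST per remaining straight step.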
def manhattan_dist(p1, p2):
dx, dy = abs(p1[0]-p2[0]), abs(p1[1]-p2[1])
if dx > dy: dx, dy = dy, dx
return dx*MOVE_COST_DIAG+(dy-dx)*MOVE_COST
def update():
if not G['pathf'].done:
G['pathf'].expand()
root.after(20, update)
def main():
global root
root = Tk()
root.title('Greedy best first search')
root.resizable(0, 0)
G['grid'] = Grid(15, 15)
G['pathf'] = Pathfinder()
G['c'] = MyCanvas(root, G['grid'].nodes.shape)
mainloop()
if __name__ == '__main__': main()
| [
"[email protected]"
] | |
dbea1e2f703f52422cd2ff921f3c74b39d260485 | 67dc60b68e6bba3dd2738af2cf83ff7b528069e0 | /krcal/core/map_functions.py | d869ae973cb0a80cff4060797fb4288f67cfdc02 | [] | no_license | jmbenlloch/KrCalib | 4693e5db288ae917f0c71328d7fc68ee99bfa225 | fe81e68cc6eb38474d739761448d272f88fb3cd9 | refs/heads/master | 2020-04-05T07:43:59.798504 | 2018-11-08T17:33:40 | 2018-11-08T17:33:40 | 156,686,578 | 0 | 0 | null | 2018-11-08T10:02:14 | 2018-11-08T10:02:14 | null | UTF-8 | Python | false | false | 12,278 | py | """Module map_functions.
This module includes functions to manipulate maps.
Notes
-----
KrCalib code depends on the IC library.
Public functions are documented using numpy style convention
Documentation
-------------
Insert documentation https
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pandas import DataFrame
from . stat_functions import mean_and_std
from . core_functions import NN
from . kr_types import FitParTS
from . kr_types import ASectorMap
from . kr_types import SectorMapTS
from . kr_types import FitMapValue
from typing import List, Tuple, Dict, Sequence, Iterable
from typing import Optional
from numpy import sqrt
import logging
log = logging.getLogger()
def tsmap_from_fmap(fMap : Dict[int, List[FitParTS]])->SectorMapTS:
"""
Obtain a time-series of maps (tsmap) from a fit-map (fmap).
Parameters
----------
fMap
A Dictionary (key = R sector for Rphi maps, X for XYmaps) containing a list of FitParTS
(list runs over Phi wedges for RPhi maps, Y for Ymaps)
class ASectorMap:
chi2 : DataFrame
e0 : DataFrame
lt : DataFrame
e0u : DataFrame
ltu : DataFrame
class FitParTS:
ts : np.array -> contains the time series (integers expressing time differences)
e0 : np.array ->e0 fitted in time series
lt : np.array
c2 : np.array
e0u : np.array
ltu : np.array
Returns
-------
SectorMapTS : Maps in chamber sector containing time series of parameters
class SectorMapTS:
chi2 : Dict[int, List[np.array]]
e0 : Dict[int, List[np.array]]
lt : Dict[int, List[np.array]]
e0u : Dict[int, List[np.array]]
ltu : Dict[int, List[np.array]]
"""
logging.debug(f' --tsmap_from_fmap')
tmChi2 = {}
tmE0 = {}
tmLT = {}
tmE0u = {}
tmLTu = {}
for sector, fps in fMap.items():
logging.debug(f' filling maps for sector {sector}')
tCHI2 = [fp.c2 for fp in fps]
tE0 = [fp.e0 for fp in fps]
tLT = [fp.lt for fp in fps]
tE0u = [fp.e0u for fp in fps]
tLTu = [fp.ltu for fp in fps]
tmChi2[sector] = tCHI2
tmE0 [sector] = tE0
tmLT [sector] = tLT
tmE0u [sector] = tE0u
tmLTu [sector] = tLTu
return SectorMapTS(chi2 = tmChi2,
e0 = tmE0,
lt = tmLT,
e0u = tmE0u,
ltu = tmLTu)
def amap_from_tsmap(tsMap : SectorMapTS,
ts : int = 0,
range_e : Tuple[float, float] = (5000, 13000),
range_chi2 : Tuple[float, float] = (0,3),
range_lt : Tuple[float, float] = (1800, 3000)) ->ASectorMap:
"""
Obtain the correction maps for time bin ts.
Parameters
----------
tsMap
A SectorMapTS : Maps in chamber sector containing time series of parameters
class SectorMapTS:
chi2 : Dict[int, List[np.array]]
e0 : Dict[int, List[np.array]]
lt : Dict[int, List[np.array]]
e0u : Dict[int, List[np.array]]
ltu : Dict[int, List[np.array]]
ts
time bin (an integer starting at 0: if -1 take the average of the series).
range_e
Defines the range of e in pes (e.g, (8000,14000)).
range_chi2
Defines the range of chi2
range_lt
Defines the range of lt in mus.
Returns
-------
A container of maps ASectorMap
class ASectorMap:
chi2 : DataFrame
e0 : DataFrame
lt : DataFrame
e0u : DataFrame
ltu : DataFrame
"""
def fill_map_ts(tsm : Dict[int, List[float]], ts : int):
M = {}
for sector, w in tsm.items():
M[sector] = [v[ts] for v in w]
return M
def fill_maps_av(tsm : Dict[int, List[float]], range_v : Tuple[float, float]):
M = {}
Mu = {}
for sector, w in tsm.items():
T = [mean_and_std(v, range_v) for v in w]
P = list(zip(*T))
M[sector] = P[0]
Mu[sector] = P[1]
return M, Mu
if ts >=0:
mChi2 = fill_map_ts(tsMap.chi2, ts)
mE0 = fill_map_ts(tsMap.e0, ts)
mLT = fill_map_ts(tsMap.lt, ts)
mE0u = fill_map_ts(tsMap.e0u, ts)
mLTu = fill_map_ts(tsMap.ltu, ts)
else:
mChi2, _ = fill_maps_av(tsMap.chi2, range_chi2)
mE0, mE0u = fill_maps_av(tsMap.e0, range_e)
mLT, mLTu = fill_maps_av(tsMap.lt, range_lt)
return ASectorMap(chi2 = pd.DataFrame.from_dict(mChi2),
e0 = pd.DataFrame.from_dict(mE0),
lt = pd.DataFrame.from_dict(mLT),
e0u = pd.DataFrame.from_dict(mE0u),
ltu = pd.DataFrame.from_dict(mLTu))
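# Usage sketch (hypothetical variable names, not taken from this module): the two functions
# above are typically chained, e.g.
#   tsm  = tsmap_from_fmap(fMap)         # time series of fit parameters per sector
#   amap = amap_from_tsmap(tsm, ts=-1)   # ts=-1 averages over the whole time series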
def map_average(aMaps : List[ASectorMap])->ASectorMap:
"""
Compute average maps from a list of maps.
Parameters
----------
aMaps
A list of containers of maps (a list of ASectorMap)
class ASectorMap:
chi2 : DataFrame
e0 : DataFrame
lt : DataFrame
e0u : DataFrame
ltu : DataFrame
Returns
-------
The average ASectorMap
"""
mapAV = aMaps[0]
chi2 = mapAV.chi2
e0 = mapAV.e0
lt = mapAV.lt
e0u = mapAV.e0u
ltu = mapAV.ltu
for i in range(1, len(aMaps)):
chi2 = chi2.add(aMaps[i].chi2)
e0 = e0. add(aMaps[i].e0)
lt = lt. add(aMaps[i].lt)
e0u = e0u. add(aMaps[i].e0u)
ltu = ltu. add(aMaps[i].ltu)
return ASectorMap(chi2 / len(aMaps),
e0 / len(aMaps),
lt/ len(aMaps),
e0u/ len(aMaps),
ltu/ len(aMaps))
def get_maps_from_tsmap(tsm : SectorMapTS,
times : np.array,
erange : Tuple[float, float] = (2000, 14000),
ltrange : Tuple[float, float] = (500,5000),
c2range : Tuple[float, float] = (0,3))->List[ASectorMap]:
"""
    Obtain the correction maps for each time tranche, regularize the maps and set relative errors.
Parameters
----------
tsm
A SectorMapTS : Maps in chamber sector containing time series of parameters
class SectorMapTS:
chi2 : Dict[int, List[np.array]]
e0 : Dict[int, List[np.array]]
lt : Dict[int, List[np.array]]
e0u : Dict[int, List[np.array]]
ltu : Dict[int, List[np.array]]
times
an np.array describing the time series.
erange
Defines the range of e in pes (e.g, (8000,14000)).
c2range
Defines the range of chi2
ltrange
Defines the range of lt in mus.
Returns
-------
A list of ASectorMap
class ASectorMap:
chi2 : DataFrame
e0 : DataFrame
lt : DataFrame
e0u : DataFrame
ltu : DataFrame
"""
aMaps = []
for i, _ in enumerate(times):
am = amap_from_tsmap(tsm,
ts = i,
range_e = erange,
range_chi2 = c2range,
range_lt = ltrange)
        rmap = regularize_maps(am, erange=erange, ltrange=ltrange)
asm = relative_errors(rmap)
aMaps.append(asm)
return aMaps
def amap_valid_mask(amap : ASectorMap)->ASectorMap:
def valid_mask(df):
vMask ={}
for i in df.columns:
vMask[i] =~np.isnan(df[i])
return pd.DataFrame.from_dict(vMask)
return ASectorMap(chi2 = valid_mask(amap.chi2),
e0 = valid_mask(amap.e0),
lt = valid_mask(amap.lt),
e0u = valid_mask(amap.e0u),
ltu = valid_mask(amap.ltu))
def amap_average(amap : ASectorMap)->FitMapValue:
    return FitMapValue(chi2 = amap.chi2.mean().mean(),
                       e0   = amap.e0.mean().mean(),
                       lt   = amap.lt.mean().mean(),
                       e0u  = amap.e0u.mean().mean(),
                       ltu  = amap.ltu.mean().mean())
def amap_max(amap : ASectorMap)->FitMapValue:
    return FitMapValue(chi2 = amap.chi2.max().max(),
                       e0   = amap.e0.max().max(),
                       lt   = amap.lt.max().max(),
                       e0u  = amap.e0u.max().max(),
                       ltu  = amap.ltu.max().max())
def amap_min(amap : ASectorMap)->FitMapValue:
    return FitMapValue(chi2 = amap.chi2.min().min(),
                       e0   = amap.e0.min().min(),
                       lt   = amap.lt.min().min(),
                       e0u  = amap.e0u.min().min(),
                       ltu  = amap.ltu.min().min())
def amap_replace_nan_by_mean(amap : ASectorMap, amMean : FitMapValue)->ASectorMap:
return ASectorMap(chi2 = amap.chi2.copy().fillna(amMean.chi2),
e0 = amap.e0.copy().fillna(amMean.e0),
lt = amap.lt.copy().fillna(amMean.lt),
e0u = amap.e0u.copy().fillna(amMean.e0u),
ltu = amap.ltu.copy().fillna(amMean.ltu))
def amap_replace_nan_by_zero(amap : ASectorMap)->ASectorMap:
return ASectorMap(chi2 = amap.chi2.copy().fillna(0),
e0 = amap.e0.copy().fillna(0),
lt = amap.lt.copy().fillna(0),
e0u = amap.e0u.copy().fillna(0),
ltu = amap.ltu.copy().fillna(0))
def amap_valid_fraction(vmask: ASectorMap)->FitMapValue:
def count_valid(df):
C = []
for i in df.columns:
C.append(np.count_nonzero(df[i]))
return np.sum(C) /df.size
return FitMapValue(chi2 = count_valid(vmask.chi2),
e0 = count_valid(vmask.e0),
lt = count_valid(vmask.lt),
e0u = count_valid(vmask.e0u),
ltu = count_valid(vmask.ltu))
def relative_errors(am : ASectorMap)->ASectorMap:
return ASectorMap(chi2 = am.chi2,
e0 = am.e0,
lt = am.lt,
e0u = 100 * am.e0u / am.e0,
ltu = 100 * am.ltu / am.lt)
def regularize_maps(amap : ASectorMap,
erange : Tuple[float, float] = (2000, 14000),
ltrange : Tuple[float, float] = (500,5000))->ASectorMap:
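    # e0 (lt) entries outside erange (ltrange) are flagged as outliers and set to NaN,
    # together with the corresponding uncertainty entries e0u (ltu).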
OL = find_outliers(amap.e0, xr=erange)
me0 = set_outliers_to_nan(amap.e0, OL)
me0u = set_outliers_to_nan(amap.e0u, OL)
OL = find_outliers(amap.lt, xr=ltrange)
mlt = set_outliers_to_nan(amap.lt, OL)
mltu = set_outliers_to_nan(amap.ltu, OL)
return ASectorMap(chi2 = amap.chi2,
e0 = me0,
lt = mlt,
e0u = me0u,
ltu = mltu)
def set_outliers_to_nan(dfmap : DataFrame, OL : Dict[int, List[int]])->DataFrame:
newmap = dfmap.copy()
for i, lst in OL.items():
for j in lst:
newmap[i][j] = np.nan
return newmap
def find_outliers(dfmap : DataFrame,
xr : Tuple[float,float])->Dict[int, List[int]]:
OL = {}
v = (xr[1] + xr[0]) / 2
logging.info(f' set nans to average value of interval = {v}')
newmap = (dfmap.copy()).fillna(v)
for i in newmap.columns:
ltc = newmap[i]
gltc = ltc.between(*xr)
lst = list(gltc[gltc==False].index)
if len(lst) > 0:
OL[i] = lst
logging.debug(f'column {i}')
for il in lst:
logging.debug(f'outlier found, index = {il}, value ={ltc[il]}')
return OL
| [
"[email protected]"
] | |
150a4b4cea6b36d6f4771da3f007d667e73a0ecf | dd3a28907c440d18f7c7c13591ad7c3587acdfbf | /juriscraper/pacer/docket_history_report.py | e26eb249f4192d6aeb13f496be7f6b9551a47b0c | [
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | swipswaps/juriscraper | f57aacf731c9c7682100db644f7d22be175890d8 | fec54f7fc53096db16345b35c73aca9a52aaecb2 | refs/heads/master | 2022-02-25T09:42:14.391641 | 2019-07-24T00:30:27 | 2019-07-24T00:30:27 | 198,718,908 | 0 | 0 | NOASSERTION | 2022-02-17T22:50:36 | 2019-07-24T22:42:51 | HTML | UTF-8 | Python | false | false | 10,009 | py | # coding=utf-8
import re
from .docket_report import DocketReport
from .utils import clean_pacer_object, get_nonce_from_form, \
get_pacer_doc_id_from_doc1_url, get_pacer_seq_no_from_doc1_anchor
from ..lib.judge_parsers import normalize_judge_string
from ..lib.log_tools import make_default_logger
from ..lib.string_utils import clean_string, convert_date_string, \
force_unicode, harmonize
from ..lib.utils import previous_and_next
logger = make_default_logger()
date_regex = r'[—\d\-–/]*'
class DocketHistoryReport(DocketReport):
assigned_to_regex = re.compile(r'(.*),\s+presiding', flags=re.IGNORECASE)
referred_to_regex = re.compile(r'(.*),\s+referral', flags=re.IGNORECASE)
date_filed_regex = re.compile(r'[fF]iled:\s+(%s)' % date_regex)
date_last_filing_regex = re.compile(r'last\s+filing:\s+(%s)' % date_regex,
flags=re.IGNORECASE)
date_filed_and_entered_regex = re.compile(r'& Entered:\s+(%s)' % date_regex)
PATH = 'cgi-bin/HistDocQry.pl'
@property
def data(self):
"""Get all the data back from this endpoint."""
if self.is_valid is False:
return {}
data = self.metadata.copy()
data[u'docket_entries'] = self.docket_entries
return data
@property
def metadata(self):
if self._metadata is not None:
return self._metadata
self._set_metadata_values()
data = {
u'court_id': self.court_id,
u'docket_number': self._get_docket_number(),
u'case_name': self._get_case_name(),
u'date_filed': self._get_value(self.date_filed_regex,
self.metadata_values,
cast_to_date=True),
u'date_last_filing': self._get_value(self.date_last_filing_regex,
self.metadata_values,
cast_to_date=True),
u'date_terminated': self._get_value(self.date_terminated_regex,
self.metadata_values,
cast_to_date=True),
u'date_discharged': self._get_value(self.date_discharged_regex,
self.metadata_values,
cast_to_date=True),
u'assigned_to_str': self._get_assigned_judge(),
u'referred_to_str': self._get_judge(self.referred_to_regex),
}
data = clean_pacer_object(data)
self._metadata = data
return data
def query(self, pacer_case_id, query_type="History", order_by='asc',
show_de_descriptions=False):
"""Query the docket history report and return the results. Because of
the way this works, you have to hit PACER twice. Once to get a nonce,
and a second time to make your query.
:param pacer_case_id: The internal PACER case ID for a case.
:param query_type: The type of query placed. Either "History" or
"Documents".
:param show_de_descriptions: Whether to show docket entry descriptions
in the report.
:param order_by: The ordering desired for the results, either 'asc' or
'desc'.
:return: request response object
"""
# Set up and sanity tests
assert self.session is not None, \
"session attribute of DocketHistoryReport cannot be None."
if query_type not in [u'History', u'Documents']:
raise ValueError(u"Invalid value for 'query_type' parameter.")
if show_de_descriptions is not True and show_de_descriptions is not False:
raise ValueError(u"")
if order_by not in ['asc', 'desc']:
raise ValueError(u"Invalid value for 'order_by' parameter.")
logger.info(u'Getting nonce for docket history report with '
u'pacer_case_id: %s' % pacer_case_id)
r = self.session.get('%s?%s' % (self.url, pacer_case_id))
nonce = get_nonce_from_form(r)
query_params = {
u'QueryType': query_type,
u'sort1': order_by,
}
if show_de_descriptions:
query_params['DisplayDktText'] = u'DisplayDktText'
logger.info(u"Querying docket history report for case ID '%s' with "
u"params %s and nonce %s" % (pacer_case_id, query_params,
nonce))
self.response = self.session.post(self.url + '?' + nonce,
data=query_params)
self.parse()
@property
def docket_entries(self):
if self._docket_entries is not None:
return self._docket_entries
docket_header = './/th/text()[contains(., "Description")]'
docket_entry_rows = self.tree.xpath(
'//table[%s]//tr' % docket_header
)[1:] # Skip first row
docket_entries = []
for row in docket_entry_rows:
cells = row.xpath('./td')
if len(cells) == 3:
# Normal row, parse the document_number, date, etc.
de = {}
de[u'document_number'] = clean_string(cells[0].text_content())
if de[u'document_number'] == '':
de[u'document_number'] = None
anchors = cells[0].xpath('.//a')
if len(anchors) == 1:
doc1_url = anchors[0].xpath('./@href')[0]
de[u'pacer_doc_id'] = get_pacer_doc_id_from_doc1_url(
doc1_url)
de[u'pacer_seq_no'] = get_pacer_seq_no_from_doc1_anchor(
anchors[0])
else:
# Unlinked minute entry; may or may not be numbered
de[u'pacer_doc_id'] = None
de[u'pacer_seq_no'] = None
de[u'date_filed'] = self._get_date_filed(cells[1])
de[u'short_description'] = force_unicode(cells[2].text_content())
de[u'description'] = u''
docket_entries.append(de)
elif len(cells) == 1:
# Document long description. Get it, and add it to previous de.
desc = force_unicode(cells[0].text_content())
label = 'Docket Text: '
if desc.startswith(label):
desc = desc[len(label):]
docket_entries[-1]['description'] = desc
# Some docket history entries show the word "doc" instead of an entry
# number. These items aren't on the docket itself, and so for now we
# just skip them.
docket_entries = [de for de in docket_entries if
de['document_number'] is None or
de['document_number'].isdigit()]
docket_entries = clean_pacer_object(docket_entries)
self._docket_entries = docket_entries
return docket_entries
def _get_date_filed(self, cell):
s = clean_string(cell.text_content())
regexes = [self.date_filed_regex, self.date_filed_and_entered_regex]
for regex in regexes:
m = regex.search(s)
if m:
return convert_date_string(m.group(1))
def _set_metadata_values(self):
text_nodes = self.tree.xpath('//center[not(.//table)]//text()')
values = []
for s in text_nodes:
s = clean_string(force_unicode(s))
if s:
values.append(s)
values.append(' '.join(values))
self.metadata_values = values
def _get_case_name(self):
if self.is_bankruptcy:
# Uses both b/c sometimes the bankr. cases have a dist-style docket
# number.
regexes = [self.docket_number_dist_regex,
self.docket_number_bankr_regex]
else:
regexes = [self.docket_number_dist_regex]
matches = set()
# Skip the last value, it's a concat of all previous values and
# isn't needed for case name matching.
for prev, v, nxt in previous_and_next(self.metadata_values[:-1]):
if prev is None:
continue
for regex in regexes:
match = regex.search(prev)
if match:
if self.is_bankruptcy:
return harmonize(v)
for cn_regex in self.case_name_regexes:
cn_match = cn_regex.match(v)
if cn_match:
matches.add(cn_match.group(1))
if len(matches) == 1:
case_name = list(matches)[0]
else:
case_name = u"Unknown Case Title"
return harmonize(case_name)
def _get_docket_number(self):
if self.is_bankruptcy:
# Uses both b/c sometimes the bankr. cases have a dist-style docket
# number.
regexes = [self.docket_number_dist_regex,
self.docket_number_bankr_regex]
else:
regexes = [self.docket_number_dist_regex]
nodes = self.tree.xpath('//center//font[@size="+1"]')
string_nodes = [s.text_content() for s in nodes]
for regex in regexes:
for s in string_nodes:
match = regex.search(s)
if match:
return match.group(1)
def _get_assigned_judge(self):
if self.is_bankruptcy:
# Look for string like "Judge: Michael J. Fox"
for prev, v, nxt in previous_and_next(self.metadata_values[:-1]):
if prev is not None and re.search('Judge:', prev, flags=re.I):
return normalize_judge_string(v)[0]
else:
# Look for string like "Michael J. Fox, presiding"
return self._get_judge(self.assigned_to_regex)
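# Usage sketch (hypothetical values; constructor arguments follow the parent DocketReport
# class and are not shown in this file):
#   report = DocketHistoryReport('cand', pacer_session)
#   report.query(pacer_case_id='12345', query_type='History')
#   docket_data = report.data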
| [
"[email protected]"
] | |
046e48003c5bea511c67085711ae1a3269a8078e | c6fa53212eb03017f9e72fad36dbf705b27cc797 | /SimG4CMS/Calo/test/python/runPhase0_cfg.py | aa9513f0a903b7fd6a2c67f3f7d9056b01e5ae00 | [] | no_license | gem-sw/cmssw | a31fc4ef2233b2157e1e7cbe9a0d9e6c2795b608 | 5893ef29c12b2718b3c1385e821170f91afb5446 | refs/heads/CMSSW_6_2_X_SLHC | 2022-04-29T04:43:51.786496 | 2015-12-16T16:09:31 | 2015-12-16T16:09:31 | 12,892,177 | 2 | 4 | null | 2018-11-22T13:40:31 | 2013-09-17T10:10:26 | C++ | UTF-8 | Python | false | false | 3,839 | py | import FWCore.ParameterSet.Config as cms
process = cms.Process("PROD")
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.load("IOMC.EventVertexGenerators.VtxSmearedGauss_cfi")
process.load("Geometry.HcalCommonData.testPhase0GeometryXML_cfi")
process.load("Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi")
process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
process.load("Geometry.HcalCommonData.hcalSimNumberingInitialization_cfi")
process.load("Configuration.StandardSequences.MagneticField_cff")
process.load("Configuration.EventContent.EventContent_cff")
process.load("SimG4Core.Application.g4SimHits_cfi")
process.MessageLogger = cms.Service("MessageLogger",
destinations = cms.untracked.vstring('cout'),
categories = cms.untracked.vstring('CaloSim', 'G4cout', 'G4cerr',
'HCalGeom', 'HcalSim', 'HFShower',
'SimTrackManager', 'SimG4CoreGeometry'),
debugModules = cms.untracked.vstring('*'),
cout = cms.untracked.PSet(
threshold = cms.untracked.string('DEBUG'),
INFO = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
DEBUG = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
G4cerr = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
),
G4cout = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
),
SimTrackManager = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
SimG4CoreGeometry = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
HCalGeom = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
),
CaloSim = cms.untracked.PSet(
limit = cms.untracked.int32(0)
),
HFShower = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
),
HcalSim = cms.untracked.PSet(
limit = cms.untracked.int32(-1)
)
)
)
process.load("IOMC.RandomEngine.IOMC_cff")
process.RandomNumberGeneratorService.generator.initialSeed = 456789
process.RandomNumberGeneratorService.g4SimHits.initialSeed = 9876
process.RandomNumberGeneratorService.VtxSmeared.initialSeed = 123456789
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(2)
)
process.source = cms.Source("EmptySource",
firstRun = cms.untracked.uint32(1),
firstEvent = cms.untracked.uint32(1)
)
process.generator = cms.EDProducer("FlatRandomPtGunProducer",
PGunParameters = cms.PSet(
PartID = cms.vint32(211),
MinEta = cms.double(-3.0),
MaxEta = cms.double(3.0),
MinPhi = cms.double(-3.14159265359),
MaxPhi = cms.double(3.14159265359),
MinPt = cms.double(100.),
MaxPt = cms.double(100.)
),
Verbosity = cms.untracked.int32(0),
AddAntiParticle = cms.bool(False)
)
process.o1 = cms.OutputModule("PoolOutputModule",
process.FEVTSIMEventContent,
fileName = cms.untracked.string('simevent_QGSP_FTFP_BERT_EML.root')
)
process.Timing = cms.Service("Timing")
process.SimpleMemoryCheck = cms.Service("SimpleMemoryCheck",
oncePerEventMode = cms.untracked.bool(True),
showMallocInfo = cms.untracked.bool(True),
dump = cms.untracked.bool(True),
ignoreTotal = cms.untracked.int32(1)
)
process.Tracer = cms.Service("Tracer")
process.common_maximum_timex = cms.PSet(
MaxTrackTime = cms.double(1000.0),
MaxTimeNames = cms.vstring(),
MaxTrackTimes = cms.vdouble()
)
process.p1 = cms.Path(process.generator*process.VtxSmeared*process.g4SimHits)
process.outpath = cms.EndPath(process.o1)
process.g4SimHits.Physics.type = 'SimG4Core/Physics/QGSP_FTFP_BERT_EML'
process.g4SimHits.Physics.Verbosity = 0
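# Note: this configuration fires single charged pions (PartID 211) with pT = 100 GeV,
# flat in -3 < eta < 3, simulates them with Geant4 using the QGSP_FTFP_BERT_EML physics
# list, and writes the result to simevent_QGSP_FTFP_BERT_EML.root.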
| [
"[email protected]"
] | |
ce3a72884799fcfbb6b12ca458cc2c63fc7ba3c0 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03556/s017006854.py | cf4f61bfaa4d7ae384a0da06d7afd45991633e75 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | import numpy as np
for i in range(int(input()), 0, -1):
if np.sqrt(i) == int(np.sqrt(i)):
print(i)
exit() | [
"[email protected]"
] | |
8fa09a09949d2e27f5072dec8771473a7d3045b5 | 5095200e9ca55cd3a37af34ed44448c02e2a1bb5 | /modules/image/text_to_image/disco_diffusion_clip_rn50/resize_right/resize_right.py | 4f6cb94a8ee1bed552a40083c83b3bf1d0f1eb87 | [
"Apache-2.0"
] | permissive | PaddlePaddle/PaddleHub | 8712603ef486c45e83eb0bc5725b0b3ed3ddbbde | b402610a6f0b382a978e82473b541ea1fc6cf09a | refs/heads/develop | 2023-07-24T06:03:13.172978 | 2023-03-28T11:49:55 | 2023-03-28T11:49:55 | 162,672,577 | 12,914 | 2,239 | Apache-2.0 | 2023-07-06T21:38:19 | 2018-12-21T06:00:48 | Python | UTF-8 | Python | false | false | 18,318 | py | import warnings
from fractions import Fraction
from math import ceil
from typing import Tuple
import disco_diffusion_clip_rn50.resize_right.interp_methods as interp_methods
class NoneClass:
pass
try:
import paddle
from paddle import nn
nnModuleWrapped = nn.Layer
except ImportError:
    warnings.warn('No Paddle found, will work only with Numpy')
paddle = None
nnModuleWrapped = NoneClass
try:
import numpy
import numpy as np
except ImportError:
    warnings.warn('No Numpy found, will work only with Paddle')
numpy = None
if numpy is None and paddle is None:
raise ImportError("Must have either Numpy or PyTorch but both not found")
def resize(input,
scale_factors=None,
out_shape=None,
interp_method=interp_methods.cubic,
support_sz=None,
antialiasing=True,
by_convs=False,
scale_tolerance=None,
max_numerator=10,
pad_mode='constant'):
# get properties of the input tensor
in_shape, n_dims = input.shape, input.ndim
# fw stands for framework that can be either numpy or paddle,
# determined by the input type
fw = numpy if type(input) is numpy.ndarray else paddle
eps = np.finfo(np.float32).eps if fw == numpy else paddle.to_tensor(np.finfo(np.float32).eps)
device = input.place if fw is paddle else None
    # set missing scale factors or output shape, one according to the other,
    # and raise if both are missing. this is also where all the default policies
    # take place. also handling the by_convs attribute carefully.
scale_factors, out_shape, by_convs = set_scale_and_out_sz(in_shape, out_shape, scale_factors, by_convs,
scale_tolerance, max_numerator, eps, fw)
# sort indices of dimensions according to scale of each dimension.
# since we are going dim by dim this is efficient
sorted_filtered_dims_and_scales = [(dim, scale_factors[dim], by_convs[dim], in_shape[dim], out_shape[dim])
for dim in sorted(range(n_dims), key=lambda ind: scale_factors[ind])
if scale_factors[dim] != 1.]
# unless support size is specified by the user, it is an attribute
# of the interpolation method
if support_sz is None:
support_sz = interp_method.support_sz
# output begins identical to input and changes with each iteration
output = input
# iterate over dims
for (dim, scale_factor, dim_by_convs, in_sz, out_sz) in sorted_filtered_dims_and_scales:
# STEP 1- PROJECTED GRID: The non-integer locations of the projection
# of output pixel locations to the input tensor
projected_grid = get_projected_grid(in_sz, out_sz, scale_factor, fw, dim_by_convs, device)
# STEP 1.5: ANTIALIASING- If antialiasing is taking place, we modify
# the window size and the interpolation method (see inside function)
cur_interp_method, cur_support_sz = apply_antialiasing_if_needed(interp_method, support_sz, scale_factor,
antialiasing)
        # STEP 2- FIELDS OF VIEW: for each output pixel, map the input pixels
        # that influence it. Also calculate needed padding and update grid
        # accordingly
field_of_view = get_field_of_view(projected_grid, cur_support_sz, fw, eps, device)
# STEP 2.5- CALCULATE PAD AND UPDATE: according to the field of view,
# the input should be padded to handle the boundaries, coordinates
# should be updated. actual padding only occurs when weights are
        # applied (step 4). if using by_convs for this dim, then we need to
# calc right and left boundaries for each filter instead.
pad_sz, projected_grid, field_of_view = calc_pad_sz(in_sz, out_sz, field_of_view, projected_grid, scale_factor,
dim_by_convs, fw, device)
# STEP 3- CALCULATE WEIGHTS: Match a set of weights to the pixels in
# the field of view for each output pixel
weights = get_weights(cur_interp_method, projected_grid, field_of_view)
# STEP 4- APPLY WEIGHTS: Each output pixel is calculated by multiplying
# its set of weights with the pixel values in its field of view.
# We now multiply the fields of view with their matching weights.
# We do this by tensor multiplication and broadcasting.
# if by_convs is true for this dim, then we do this action by
# convolutions. this is equivalent but faster.
if not dim_by_convs:
output = apply_weights(output, field_of_view, weights, dim, n_dims, pad_sz, pad_mode, fw)
else:
output = apply_convs(output, scale_factor, in_sz, out_sz, weights, dim, pad_sz, pad_mode, fw)
return output
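# Usage sketch (illustrative only; `image` stands for a numpy array or paddle tensor that
# the caller already has):
#   small = resize(image, scale_factors=0.5)       # antialiased downscale by a factor of 2
#   fixed = resize(image, out_shape=(224, 224))    # resize to an explicit output shape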
def get_projected_grid(in_sz, out_sz, scale_factor, fw, by_convs, device=None):
    # we start by having the output coordinates which are just integer locations.
    # in the special case when using by_convs, we only need two cycles of grid
    # points: the first and last.
grid_sz = out_sz if not by_convs else scale_factor.numerator
out_coordinates = fw_arange(grid_sz, fw, device)
    # This is projecting the output pixel locations in 1d to the input tensor,
    # as non-integer locations.
    # the following formula is derived in the paper
    # "From Discrete to Continuous Convolutions" by Shocher et al.
return (out_coordinates / float(scale_factor) + (in_sz - 1) / 2 - (out_sz - 1) / (2 * float(scale_factor)))
def get_field_of_view(projected_grid, cur_support_sz, fw, eps, device):
# for each output pixel, map which input pixels influence it, in 1d.
# we start by calculating the leftmost neighbor, using half of the window
# size (eps is for when boundary is exact int)
left_boundaries = fw_ceil(projected_grid - cur_support_sz / 2 - eps, fw)
# then we simply take all the pixel centers in the field by counting
# window size pixels from the left boundary
ordinal_numbers = fw_arange(ceil(cur_support_sz - eps), fw, device)
return left_boundaries[:, None] + ordinal_numbers
def calc_pad_sz(in_sz, out_sz, field_of_view, projected_grid, scale_factor, dim_by_convs, fw, device):
if not dim_by_convs:
# determine padding according to neighbor coords out of bound.
# this is a generalized notion of padding, when pad<0 it means crop
pad_sz = [-field_of_view[0, 0].item(), field_of_view[-1, -1].item() - in_sz + 1]
# since input image will be changed by padding, coordinates of both
# field_of_view and projected_grid need to be updated
field_of_view += pad_sz[0]
projected_grid += pad_sz[0]
else:
# only used for by_convs, to calc the boundaries of each filter the
# number of distinct convolutions is the numerator of the scale factor
num_convs, stride = scale_factor.numerator, scale_factor.denominator
# calculate left and right boundaries for each conv. left can also be
# negative right can be bigger than in_sz. such cases imply padding if
        # needed. however if both are in-bounds, it means we need to crop,
# practically apply the conv only on part of the image.
left_pads = -field_of_view[:, 0]
# next calc is tricky, explanation by rows:
# 1) counting output pixels between the first position of each filter
# to the right boundary of the input
# 2) dividing it by number of filters to count how many 'jumps'
# each filter does
# 3) multiplying by the stride gives us the distance over the input
# coords done by all these jumps for each filter
# 4) to this distance we add the right boundary of the filter when
# placed in its leftmost position. so now we get the right boundary
# of that filter in input coord.
# 5) the padding size needed is obtained by subtracting the rightmost
# input coordinate. if the result is positive padding is needed. if
# negative then negative padding means shaving off pixel columns.
right_pads = (((out_sz - fw_arange(num_convs, fw, device) - 1) # (1)
// num_convs) # (2)
* stride # (3)
+ field_of_view[:, -1] # (4)
- in_sz + 1) # (5)
# in the by_convs case pad_sz is a list of left-right pairs. one per
# each filter
pad_sz = list(zip(left_pads, right_pads))
return pad_sz, projected_grid, field_of_view
def get_weights(interp_method, projected_grid, field_of_view):
    # the set of weights per output pixel is the result of the chosen
# interpolation method applied to the distances between projected grid
# locations and the pixel-centers in the field of view (distances are
# directed, can be positive or negative)
weights = interp_method(projected_grid[:, None] - field_of_view)
# we now carefully normalize the weights to sum to 1 per each output pixel
sum_weights = weights.sum(1, keepdim=True)
sum_weights[sum_weights == 0] = 1
return weights / sum_weights
def apply_weights(input, field_of_view, weights, dim, n_dims, pad_sz, pad_mode, fw):
# for this operation we assume the resized dim is the first one.
# so we transpose and will transpose back after multiplying
tmp_input = fw_swapaxes(input, dim, 0, fw)
# apply padding
tmp_input = fw_pad(tmp_input, fw, pad_sz, pad_mode)
# field_of_view is a tensor of order 2: for each output (1d location
# along cur dim)- a list of 1d neighbors locations.
    # note that this whole operation is applied to each dim separately,
# this is why it is all in 1d.
# neighbors = tmp_input[field_of_view] is a tensor of order image_dims+1:
# for each output pixel (this time indicated in all dims), these are the
# values of the neighbors in the 1d field of view. note that we only
# consider neighbors along the current dim, but such set exists for every
# multi-dim location, hence the final tensor order is image_dims+1.
paddle.device.cuda.empty_cache()
neighbors = tmp_input[field_of_view]
# weights is an order 2 tensor: for each output location along 1d- a list
# of weights matching the field of view. we augment it with ones, for
    # broadcasting, so that when it multiplies some tensor the weights affect
# only its first dim.
tmp_weights = fw.reshape(weights, (*weights.shape, *[1] * (n_dims - 1)))
# now we simply multiply the weights with the neighbors, and then sum
# along the field of view, to get a single value per out pixel
tmp_output = (neighbors * tmp_weights).sum(1)
# we transpose back the resized dim to its original position
return fw_swapaxes(tmp_output, 0, dim, fw)
def apply_convs(input, scale_factor, in_sz, out_sz, weights, dim, pad_sz, pad_mode, fw):
    # for this operation we assume the resized dim is the last one.
# so we transpose and will transpose back after multiplying
input = fw_swapaxes(input, dim, -1, fw)
# the stride for all convs is the denominator of the scale factor
stride, num_convs = scale_factor.denominator, scale_factor.numerator
# prepare an empty tensor for the output
tmp_out_shape = list(input.shape)
tmp_out_shape[-1] = out_sz
tmp_output = fw_empty(tuple(tmp_out_shape), fw, input.device)
# iterate over the conv operations. we have as many as the numerator
# of the scale-factor. for each we need boundaries and a filter.
for conv_ind, (pad_sz, filt) in enumerate(zip(pad_sz, weights)):
# apply padding (we pad last dim, padding can be negative)
pad_dim = input.ndim - 1
tmp_input = fw_pad(input, fw, pad_sz, pad_mode, dim=pad_dim)
# apply convolution over last dim. store in the output tensor with
        # positional strides so that when the loop is complete conv results are
        # interleaved
tmp_output[..., conv_ind::num_convs] = fw_conv(tmp_input, filt, stride)
return fw_swapaxes(tmp_output, -1, dim, fw)
def set_scale_and_out_sz(in_shape, out_shape, scale_factors, by_convs, scale_tolerance, max_numerator, eps, fw):
# eventually we must have both scale-factors and out-sizes for all in/out
# dims. however, we support many possible partial arguments
if scale_factors is None and out_shape is None:
raise ValueError("either scale_factors or out_shape should be "
"provided")
if out_shape is not None:
        # if out_shape has fewer dims than in_shape, by default we resize the
# first dims for numpy and last dims for paddle
out_shape = (list(out_shape) +
list(in_shape[len(out_shape):]) if fw is numpy else list(in_shape[:-len(out_shape)]) +
list(out_shape))
if scale_factors is None:
# if no scale given, we calculate it as the out to in ratio
            # (not recommended)
scale_factors = [out_sz / in_sz for out_sz, in_sz in zip(out_shape, in_shape)]
if scale_factors is not None:
# by default, if a single number is given as scale, we assume resizing
# two dims (most common are images with 2 spatial dims)
scale_factors = (scale_factors if isinstance(scale_factors, (list, tuple)) else [scale_factors, scale_factors])
        # if fewer scale_factors than in_shape dims, by default we resize the
# first dims for numpy and last dims for paddle
scale_factors = (list(scale_factors) + [1] * (len(in_shape) - len(scale_factors)) if fw is numpy else [1] *
(len(in_shape) - len(scale_factors)) + list(scale_factors))
if out_shape is None:
# when no out_shape given, it is calculated by multiplying the
            # scale by the in_shape (not recommended)
out_shape = [ceil(scale_factor * in_sz) for scale_factor, in_sz in zip(scale_factors, in_shape)]
# next part intentionally after out_shape determined for stability
# we fix by_convs to be a list of truth values in case it is not
if not isinstance(by_convs, (list, tuple)):
by_convs = [by_convs] * len(out_shape)
# next loop fixes the scale for each dim to be either frac or float.
# this is determined by by_convs and by tolerance for scale accuracy.
for ind, (sf, dim_by_convs) in enumerate(zip(scale_factors, by_convs)):
        # first we convert the scale to a fraction
if dim_by_convs:
frac = Fraction(1 / sf).limit_denominator(max_numerator)
frac = Fraction(numerator=frac.denominator, denominator=frac.numerator)
# if accuracy is within tolerance scale will be frac. if not, then
# it will be float and the by_convs attr will be set false for
# this dim
if scale_tolerance is None:
scale_tolerance = eps
if dim_by_convs and abs(frac - sf) < scale_tolerance:
scale_factors[ind] = frac
else:
scale_factors[ind] = float(sf)
by_convs[ind] = False
return scale_factors, out_shape, by_convs
def apply_antialiasing_if_needed(interp_method, support_sz, scale_factor, antialiasing):
# antialiasing is "stretching" the field of view according to the scale
# factor (only for downscaling). this is low-pass filtering. this
# requires modifying both the interpolation (stretching the 1d
# function and multiplying by the scale-factor) and the window size.
scale_factor = float(scale_factor)
if scale_factor >= 1.0 or not antialiasing:
return interp_method, support_sz
cur_interp_method = (lambda arg: scale_factor * interp_method(scale_factor * arg))
cur_support_sz = support_sz / scale_factor
return cur_interp_method, cur_support_sz
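# Example: downscaling with scale_factor = 0.5 returns a support of support_sz / 0.5
# (a cubic support of 4 becomes 8 input pixels) and a kernel scaled by 0.5, i.e. the
# usual low-pass filtering used for antialiased downscaling.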
def fw_ceil(x, fw):
if fw is numpy:
return fw.int_(fw.ceil(x))
else:
return paddle.cast(x.ceil(), dtype='int64')
def fw_floor(x, fw):
if fw is numpy:
return fw.int_(fw.floor(x))
else:
return paddle.cast(x.floor(), dtype='int64')
def fw_cat(x, fw):
if fw is numpy:
return fw.concatenate(x)
else:
return fw.concat(x)
def fw_swapaxes(x, ax_1, ax_2, fw):
if fw is numpy:
return fw.swapaxes(x, ax_1, ax_2)
else:
if ax_1 == -1:
ax_1 = len(x.shape) - 1
if ax_2 == -1:
ax_2 = len(x.shape) - 1
perm0 = list(range(len(x.shape)))
temp = ax_1
perm0[temp] = ax_2
perm0[ax_2] = temp
return fw.transpose(x, perm0)
def fw_pad(x, fw, pad_sz, pad_mode, dim=0):
if pad_sz == (0, 0):
return x
if fw is numpy:
pad_vec = [(0, 0)] * x.ndim
pad_vec[dim] = pad_sz
return fw.pad(x, pad_width=pad_vec, mode=pad_mode)
else:
if x.ndim < 3:
x = x[None, None, ...]
pad_vec = [0] * ((x.ndim - 2) * 2)
pad_vec[0:2] = pad_sz
return fw_swapaxes(fw.nn.functional.pad(fw_swapaxes(x, dim, -1, fw), pad=pad_vec, mode=pad_mode), dim, -1, fw)
def fw_conv(input, filter, stride):
# we want to apply 1d conv to any nd array. the way to do it is to reshape
    # the input to a 4D tensor. first two dims are singletons, 3rd dim stores
# all the spatial dims that we are not convolving along now. then we can
# apply conv2d with a 1xK filter. This convolves the same way all the other
# dims stored in the 3d dim. like depthwise conv over these.
# TODO: numpy support
reshaped_input = input.reshape(1, 1, -1, input.shape[-1])
reshaped_output = paddle.nn.functional.conv2d(reshaped_input, filter.view(1, 1, 1, -1), stride=(1, stride))
return reshaped_output.reshape(*input.shape[:-1], -1)
def fw_arange(upper_bound, fw, device):
if fw is numpy:
return fw.arange(upper_bound)
else:
return fw.arange(upper_bound)
def fw_empty(shape, fw, device):
if fw is numpy:
return fw.empty(shape)
else:
return fw.empty(shape=shape)
| [
"[email protected]"
] | |
08a025d81f80adffff746115fa919a21066a3bdd | a0fb29f99a852089193e4cc9a11e7263dc3f8b5f | /mayan/apps/documents/serializers/document_file_serializers.py | e8d6fe80d51c51303bbc2cb48ba61d4ae2e18e07 | [
"Apache-2.0"
] | permissive | ikang9712/Mayan-EDMS | 0e22a944d63657cea59c78023b604a01a622b52a | d6e57e27a89805329fe0c5582caa8e17882d94e6 | refs/heads/master | 2023-07-28T19:41:55.269513 | 2021-09-07T14:16:14 | 2021-09-07T14:16:14 | 402,884,683 | 1 | 0 | NOASSERTION | 2021-09-03T20:00:09 | 2021-09-03T20:00:09 | null | UTF-8 | Python | false | false | 4,403 | py | from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from mayan.apps.rest_api.relations import MultiKwargHyperlinkedIdentityField
from mayan.apps.rest_api.serializer_mixins import CreateOnlyFieldSerializerMixin
from ..literals import DOCUMENT_FILE_ACTION_PAGE_CHOICES
from ..models.document_file_models import DocumentFile
from ..models.document_file_page_models import DocumentFilePage
class DocumentFileSerializer(
CreateOnlyFieldSerializerMixin, serializers.HyperlinkedModelSerializer
):
action = serializers.ChoiceField(
choices=DOCUMENT_FILE_ACTION_PAGE_CHOICES
)
document_url = serializers.HyperlinkedIdentityField(
lookup_url_kwarg='document_id',
view_name='rest_api:document-detail'
)
download_url = MultiKwargHyperlinkedIdentityField(
view_kwargs=(
{
'lookup_field': 'document_id',
'lookup_url_kwarg': 'document_id',
},
{
'lookup_field': 'pk',
'lookup_url_kwarg': 'document_file_id',
},
),
view_name='rest_api:documentfile-download'
)
file_new = serializers.FileField(
help_text=_('Binary content for the new file.'),
use_url=False
)
page_list_url = MultiKwargHyperlinkedIdentityField(
view_kwargs=(
{
'lookup_field': 'document_id',
'lookup_url_kwarg': 'document_id',
},
{
'lookup_field': 'pk',
'lookup_url_kwarg': 'document_file_id',
},
),
view_name='rest_api:documentfilepage-list'
)
size = serializers.SerializerMethodField()
url = MultiKwargHyperlinkedIdentityField(
view_kwargs=(
{
'lookup_field': 'document_id',
'lookup_url_kwarg': 'document_id',
},
{
'lookup_field': 'pk',
'lookup_url_kwarg': 'document_file_id',
},
),
view_name='rest_api:documentfile-detail'
)
class Meta:
create_only_fields = ('action', 'file_new',)
extra_kwargs = {
'file': {'use_url': False},
}
fields = (
'action', 'checksum', 'comment', 'document_url', 'download_url', 'encoding',
'file', 'filename', 'file_new', 'id', 'mimetype', 'page_list_url',
'size', 'timestamp', 'url'
)
model = DocumentFile
read_only_fields = ('document', 'file', 'size')
def get_size(self, instance):
return instance.size
class DocumentFilePageSerializer(serializers.HyperlinkedModelSerializer):
document_file_url = MultiKwargHyperlinkedIdentityField(
view_kwargs=(
{
'lookup_field': 'document_file.document.pk',
'lookup_url_kwarg': 'document_id',
},
{
'lookup_field': 'document_file_id',
'lookup_url_kwarg': 'document_file_id',
}
),
view_name='rest_api:documentfile-detail'
)
image_url = MultiKwargHyperlinkedIdentityField(
view_kwargs=(
{
'lookup_field': 'document_file.document.pk',
'lookup_url_kwarg': 'document_id',
},
{
'lookup_field': 'document_file_id',
'lookup_url_kwarg': 'document_file_id',
},
{
'lookup_field': 'pk',
'lookup_url_kwarg': 'document_file_page_id',
}
),
view_name='rest_api:documentfilepage-image'
)
url = MultiKwargHyperlinkedIdentityField(
view_kwargs=(
{
'lookup_field': 'document_file.document.pk',
'lookup_url_kwarg': 'document_id',
},
{
'lookup_field': 'document_file_id',
'lookup_url_kwarg': 'document_file_id',
},
{
'lookup_field': 'pk',
'lookup_url_kwarg': 'document_file_page_id',
}
),
view_name='rest_api:documentfilepage-detail'
)
class Meta:
fields = (
'document_file_url', 'id', 'image_url', 'page_number', 'url'
)
model = DocumentFilePage
| [
"[email protected]"
] | |
8348c1ef6bf74078986bbe932cf0607094123add | f7f58aa4ea9ec78b20532971ddebe1e3d985dc23 | /practica11/demo/apps/home/migrations/0001_initial.py | 8e2f4d3cff30b21377b2d4c6bd257cf6e022d8e4 | [] | no_license | guille1194/Django-Practices | 10b9ff4817d41cb086e198c07bb82aee201fb049 | 738cbfdd4a12089d93cd68a0cde8653c490e7fd9 | refs/heads/master | 2021-03-08T19:30:11.229921 | 2016-05-23T05:38:53 | 2016-05-23T05:38:53 | 59,388,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,806 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Cursos',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('numero_curso', models.IntegerField(unique=True)),
('curso', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='HorarioProfesionista',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
),
migrations.CreateModel(
name='Horarios',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('turno', models.CharField(max_length=1, choices=[(b'M', b'Matutino 6:00 - 14:00'), (b'V', b'Vespertino 14:00 - 22:00'), (b'N', b'Nocturno 22:00 - 6:00')])),
('curso', models.ForeignKey(to='home.Cursos')),
],
options={
'ordering': ['curso__numero_curso'],
},
),
migrations.CreateModel(
name='Paciente',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nombre_paciente', models.CharField(max_length=99)),
('apellido_paciente', models.CharField(max_length=99)),
('num_expediente', models.IntegerField()),
('area', models.CharField(max_length=30)),
('fecha_ingreso', models.DateField(default=django.utils.timezone.now)),
('fecha_conclusion', models.DateField(default=django.utils.timezone.now)),
('evaluacion_completa', models.CharField(max_length=2)),
('reportes', models.CharField(max_length=2)),
('diagnostico', models.CharField(max_length=45)),
('fecha_nacimiento', models.DateField(default=django.utils.timezone.now)),
('edad_ingreso', models.IntegerField()),
('telefono', models.IntegerField()),
('email', models.EmailField(max_length=254)),
('genero', models.CharField(max_length=1, choices=[(b'M', b'Masculino'), (b'F', b'Femenino')])),
('perfil_usuario', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['perfil_usuario'],
'permissions': (('puede_ser_paciente', 'Puede ser paciente'),),
},
),
migrations.CreateModel(
name='Profesionista',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('nombre_profesionista', models.CharField(max_length=68)),
('apellido_profesionista', models.CharField(max_length=68)),
('reportes', models.CharField(max_length=2)),
('horario', models.CharField(max_length=50)),
('telefono', models.IntegerField()),
('email', models.EmailField(max_length=254)),
('slug', models.SlugField(null=True, blank=True)),
('curso', models.ForeignKey(to='home.Cursos')),
('perfil_usuario', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
options={
'permissions': (('puede_hacer_cosas', 'Puede hacer cosas'),),
},
),
migrations.CreateModel(
name='ProfesionistaPaciente',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('pacientes', models.ManyToManyField(to='home.Paciente', blank=True)),
('profesionista', models.OneToOneField(to='home.Profesionista')),
],
),
migrations.AddField(
model_name='horarioprofesionista',
name='horario',
field=models.OneToOneField(to='home.Horarios'),
),
migrations.AddField(
model_name='horarioprofesionista',
name='profesionista',
field=models.ForeignKey(to='home.Profesionista'),
),
migrations.AlterUniqueTogether(
name='horarios',
unique_together=set([('curso', 'turno')]),
),
]
| [
"[email protected]"
] | |
35c3ec42b0bed2b22113637433c0ccd79369c7e1 | b08b5932c92824b592ac15e73fdffc79a7da18f3 | /cauldron/cli/sync/comm.py | c66d111478c4100ba8854e7e5bc701845c5a0ee9 | [
"MIT"
] | permissive | mlund01/cauldron | 1de7426484d429703382c068c9704929e6c25bad | 9a51cad2e5d528727151e9b60fd5be6a37b70273 | refs/heads/master | 2021-07-10T20:35:14.376399 | 2017-09-30T13:53:31 | 2017-09-30T13:53:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,019 | py | import requests
from requests import Response as HttpResponse
from cauldron import environ
def assemble_url(
endpoint: str,
remote_connection: 'environ.RemoteConnection' = None
) -> str:
"""
Assembles a fully-resolved remote connection URL from the given endpoint
and remote_connection structure. If the remote_connection is omitted, the
global remote_connection object stored in the environ module will be
used in its place.
:param endpoint:
The endpoint for the API call
:param remote_connection:
The remote connection definition data structure
:return:
The fully-resolved URL for the given endpoint
"""
url_root = (
remote_connection.url
if remote_connection else
environ.remote_connection.url
)
url_root = url_root if url_root else 'localhost:5010'
parts = [
'http://' if not url_root.startswith('http') else '',
url_root.rstrip('/'),
'/',
endpoint.lstrip('/')
]
return ''.join(parts)
def parse_http_response(http_response: HttpResponse) -> 'environ.Response':
"""
Returns a Cauldron response object parsed from the serialized JSON data
specified in the http_response argument. If the response doesn't contain
valid Cauldron response data, an error Cauldron response object is
returned instead.
:param http_response:
The response object from an http request that contains a JSON
serialized Cauldron response object as its body
:return:
The Cauldron response object for the given http response
"""
try:
response = environ.Response.deserialize(http_response.json())
except Exception as error:
response = environ.Response().fail(
code='INVALID_REMOTE_RESPONSE',
error=error,
message='Invalid HTTP response from remote connection'
).console(
whitespace=1
).response
response.http_response = http_response
return response
def get_request_function(data: dict = None, method: str = None):
""" """
default_method = 'post' if data else 'get'
return getattr(requests, method.lower() if method else default_method)
def send_request(
endpoint: str,
data: dict = None,
remote_connection: 'environ.RemoteConnection' = None,
method: str = None,
**kwargs
) -> 'environ.Response':
""" """
url = assemble_url(endpoint, remote_connection)
func = get_request_function(data, method)
try:
http_response = func(url, json=data, **kwargs)
except Exception as error:
return environ.Response().fail(
code='CONNECTION_ERROR',
error=error,
message='Unable to communicate with the remote connection'
).console(
whitespace=1
).response
return parse_http_response(http_response)
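# Usage sketch (the endpoint and payload below are hypothetical; when no remote connection
# is passed, the global environ.remote_connection is used):
#   response = send_request('/ping')                     # GET, since there is no payload
#   response = send_request('/sync', data={'step': 1})   # POST, since a JSON body is given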
def download_file(
filename: str,
save_path: str,
remote_connection: 'environ.RemoteConnection' = None
) -> 'environ.Response':
""" """
url = assemble_url(
'/download/{}'.format(filename),
remote_connection=remote_connection
)
try:
http_response = requests.get(url, stream=True)
except Exception as error:
return environ.Response().fail(
code='CONNECTION_ERROR',
error=error,
message='Unable to communicate with the remote download connection'
).console(
whitespace=1
).response
try:
with open(save_path, 'wb') as f:
for chunk in http_response.iter_content(2048):
if chunk:
f.write(chunk)
except Exception as error:
return environ.Response().fail(
code='WRITE_ERROR',
error=error,
message='Unable to write data to "{}"'.format(save_path)
).console(
whitespace=1
).response
return environ.Response()
| [
"[email protected]"
] | |
2f2580af3e6b347cac1c59f041da72e745bea421 | 8fe440deb4eb66d2fcb222a7c43680dc516394c1 | /src/api/bkuser_core/categories/utils.py | ce77a59d4005ae077ec7e093e24c60a0a708802c | [
"MIT"
] | permissive | robert871126/bk-user | 780e163db76a8a997ed94a1a83389fa4f81ad6a4 | 8c633e0a3821beb839ed120c4514c5733e675862 | refs/heads/master | 2023-08-20T11:05:46.317044 | 2021-10-22T08:44:06 | 2021-10-22T08:44:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,313 | py | # -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-用户管理(Bk-User) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
import time
from contextlib import contextmanager
from dataclasses import dataclass, field
from typing import Callable, ContextManager, Iterator, Optional
from bkuser_core.categories.models import ProfileCategory
logger = logging.getLogger(__name__)
def change_category_type(category_id: int, target_type: str):
"""将其他类型的目录转换"""
ins = ProfileCategory.objects.get(pk=category_id)
logger.info(
"going to change type of Category<%s> from %s to %s",
ins.display_name,
ins.type,
target_type,
)
ins.type = target_type
ins.save()
@dataclass
class TimeContext:
start_time: float = field(default_factory=time.time)
start_clock: float = field(default_factory=time.clock)
end_time: Optional[float] = None
end_clock: Optional[float] = None
@property
def time_delta(self):
"""消耗的时间"""
if self.end_time is None:
return time.time() - self.start_time
return self.end_time - self.start_time
@property
def clock_delta(self):
"""消耗的 CPU 时钟"""
if self.end_clock is None:
return time.clock() - self.start_clock
return self.end_clock - self.start_clock
def close(self):
self.end_time = time.time()
self.end_clock = time.clock()
def __catch_time__() -> Iterator[TimeContext]:
context = TimeContext()
try:
yield context
finally:
context.close()
catch_time: Callable[..., ContextManager[TimeContext]] = contextmanager(__catch_time__)
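# Usage sketch (illustrative):
#   with catch_time() as timings:
#       do_sync_work()  # hypothetical workload
#   logger.info("took %.2fs wall time, %.2fs cpu", timings.time_delta, timings.clock_delta)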
| [
"[email protected]"
] | |
318d389b6772abd1e5773d73bf64ff401cfbfb8d | 28f088b5356e66780c4bad204564bff92f910f02 | /src/python/pants/backend/docker/subsystems/dockerfile_parser_test.py | 533f9646fc8c9d82356bf8538864bf6be0332c24 | [
"Apache-2.0"
] | permissive | wonlay/pants | 57dcd99f82cdb2e37fcb7c563ec2bccf797ee7b7 | 53c66503b6898e83c9c9596e56cde5ad9ed6a0d3 | refs/heads/master | 2023-03-06T03:23:08.602817 | 2022-05-05T23:41:32 | 2022-05-05T23:41:32 | 24,695,709 | 0 | 0 | Apache-2.0 | 2023-03-01T11:59:58 | 2014-10-01T21:15:29 | Python | UTF-8 | Python | false | false | 6,330 | py | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from textwrap import dedent
import pytest
from pants.backend.docker.subsystems.dockerfile_parser import DockerfileInfo, DockerfileInfoRequest
from pants.backend.docker.subsystems.dockerfile_parser import rules as parser_rules
from pants.backend.docker.subsystems.dockerfile_parser import split_iterable
from pants.backend.docker.target_types import DockerImageTarget
from pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs
from pants.backend.docker.util_rules.dockerfile import rules as dockerfile_rules
from pants.backend.python.target_types import PexBinary
from pants.backend.python.util_rules.pex import rules as pex_rules
from pants.engine.addresses import Address
from pants.engine.internals.scheduler import ExecutionError
from pants.testutil.rule_runner import QueryRule, RuleRunner
@pytest.fixture
def rule_runner() -> RuleRunner:
rule_runner = RuleRunner(
rules=[
*dockerfile_rules(),
*parser_rules(),
*pex_rules(),
QueryRule(DockerfileInfo, (DockerfileInfoRequest,)),
],
target_types=[DockerImageTarget, PexBinary],
)
rule_runner.set_options(
[],
env_inherit={"PATH", "PYENV_ROOT", "HOME"},
)
return rule_runner
@pytest.mark.parametrize(
"files",
[
pytest.param(
[
("test/BUILD", "docker_image()"),
("test/Dockerfile", "{dockerfile}"),
],
id="source Dockerfile",
),
pytest.param(
[
("test/BUILD", "docker_image(instructions=[{dockerfile!r}])"),
],
id="generate Dockerfile",
),
],
)
def test_putative_target_addresses(files: list[tuple[str, str]], rule_runner: RuleRunner) -> None:
dockerfile_content = dedent(
"""\
FROM base
COPY some.target/binary.pex some.target/tool.pex /bin
COPY --from=scratch this.is/ignored.pex /opt
COPY binary another/cli.pex tool /bin
"""
)
rule_runner.write_files(
{filename: content.format(dockerfile=dockerfile_content) for filename, content in files}
)
addr = Address("test")
info = rule_runner.request(DockerfileInfo, [DockerfileInfoRequest(addr)])
assert info.putative_target_addresses == (
"some/target:binary",
"some/target:tool",
"another:cli",
)
def test_split_iterable() -> None:
assert [("a", "b"), ("c",)] == list(split_iterable("-", ("a", "b", "-", "c")))
def test_build_args(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"test/BUILD": "docker_image()",
"test/Dockerfile": dedent(
"""\
ARG registry
FROM ${registry}/image:latest
ARG OPT_A
ARG OPT_B=default_b_value
ENV A=${OPT_A:-A_value}
ENV B=${OPT_B}
"""
),
}
)
addr = Address("test")
info = rule_runner.request(DockerfileInfo, [DockerfileInfoRequest(addr)])
assert info.build_args == DockerBuildArgs.from_strings(
"registry",
"OPT_A",
"OPT_B=default_b_value",
)
def test_from_image_build_arg_names(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"test/upstream/BUILD": "docker_image(name='image')",
"test/upstream/Dockerfile": "FROM upstream",
"test/downstream/BUILD": "docker_image(name='image')",
"test/downstream/Dockerfile": dedent(
"""\
ARG BASE_IMAGE=test/upstream:image
FROM ${BASE_IMAGE} AS base
"""
),
}
)
addr = Address("test/downstream", target_name="image")
info = rule_runner.request(DockerfileInfo, [DockerfileInfoRequest(addr)])
assert info.from_image_build_arg_names == ("BASE_IMAGE",)
def test_inconsistent_build_args(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"test/BUILD": "docker_image(name='image')",
"test/Dockerfile": dedent(
"""\
FROM image1:latest
ARG OPT_A=default_1
FROM image2:latest
ARG OPT_A=default_2
"""
),
}
)
addr = Address("test", target_name="image")
err_msg = (
r"Error while parsing test/Dockerfile for the test:image target: DockerBuildArgs: "
r"duplicated 'OPT_A' with different values: 'default_1' != 'default_2'\."
)
with pytest.raises(ExecutionError, match=err_msg):
rule_runner.request(DockerfileInfo, [DockerfileInfoRequest(addr)])
def test_copy_source_references(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"test/BUILD": "docker_image()",
"test/Dockerfile": dedent(
"""\
FROM base
COPY a b /
COPY --option c/d e/f/g /h
ADD ignored
COPY j k /
COPY
"""
),
}
)
info = rule_runner.request(DockerfileInfo, [DockerfileInfoRequest(Address("test"))])
assert info.copy_sources == ("a", "b", "c/d", "e/f/g", "j", "k")
def test_baseimage_tags(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
"test/BUILD": "docker_image()",
"test/Dockerfile": (
"FROM untagged\n"
"FROM tagged:v1.2\n"
"FROM digest@sha256:d1f0463b35135852308ea815c2ae54c1734b876d90288ce35828aeeff9899f9d\n"
"FROM gcr.io/tekton-releases/github.com/tektoncd/operator/cmd/kubernetes/operator:"
"v0.54.0@sha256:d1f0463b35135852308ea815c2ae54c1734b876d90288ce35828aeeff9899f9d\n"
),
}
)
info = rule_runner.request(DockerfileInfo, [DockerfileInfoRequest(Address("test"))])
assert info.version_tags == (
"stage0 latest",
"stage1 v1.2",
# Stage 2 is not pinned with a tag.
"stage3 v0.54.0",
)
| [
"[email protected]"
] | |
e446b4b4c3699733ad5922d435466930f6cfb35b | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02573/s534378621.py | 53efd867886d93f3b9f58471c95fd08487dbc066 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 976 | py | # import sys
# input = sys.stdin.readline()
n,m = map(int, input().split())
ab = []
for i in range(m):
a,b = map(int, input().split())
ab.append([a,b])
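# Disjoint-set (union-find) with path compression in root() and union by size in unite();
# after merging every friendship edge, the answer is the size of the largest component.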
class UnionFind:
def __init__(self,N):
self.parent = [i for i in range(N)]
self._size = [1] * N
self.count = 0
def root(self,a):
if self.parent[a] == a:
return a
else:
self.parent[a] = self.root(self.parent[a])
return self.parent[a]
def is_same(self,a,b):
return self.root(a) == self.root(b)
def unite(self,a,b):
ra = self.root(a)
rb = self.root(b)
if ra == rb: return
if self._size[ra] < self._size[rb]: ra,rb = rb,ra
self._size[ra] += self._size[rb]
self.parent[rb] = ra
self.count += 1
def size(self,a):
return self._size[self.root(a)]
uf = UnionFind(n)
for i in range(m):
a, b = ab[i][0],ab[i][1]
a -= 1
b -= 1
if uf.is_same(a,b):
continue
uf.unite(a,b)
x = 0
for i in range(n):
x = max(x, uf._size[i])
print (x) | [
"[email protected]"
] | |
2fe6904da2931f0c0af9091b946ef4de9424f574 | 377ec156e459f70ad32e625de2dde2672736dd06 | /Exercises/CorePythonExercises/ForMathModel.py | 527020f502bff6dbe72d50c9e9bb9988e7b05e69 | [] | no_license | tsonglew/learn-python | b657cc34d3e27993ec0dcce152796bea43224d4f | edbf0b5d24bf0e2d9ad7aa5811c7d3aa0a66b57c | refs/heads/master | 2021-06-13T13:46:35.199562 | 2017-04-14T16:57:38 | 2017-04-14T16:57:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,273 | py | # -*- coding: utf-8 -*-
"""
Example 1: Selecting a medley relay team
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    100 m times of the 5 candidates
                  A       B       C       D       E
    Butterfly     1'06"8  57"2    1'18"   1'10"   1'07"4
    Backstroke    1'15"6  1'06"   1'07"8  1'14"2  1'11"
    Breaststroke  1'27"   1'06"4  1'24"6  1'09"6  1'23"8
    Freestyle     58"6    53"     59"4    57"2    1'02"4
~~~~~~~~~~~~~~~~~~ Enumerate every possible team assignment ~~~~~~~~~~~~~~~~
Language: Python
"""
# Times of each candidate for every stroke, in order: A, B, C, D, E (unit: seconds)
Butterfly = [66.8, 57.2, 78, 70, 67.4]
Backstrock = [75.6, 66, 67.8, 74.2, 71]
Frog = [87, 66.4, 84.6, 69.6, 83.8]
Free = [58.6, 53, 59.4, 57.2, 62.4]
# List that stores every possible total relay time
Result = []
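# Brute force: assign 4 distinct swimmers (out of 5) to the 4 strokes, i.e.
# 5 * 4 * 3 * 2 = 120 permutations, and keep the assignment with the smallest total time.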
def func():
print "甲记作1,乙记作2,丙记作3,丁记作4,戊记作5"
flag = 1
for a in Butterfly:
for b in Backstrock:
if Backstrock.index(b) == Butterfly.index(a):
continue
for c in Frog:
if Frog.index(c) == Backstrock.index(b) \
or Frog.index(c) == Butterfly.index(a):
continue
for d in Free:
if Free.index(d) == Frog.index(c) \
or Free.index(d) == Backstrock.index(b) \
or Free.index(d) == Butterfly.index(a):
continue
time = a + b + c + d
Result.append(time)
print "第", flag, "种", ".蝶泳第", Butterfly.index(a)+1, "个人",
print "仰泳第", Backstrock.index(b)+1, "个人",
print "蛙泳第", Frog.index(c)+1, "个人",
print "自由泳第", Free.index(d)+1, "个人",
print "总时间:", time
flag = flag + 1
    # All possible assignments
    print "Total number of possible plans:", len(Result)
    SortedResult = sorted(Result)
    # Shortest total time
    print "Shortest time:", SortedResult[0], "seconds"
    num = Result.index(SortedResult[0]) + 1
    print "That is plan number", num
if __name__ == '__main__':
func()
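# A more compact enumeration (sketch, not part of the original exercise):
# itertools.permutations yields the same 5*4*3*2 = 120 assignments directly.
# from itertools import permutations
# strokes = [Butterfly, Backstrock, Frog, Free]
# best = min(permutations(range(5), 4),
#            key=lambda p: sum(strokes[s][i] for s, i in enumerate(p)))
# best holds the (butterfly, backstroke, breaststroke, freestyle) swimmer indices.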
| [
"[email protected]"
] | |
eed6251d219f84cbb1bbb2049424245e68263414 | 612325535126eaddebc230d8c27af095c8e5cc2f | /src/base/android/linker/DEPS | b38742d81718aa1491256df59b9fed46f23acb58 | [
"BSD-3-Clause"
] | permissive | TrellixVulnTeam/proto-quic_1V94 | 1a3a03ac7a08a494b3d4e9857b24bb8f2c2cd673 | feee14d96ee95313f236e0f0e3ff7719246c84f7 | refs/heads/master | 2023-04-01T14:36:53.888576 | 2019-10-17T02:23:04 | 2019-10-17T02:23:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | include_rules = [
# This code cannot depend on anything from base/
"-base",
]
| [
"[email protected]"
] | ||
1f8ebfb96e2da682a5d5d4c9ea89ca77a91743ec | 9d5c2a7c05602d478fe0981f910dbf0b7bb2abf0 | /devel/lib/python2.7/dist-packages/eband_local_planner/cfg/EBandPlannerConfig.py | 56a3c5c1177f4d9ff58882f58bc5e547efeaa7c1 | [] | no_license | baidu31/catkin_wp | f440fcbf70cefe43e735bcc322fd3f6cb849aa54 | 5cf0f5e3d4c1346835bb56f7903798c7c7f242d9 | refs/heads/master | 2021-09-09T23:59:53.030955 | 2018-03-20T08:59:12 | 2018-03-20T08:59:12 | 125,989,066 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,344 | py | ## *********************************************************
##
## File autogenerated for the eband_local_planner package
## by the dynamic_reconfigure package.
## Please do not edit.
##
## ********************************************************/
from dynamic_reconfigure.encoding import extract_params
inf = float('inf')
config_description = {'upper': 'DEFAULT', 'lower': 'groups', 'srcline': 235, 'name': 'Default', 'parent': 0, 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'cstate': 'true', 'parentname': 'Default', 'class': 'DEFAULT', 'field': 'default', 'state': True, 'parentclass': '', 'groups': [], 'parameters': [{'srcline': 280, 'description': 'Distance tolerance for reaching the goal pose', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'xy_goal_tolerance', 'edit_method': '', 'default': 0.1, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Orientation tolerance for reaching the desired goal pose', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'yaw_goal_tolerance', 'edit_method': '', 'default': 0.05, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Angular velocity lower bound that determines if the robot should stop to avoid limit-cycles or locks', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'rot_stopped_vel', 'edit_method': '', 'default': 0.01, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Linear velocity lower bound that determines if the robot should stop to avoid limit-cycles or locks', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'trans_stopped_vel', 'edit_method': '', 'default': 0.01, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Lifetime of eband visualization markers', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'marker_lifetime', 'edit_method': '', 'default': 0.5, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Min distance that denotes connectivity between consecutive bubbles', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'eband_min_relative_overlap', 'edit_method': '', 'default': 0.7, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Bubble geometric bound regarding tiny bubble distance', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'eband_tiny_bubble_distance', 'edit_method': '', 'default': 0.01, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Bubble geometric bound regarding tiny bubble expansion', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': 
'/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'eband_tiny_bubble_expansion', 'edit_method': '', 'default': 0.01, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Force gain of forces between consecutive bubbles that tend to stretch the elastic band', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'eband_internal_force_gain', 'edit_method': '', 'default': 1.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Force gain of forces that tend to move the bubbles away from obstacles', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'eband_external_force_gain', 'edit_method': '', 'default': 2.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Number of iterations for eband optimization', 'max': 2147483647, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'num_iterations_eband_optimization', 'edit_method': '', 'default': 3, 'level': 0, 'min': 1, 'type': 'int'}, {'srcline': 280, 'description': 'Number of iterations for reaching the equilibrium between internal and external forces', 'max': 2147483647, 'cconsttype': 'const int', 'ctype': 'int', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'eband_equilibrium_approx_max_recursion_depth', 'edit_method': '', 'default': 4, 'level': 0, 'min': 1, 'type': 'int'}, {'srcline': 280, 'description': 'Maximum relative equlibrium overshoot', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'eband_equilibrium_relative_overshoot', 'edit_method': '', 'default': 0.75, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Minimum magnitude of force that is considered significant and used in the calculations', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'eband_significant_force_lower_bound', 'edit_method': '', 'default': 0.15, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Costmap weight factor used in the calculation of distance to obstacles', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'costmap_weight', 'edit_method': '', 'default': 10.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Maximum linear velocity', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'max_vel_lin', 'edit_method': '', 'default': 0.75, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 
'Maximum angular velocity', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'max_vel_th', 'edit_method': '', 'default': 1.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Minimum linear velocity', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'min_vel_lin', 'edit_method': '', 'default': 0.1, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Minimum angular velocity', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'min_vel_th', 'edit_method': '', 'default': 0.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Minimum in-place angular velocity', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'min_in_place_vel_th', 'edit_method': '', 'default': 0.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Minimum in place linear velocity', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'in_place_trans_vel', 'edit_method': '', 'default': 0.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Proportional gain of the PID controller', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'k_prop', 'edit_method': '', 'default': 4.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Damping gain of the PID controller', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'k_damp', 'edit_method': '', 'default': 3.5, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Control rate', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'Ctrl_Rate', 'edit_method': '', 'default': 10.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Maximum allowable acceleration', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'max_acceleration', 'edit_method': '', 'default': 0.5, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Virtual mass', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': 
'/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'virtual_mass', 'edit_method': '', 'default': 0.75, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Maximum linear acceleration', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'max_translational_acceleration', 'edit_method': '', 'default': 0.5, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Maximum angular acceleration', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'max_rotational_acceleration', 'edit_method': '', 'default': 1.5, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Rotation correction threshold', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'rotation_correction_threshold', 'edit_method': '', 'default': 0.5, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Denotes whether to use the differential drive hack', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'differential_drive', 'edit_method': '', 'default': True, 'level': 0, 'min': False, 'type': 'bool'}, {'srcline': 280, 'description': 'Multiplier of bubble radius', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'bubble_velocity_multiplier', 'edit_method': '', 'default': 2.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Multiplier of rotation threshold', 'max': 'std::numeric_limits<double>::infinity()', 'cconsttype': 'const double', 'ctype': 'double', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'rotation_threshold_multiplier', 'edit_method': '', 'default': 1.0, 'level': 0, 'min': 0.0, 'type': 'double'}, {'srcline': 280, 'description': 'Determines whether to try getting closer to the goal, in case of going past the tolerance', 'max': True, 'cconsttype': 'const bool', 'ctype': 'bool', 'srcfile': '/opt/ros/indigo/lib/python2.7/dist-packages/dynamic_reconfigure/parameter_generator_catkin.py', 'name': 'disallow_hysteresis', 'edit_method': '', 'default': False, 'level': 0, 'min': False, 'type': 'bool'}], 'type': '', 'id': 0}
min = {}
max = {}
defaults = {}
level = {}
type = {}
all_level = 0
#def extract_params(config):
# params = []
# params.extend(config['parameters'])
# for group in config['groups']:
# params.extend(extract_params(group))
# return params
for param in extract_params(config_description):
min[param['name']] = param['min']
max[param['name']] = param['max']
defaults[param['name']] = param['default']
level[param['name']] = param['level']
type[param['name']] = param['type']
all_level = all_level | param['level']
| [
"[email protected]"
] | |
0c3da8caa97d18e39b6ed16eb0ebda577c7fd86d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03779/s602875516.py | 150fe00d7563cd71c275ea0eeaaf1a0809dbd342 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 119 | py | x=int(input())
t=0
jump=0
for i in range(1,10**9):
t+=1
jump+=i
if jump>=x:
print(t)
exit() | [
"[email protected]"
] | |
fc07e28592592465f34667c2510771d8580a76b1 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_167/ch19_2019_08_30_17_33_49_991320.py | 255123fd942fa40a66a7196baefba809573f081f | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | import math
g=9.8
def calcula_distancia_do_projetil (v,θ,y0):
    # Range of a projectile launched with speed v, angle θ (radians), from height y0:
    # d = (v²/(2g)) * (1 + sqrt(1 + 2*g*y0 / (v²*sin²θ))) * sin(2θ)
    d = (v**2 / (2*g)) * (1 + (1 + 2*g*y0 / (v**2 * math.sin(θ)**2))**0.5) * math.sin(2*θ)
return d
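# Quick check with hypothetical values (not part of the exercise statement):
# at θ = 45° launched from ground level the range reduces to v**2/g,
# e.g. v = 10 m/s gives roughly 10.2 m:
# print(calcula_distancia_do_projetil(10, math.pi/4, 0))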
| [
"[email protected]"
] | |
8ca3f112dba2ddf412dc8ed04db02bafdb9be374 | 78b42a602bdabbf28321b267714f0b29e89a669e | /3-2.队列.py | 1d5c1807ba1b3d50d86169ad3decc5bb6aaa2c1c | [] | no_license | michelleweii/DataStructure-Algorithms | 3c95240b8ed48237f23ff97754d6cc15cadc94dd | 84f5d6e2811f5e6c4578a5e0d381a86cbc414ce9 | refs/heads/master | 2020-03-29T13:07:06.910799 | 2018-11-29T12:57:57 | 2018-11-29T12:57:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,858 | py | # 取元素的端叫做队头,添加元素的端叫做队尾
class Queue(object):
    """Queue (FIFO)"""
    def __init__(self):
        self.__list = []
    def enqueue(self,item):
        """Append an item at the tail of the queue"""
        self.__list.append(item)
    def dequeue(self):
        """Remove and return the element at the head of the queue"""
        return self.__list.pop(0) # first in, first out
    def is_empty(self):
        """Return True if the queue is empty"""
        return self.__list == []
    def size(self):
        """Return the number of elements in the queue"""
        return len(self.__list)
# Double-ended queue: items can be added and removed at both ends,
# like two stacks joined at their bottoms
class Deque(object):
    """Double-ended queue"""
    def __init__(self):
        self.__list = []
    def add_front(self, item):
        """Add an item at the head of the queue"""
        self.__list.insert(0,item)
    def add_rear(self, item):
        """Add an item at the tail of the queue"""
        self.__list.append(item)
    def pop_front(self):
        """Remove and return the element at the head of the queue"""
        return self.__list.pop(0)
    def pop_rear(self):
        """Remove and return the element at the tail of the queue"""
        return self.__list.pop()
    def is_empty(self):
        """Return True if the queue is empty"""
        return self.__list == []
    def size(self):
        """Return the number of elements in the queue"""
        return len(self.__list)
if __name__ == "__main__":
s = Queue()
s.enqueue(1)
s.enqueue(2)
s.enqueue(3)
s.enqueue(4)
print(s.dequeue())
print(s.dequeue())
print(s.dequeue())
print(s.dequeue())
    # Double-ended queue demo
    d = Deque()
    d.add_rear(1)
    d.add_rear(2)
    d.add_front(3)
    d.add_front(4)
    print(d.pop_front())
    print(d.pop_front())
    print(d.pop_rear())
    print(d.pop_rear())
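    # Note (sketch, not part of the original file): the list-based classes above pay
    # O(n) for pop(0)/insert(0); the standard library's collections.deque gives O(1)
    # operations at both ends:
    # from collections import deque
    # d = deque()
    # d.append(1); d.appendleft(2)   # add at tail / head
    # d.pop(); d.popleft()           # remove from tail / head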
| [
"[email protected]"
] | |
44803dedada3fec966306568b761e601637bccc8 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/NasFPN/mmdet/models/detectors/cascade_rcnn.py | 47cc7cef984123804c4f99900d496807cde3c0e6 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 1,288 | py | from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class CascadeRCNN(TwoStageDetector):
r"""Implementation of `Cascade R-CNN: Delving into High Quality Object
Detection <https://arxiv.org/abs/1906.09756>`_"""
def __init__(self,
backbone,
neck=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(CascadeRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained)
def show_result(self, data, result, **kwargs):
"""Show prediction results of the detector."""
if self.with_mask:
ms_bbox_result, ms_segm_result = result
if isinstance(ms_bbox_result, dict):
result = (ms_bbox_result['ensemble'],
ms_segm_result['ensemble'])
else:
if isinstance(result, dict):
result = result['ensemble']
return super(CascadeRCNN, self).show_result(data, result, **kwargs)
| [
"[email protected]"
] | |
6ff102d1cea23a24786b8d1335ce9d535b54fdb3 | 045cb1a5638c3575296f83471758dc09a8065725 | /addons/base_gengo/models/res_company.py | 82bdf8e87666085dc1a354e8cba4071dc6357e27 | [] | no_license | marionumza/saas | 7236842b0db98d1a0d0c3c88df32d268509629cb | 148dd95d991a348ebbaff9396759a7dd1fe6e101 | refs/heads/main | 2023-03-27T14:08:57.121601 | 2021-03-20T07:59:08 | 2021-03-20T07:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | # -*- coding: utf-8 -*-
# Part of Harpiya. See LICENSE file for full copyright and licensing details.
from harpiya import fields, models
class res_company(models.Model):
_inherit = "res.company"
gengo_private_key = fields.Char(string="Gengo Private Key", copy=False, groups="base.group_system")
gengo_public_key = fields.Text(string="Gengo Public Key", copy=False, groups="base.group_user")
gengo_comment = fields.Text(string="Comments", groups="base.group_user",
help="This comment will be automatically be enclosed in each an every request sent to Gengo")
gengo_auto_approve = fields.Boolean(string="Auto Approve Translation ?", groups="base.group_user", default=True,
help="Jobs are Automatically Approved by Gengo.")
gengo_sandbox = fields.Boolean(string="Sandbox Mode",
help="Check this box if you're using the sandbox mode of Gengo, mainly used for testing purpose.")
| [
"[email protected]"
] | |
66e2c5329e9521e3252d6ead99b95654d8cdaed4 | b7c51f5e564d1f0e622fbe6e144f996d693af8cf | /Bins/MakeSparseMatrixSVD.py | c24629f3f51015d698bb93ed7c430d0c9260a6cd | [] | no_license | GINK03/job-recommender-api | a223fb225a1231eaf1b56abd92d9aa8f20ff241b | 6afdfa915918184debe96f5ac6932dfa30f7d4a5 | refs/heads/master | 2022-11-19T16:48:38.776963 | 2020-07-24T15:49:23 | 2020-07-24T15:49:23 | 263,545,893 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,429 | py | import pickle
import gzip
import glob
from scipy.sparse import lil_matrix
from sklearn.decomposition import TruncatedSVD
# import faiss
import numpy as np
from pathlib import Path
from tqdm import tqdm
import sys
from concurrent.futures import ProcessPoolExecutor
import joblib
import pandas as pd
from os import environ as E
import psutil
import time
import bz2
from loguru import logger
HOME = Path.home()
FILE = Path(__file__).name
TOP_DIR = Path(__file__).resolve().parent.parent
def wight_tune(w):
for i in range(5):
w = np.log1p(w)
return w
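# Rough effect of the repeated log1p above (assumed intent: damp heavy-tailed term
# weights): a raw weight of 1 shrinks to about 0.30 and a raw weight of 1000 to
# about 0.56, so very frequent terms no longer dominate the sparse matrix.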
idf = pd.read_csv(f'{TOP_DIR}/var/doc_freq.csv')
WORD_SIZE = 1000000
if "--create_transformer" in sys.argv:
SAMPLE_SIZE = 1000000
logger.info(f"total word size is = {WORD_SIZE}")
start_time = time.time()
def load(arg):
filename = arg
try:
with bz2.open(filename, "rb") as fp:
vec = pickle.load(fp)
SAMPLE_SIZE = vec["__SAMPLE_SIZE__"]
del vec["__SAMPLE_SIZE__"]
if SAMPLE_SIZE < 100:
return None
return (vec)
except Exception as exc:
logger.error(f"{exc}, {filename}")
Path(filename).unlink()
return None
args = []
for idx, filename in tqdm(enumerate(glob.glob(f"{TOP_DIR}/var/user_vectors/*")[:SAMPLE_SIZE]), desc="load example users..."):
args.append(filename)
mtx = lil_matrix((SAMPLE_SIZE, WORD_SIZE))
counter = 0
with ProcessPoolExecutor(max_workers=psutil.cpu_count()) as exe:
for ret in tqdm(exe.map(load, args), total=len(args), desc="load example users..."):
if ret is None:
continue
vec = ret
for term_idx, weight in vec.items():
if term_idx >= WORD_SIZE:
continue
mtx[counter, term_idx] = wight_tune(weight)
counter += 1
logger.info(mtx.shape)
mtx = mtx[:counter]
logger.info(mtx.shape)
# exit()
logger.info(f"[{FILE}] start to train TruncatedSVD...")
transformer = TruncatedSVD(n_components=500, n_iter=10, random_state=0)
transformer.fit(mtx)
elapsed_time = time.time() - start_time
logger.info(f"[{FILE}] elapsed_time = {elapsed_time}")
logger.info(f"[{FILE}] start to transform matrix...")
X_transformed = transformer.transform(mtx[:5000])
logger.info(X_transformed)
logger.info(X_transformed.shape)
logger.info(type(X_transformed))
joblib.dump(transformer, f"{TOP_DIR}/var/transformer.joblib")
if "--transform" in sys.argv:
transformer = joblib.load(f"{TOP_DIR}/var/transformer.joblib")
""" 1000個づつ分割 """
filenames = glob.glob(f"{TOP_DIR}/var/user_vectors/*")
args = []
STEP = 4000
for i in range(0, len(filenames), STEP):
args.append((i, filenames[i:i+STEP]))
Path(f"{TOP_DIR}/var/transformed").mkdir(exist_ok=True, parents=True)
def load(arg):
key, filenames = arg
mtx = lil_matrix((STEP, WORD_SIZE))
usernames = []
counter = 0
for idx, filename in enumerate(filenames):
try:
with bz2.open(filename, "rb") as fp:
vec = pickle.load(fp)
except Exception as exc:
tb_lineno = sys.exc_info()[2].tb_lineno
logger.error(f"[{FILE}] exc = {exc}, tb_lineno = {tb_lineno}")
continue
SAMPLE_SIZE = vec["__SAMPLE_SIZE__"]
del vec["__SAMPLE_SIZE__"]
if SAMPLE_SIZE < 100:
continue
for term_idx, weight in vec.items():
if term_idx >= 1000000:
continue
mtx[counter, term_idx] = weight
usernames.append(Path(filename).name)
counter += 1
mtx = mtx[:counter]
X_transformed = transformer.transform(mtx)
data = (usernames, X_transformed)
logger.info(f"{len(usernames)}, {X_transformed.shape}")
if len(usernames) != X_transformed.shape[0]:
raise Exception("size not match!")
with bz2.open(f"{TOP_DIR}/var/transformed/{key:09d}.pkl.bz2", "wb") as fp:
fp.write(pickle.dumps(data))
with ProcessPoolExecutor(max_workers=psutil.cpu_count()) as exe:
for _ in tqdm(exe.map(load, args), total=len(args), desc="transforming..."):
_
| [
"[email protected]"
] | |
4fb3833526370b776a65d10938c421bb15804d6c | 35c4c0ae37c78124732bc8056f9b9940cc80779b | /Data/Search/Find pivot element in a sorted array/Find pivot element in a sorted array.py | b266ca0952266d9b560f4086d94a868ad018fa69 | [] | no_license | bhusalashish/DSA-1 | 8189c6fe27a7905eaa3ea0a404a38164245c8b6e | 573b737483193c30753e7afc5d564396318d45ff | refs/heads/master | 2023-02-09T21:44:16.691700 | 2020-12-25T06:19:58 | 2020-12-25T06:19:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | '''
#### Name: Find pivot element in a sorted array
Link: [link]()
''' | [
"[email protected]"
] | |
5c2128c48f6cca29296f7fb6db51371bf51bddfe | e9e083aa75398a015e55ec5de655c262eb1496c6 | /mod5-adv/threads/simple-non-daemon.py | c84fa8b3f9455600fa262c7ffd6e93d45bccacc2 | [] | no_license | michaelconst/csuf-pythonprog | 54d98a878b34038a067c07c649a6025b8380b971 | 017ec2004482bbd20ce24d6c5ec8f0ae2a6cdb78 | refs/heads/master | 2021-01-21T10:00:30.268732 | 2017-03-14T01:29:44 | 2017-03-14T01:29:44 | 83,357,234 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | import threading
import time
import random
def do_work(t):
print('[{}] sleeping {}s'.format(threading.current_thread().name, t))
time.sleep(t)
print('[{}] exiting'.format(threading.current_thread().name))
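# The threads started below are non-daemon (the default), so the interpreter
# waits for every worker to finish before the process exits. A hedged variant:
# passing daemon=True to Thread(...) would let the program exit without waiting.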
for i in range(5):
threading.Thread(target=do_work, args=(random.randint(1, 5),)).start() | [
"[email protected]"
] | |
3d8e087c32269d3024415ff947d05fb54bc4b5ae | 826085daea311de883ad1e8dfcc8ef5569f087bf | /broca/similarity/term/wikipedia.py | 33c38f57abeaea4f2b39766fd32b32d37969b214 | [
"MIT"
] | permissive | parksebastien/broca | f2d10cfd6a7dcc6c069ee2e69d5faeb2e1004b67 | 7236dcf54edc0a4a54a55eb93be30800910667e7 | refs/heads/master | 2020-05-22T09:48:19.417396 | 2015-09-10T12:35:56 | 2015-09-10T12:35:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,242 | py | from scipy.spatial.distance import pdist, squareform
from broca.similarity.term import TermSimilarity
from broca.knowledge.wikipedia import Wikipedia
from broca.vectorize.bow import BoWVectorizer
class WikipediaSimilarity(Wikipedia, TermSimilarity):
def __init__(self, terms, wiki_conn=None):
"""
Initialize with a list of terms.
Will fetch Wikipedia pages for each term,
if available, then compute their similarity matrix.
"""
super().__init__(wiki_conn=wiki_conn)
# Term map for similarity matrix lookup later
terms = set(terms)
self.term_map = {t: i for i, t in enumerate(terms)}
# Fetch wikipages, compute cosine similarity matrix
docs = [self.fetch_wikipage(t) for t in terms]
vectr = BoWVectorizer()
vecs = vectr.vectorize(docs)
dist_mat = pdist(vecs.todense(), metric='cosine')
dist_mat = squareform(dist_mat)
self.sim_mat = 1/(1 + dist_mat)
def __getitem__(self, terms):
t1, t2 = terms
try:
i1 = self.term_map[t1]
i2 = self.term_map[t2]
return self.sim_mat[i1, i2]
# Term(s) not found
except KeyError:
return 0.
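# Usage sketch (hypothetical terms; assumes the broca knowledge base and an optional
# wiki connection are available):
#   sim = WikipediaSimilarity(['python', 'java'])
#   score = sim['python', 'java']   # cosine-based similarity; 0. if a term has no page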
| [
"[email protected]"
] | |
0f2fb13936f02c57c1615fa5ae66848d7554ac5b | 34ed92a9593746ccbcb1a02630be1370e8524f98 | /lib/pints/pints/tests/test_log_likelihoods.py | abe63310e1b7037b4ac373902937a301a887133a | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] | permissive | HOLL95/Cytochrome_SV | 87b7a680ed59681230f79e1de617621680ea0fa0 | d02b3469f3ee5a4c85d756053bc87651093abea1 | refs/heads/master | 2022-08-01T05:58:16.161510 | 2021-02-01T16:09:31 | 2021-02-01T16:09:31 | 249,424,867 | 0 | 0 | null | 2022-06-22T04:09:11 | 2020-03-23T12:29:29 | Jupyter Notebook | UTF-8 | Python | false | false | 29,817 | py | #!/usr/bin/env python3
#
# Tests the log likelihood classes.
#
# This file is part of PINTS.
# Copyright (c) 2017-2019, University of Oxford.
# For licensing information, see the LICENSE file distributed with the PINTS
# software package.
#
import unittest
import pints
import pints.toy
import numpy as np
class TestLogLikelihood(unittest.TestCase):
def test_scaled_log_likelihood(self):
model = pints.toy.LogisticModel()
real_parameters = [0.015, 500]
test_parameters = [0.014, 501]
sigma = 0.001
times = np.linspace(0, 1000, 100)
values = model.simulate(real_parameters, times)
# Create an object with links to the model and time series
problem = pints.SingleOutputProblem(model, times, values)
# Create a scaled and not scaled log_likelihood
log_likelihood_not_scaled = pints.GaussianKnownSigmaLogLikelihood(
problem, sigma)
log_likelihood_scaled = pints.ScaledLogLikelihood(
log_likelihood_not_scaled)
eval_not_scaled = log_likelihood_not_scaled(test_parameters)
eval_scaled = log_likelihood_scaled(test_parameters)
self.assertEqual(int(eval_not_scaled), -20959169232)
self.assertAlmostEqual(eval_scaled * len(times), eval_not_scaled)
# Test bad constructor
self.assertRaises(ValueError, pints.ScaledLogLikelihood, model)
# Test single-output derivatives
y1, dy1 = log_likelihood_not_scaled.evaluateS1(test_parameters)
y2, dy2 = log_likelihood_scaled.evaluateS1(test_parameters)
self.assertEqual(y1, log_likelihood_not_scaled(test_parameters))
self.assertEqual(dy1.shape, (2, ))
self.assertEqual(y2, log_likelihood_scaled(test_parameters))
self.assertEqual(dy2.shape, (2, ))
dy3 = dy2 * len(times)
self.assertAlmostEqual(dy1[0] / dy3[0], 1)
self.assertAlmostEqual(dy1[1] / dy3[1], 1)
# Test on multi-output problem
model = pints.toy.FitzhughNagumoModel()
nt = 10
no = model.n_outputs()
times = np.linspace(0, 100, nt)
values = model.simulate([0.5, 0.5, 0.5], times)
problem = pints.MultiOutputProblem(model, times, values)
unscaled = pints.GaussianKnownSigmaLogLikelihood(problem, 1)
scaled = pints.ScaledLogLikelihood(unscaled)
p = [0.1, 0.1, 0.1]
x = unscaled(p)
y = scaled(p)
self.assertAlmostEqual(y, x / nt / no)
# Test multi-output derivatives
y1, dy1 = unscaled.evaluateS1(p)
y2, dy2 = scaled.evaluateS1(p)
self.assertAlmostEqual(y1, unscaled(p), places=6)
self.assertEqual(dy1.shape, (3, ))
self.assertAlmostEqual(y2, scaled(p))
self.assertEqual(dy2.shape, (3, ))
dy3 = dy2 * nt * no
self.assertAlmostEqual(dy1[0] / dy3[0], 1)
self.assertAlmostEqual(dy1[1] / dy3[1], 1)
# test values of log-likelihood and derivatives
model = pints.toy.ConstantModel(3)
times = [1, 2, 3, 4]
parameters = [0, 0, 0]
org_values = [[10.7, 3.5, 3.8],
[1.1, 3.2, -1.4],
[9.3, 0.0, 4.5],
[1.2, -3, -10]]
problem = pints.MultiOutputProblem(model, times, org_values)
f2 = pints.GaussianKnownSigmaLogLikelihood(problem, [3.5, 1, 12])
log_likelihood = pints.ScaledLogLikelihood(f2)
# Test Gaussian_logpdf((10.7, 1.1, 9.3, 1.2)|mean=0, sigma=3.5) +
# Gaussian_logpdf((3.5, 3.2, 0.0, -3)|mean=0, sigma=1) +
# Gaussian_logpdf((3.8, -1.4, 4.5, -10)|mean=0, sigma=12)
# = -50.5088...
self.assertAlmostEqual(
log_likelihood(parameters),
-50.508848609684783 / 12.0
)
l, dl = log_likelihood.evaluateS1(parameters)
self.assertAlmostEqual(l, -50.508848609684783 / 12.0)
self.assertAlmostEqual(dl[0], 1.820408163265306 / 12.0)
self.assertAlmostEqual(dl[1], 3.7000000000000002 / 12.0)
self.assertAlmostEqual(dl[2], -0.021527777777777774 / 12.0)
def test_gaussian_log_likelihoods_single_output(self):
"""
Single-output test for known/unknown noise log-likelihood methods
"""
model = pints.toy.LogisticModel()
parameters = [0.015, 500]
sigma = 0.1
times = np.linspace(0, 1000, 100)
values = model.simulate(parameters, times)
values += np.random.normal(0, sigma, values.shape)
problem = pints.SingleOutputProblem(model, times, values)
# Test if known/unknown give same result
l1 = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
l2 = pints.GaussianLogLikelihood(problem)
self.assertAlmostEqual(l1(parameters), l2(parameters + [sigma]))
# Test invalid constructors
self.assertRaises(
ValueError, pints.GaussianKnownSigmaLogLikelihood, problem, 0)
self.assertRaises(
ValueError, pints.GaussianKnownSigmaLogLikelihood, problem, -1)
# known noise value checks
model = pints.toy.ConstantModel(1)
times = np.linspace(0, 10, 10)
values = model.simulate([2], times)
org_values = np.arange(10) / 5.0
problem = pints.SingleOutputProblem(model, times, org_values)
log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, 1.5)
self.assertAlmostEqual(log_likelihood([-1]), -21.999591968683927)
l, dl = log_likelihood.evaluateS1([3])
self.assertAlmostEqual(l, -23.777369746461702)
self.assertAlmostEqual(dl[0], -9.3333333333333321)
self.assertEqual(len(dl), 1)
# unknown noise value checks
log_likelihood = pints.GaussianLogLikelihood(problem)
self.assertAlmostEqual(log_likelihood([-3, 1.5]), -47.777369746461702)
# unknown noise check sensitivity
model = pints.toy.ConstantModel(1)
times = np.linspace(0, 10, 10)
values = model.simulate([2], times)
org_values = np.arange(10) / 5.0
problem = pints.SingleOutputProblem(model, times, org_values)
log_likelihood = pints.GaussianLogLikelihood(problem)
l, dl = log_likelihood.evaluateS1([7, 2.0])
self.assertAlmostEqual(l, -63.04585713764618)
self.assertAlmostEqual(dl[0], -15.25)
self.assertAlmostEqual(dl[1], 41.925000000000004)
# Test deprecated aliases
l1 = pints.KnownNoiseLogLikelihood(problem, sigma)
self.assertIsInstance(l1, pints.GaussianKnownSigmaLogLikelihood)
l2 = pints.UnknownNoiseLogLikelihood(problem)
self.assertIsInstance(l2, pints.GaussianLogLikelihood)
# test multiple output unknown noise
model = pints.toy.ConstantModel(3)
parameters = [0, 0, 0]
times = [1, 2, 3, 4]
values = model.simulate([0, 0, 0], times)
org_values = [[10.7, 3.5, 3.8],
[1.1, 3.2, -1.4],
[9.3, 0.0, 4.5],
[1.2, -3, -10]]
problem = pints.MultiOutputProblem(model, times, org_values)
log_likelihood = pints.GaussianLogLikelihood(problem)
# Test Gaussian_logpdf((10.7, 1.1, 9.3, 1.2)|mean=0, sigma=3.5) +
# Gaussian_logpdf((3.5, 3.2, 0.0, -3)|mean=0, sigma=1) +
# Gaussian_logpdf((3.8, -1.4, 4.5, -10)|mean=0, sigma=12)
# = -50.5088...
self.assertAlmostEqual(
log_likelihood(parameters + [3.5, 1, 12]),
-50.508848609684783
)
l, dl = log_likelihood.evaluateS1(parameters + [3.5, 1, 12])
self.assertAlmostEqual(l, -50.508848609684783)
self.assertAlmostEqual(dl[0], 1.820408163265306)
self.assertAlmostEqual(dl[1], 3.7000000000000002)
self.assertAlmostEqual(dl[2], -0.021527777777777774)
self.assertAlmostEqual(dl[3], 3.6065306122448981)
self.assertAlmostEqual(dl[4], 27.490000000000002)
self.assertAlmostEqual(dl[5], -0.25425347222222222)
# test multiple output model dimensions of sensitivities
d = 20
model = pints.toy.ConstantModel(d)
parameters = [0 for i in range(d)]
times = [1, 2, 3, 4]
values = model.simulate(parameters, times)
org_values = np.ones((len(times), d))
extra_params = np.ones(d).tolist()
problem = pints.MultiOutputProblem(model, times, org_values)
log_likelihood = pints.GaussianLogLikelihood(problem)
l = log_likelihood(parameters + extra_params)
l1, dl = log_likelihood.evaluateS1(parameters + extra_params)
self.assertTrue(np.array_equal(len(dl),
len(parameters + extra_params)))
self.assertEqual(l, l1)
def test_gaussian_integrated_uniform_log_likelihood_single(self):
# Tests GaussianIntegratedUniformLogLikelihood with single output
# problem
model = pints.toy.ConstantModel(1)
parameters = [0]
times = np.asarray([1, 2, 3])
model.simulate(parameters, times)
values = np.asarray([1.0, -10.7, 15.5])
problem = pints.SingleOutputProblem(model, times, values)
log_likelihood = pints.GaussianIntegratedUniformLogLikelihood(
problem, 2, 4)
self.assertAlmostEqual(log_likelihood([0]), -20.441037907121299)
# test incorrect constructors
self.assertRaises(ValueError,
pints.GaussianIntegratedUniformLogLikelihood,
problem, -1, 2)
self.assertRaises(ValueError,
pints.GaussianIntegratedUniformLogLikelihood,
problem, 0, 0)
self.assertRaises(ValueError,
pints.GaussianIntegratedUniformLogLikelihood,
problem, 2, 1)
self.assertRaises(ValueError,
pints.GaussianIntegratedUniformLogLikelihood,
problem, [1, 2], [2, 3])
def test_gaussian_integrated_uniform_log_likelihood_multi(self):
# Tests GaussianIntegratedUniformLogLikelihood with multi output
# problem
model = pints.toy.ConstantModel(4)
parameters = [0, 0, 0, 0]
times = np.asarray([1, 2, 3])
model.simulate(parameters, times)
values = np.asarray([[3.4, 4.3, 22.0, -7.3],
[11.1, 12.2, 13.9, 5.0],
[-0.4, -12.3, -8.3, -1.2]])
problem = pints.MultiOutputProblem(model, times, values)
log_likelihood = pints.GaussianIntegratedUniformLogLikelihood(
problem, 2, 4)
self.assertAlmostEqual(log_likelihood(parameters), -75.443307614807225)
# test non-equal prior limits
model = pints.toy.ConstantModel(4)
parameters = [0, 0, 0, 0]
times = np.asarray([1, 2, 3])
model.simulate(parameters, times)
values = np.asarray([[3.4, 4.3, 22.0, -7.3],
[11.1, 12.2, 13.9, 5.0],
[-0.4, -12.3, -8.3, -1.2]])
problem = pints.MultiOutputProblem(model, times, values)
log_likelihood = pints.GaussianIntegratedUniformLogLikelihood(
problem, [1, 0, 5, 2], [2, 4, 7, 8])
self.assertAlmostEqual(log_likelihood(parameters), -71.62076263891457)
# test incorrect constructors
model = pints.toy.ConstantModel(2)
parameters = [0, 0]
times = np.asarray([1, 2, 3])
model.simulate(parameters, times)
values = [[1, 2],
[3, 4],
[5, 6]]
problem = pints.MultiOutputProblem(model, times, values)
self.assertRaises(ValueError,
pints.GaussianIntegratedUniformLogLikelihood,
problem, 2, 2)
self.assertRaises(ValueError,
pints.GaussianIntegratedUniformLogLikelihood,
problem, [1, 2, 3], [2, 4])
self.assertRaises(ValueError,
pints.GaussianIntegratedUniformLogLikelihood,
problem, [1, 2], [2, 4, 5])
self.assertRaises(ValueError,
pints.GaussianIntegratedUniformLogLikelihood,
problem, [1, 3], [2, 2])
def test_known_noise_gaussian_single_S1(self):
"""
Simple tests for single known noise Gaussian log-likelihood with
sensitivities.
"""
model = pints.toy.LogisticModel()
x = [0.015, 500]
sigma = 0.1
times = np.linspace(0, 1000, 100)
values = model.simulate(x, times)
values += np.random.normal(0, sigma, values.shape)
problem = pints.SingleOutputProblem(model, times, values)
# Test if values are correct
f = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
L1 = f(x)
L2, dL = f.evaluateS1(x)
self.assertEqual(L1, L2)
self.assertEqual(dL.shape, (2,))
# Test with MultiOutputProblem
problem = pints.MultiOutputProblem(model, times, values)
f2 = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
L3 = f2(x)
L4, dL = f2.evaluateS1(x)
self.assertEqual(L3, L4)
self.assertEqual(L1, L3)
self.assertEqual(dL.shape, (2,))
# Test without noise
values = model.simulate(x, times)
problem = pints.SingleOutputProblem(model, times, values)
f = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
L1 = f(x)
L2, dL = f.evaluateS1(x)
self.assertEqual(L1, L2)
self.assertEqual(dL.shape, (2,))
# Test if zero at optimum
self.assertTrue(np.all(dL == 0))
# Test if positive to the left, negative to the right
L, dL = f.evaluateS1(x + np.array([-1e-9, 0]))
self.assertTrue(dL[0] > 0)
L, dL = f.evaluateS1(x + np.array([1e-9, 0]))
self.assertTrue(dL[0] < 0)
# Test if positive to the left, negative to the right
L, dL = f.evaluateS1(x + np.array([0, -1e-9]))
self.assertTrue(dL[1] > 0)
L, dL = f.evaluateS1(x + np.array([0, 1e-9]))
self.assertTrue(dL[1] < 0)
# Plot derivatives
if False:
import matplotlib.pyplot as plt
plt.figure()
r = np.linspace(x[0] * 0.95, x[0] * 1.05, 100)
L = []
dL1 = []
dL2 = []
for y in r:
a, b = f.evaluateS1([y, x[1]])
L.append(a)
dL1.append(b[0])
dL2.append(b[1])
plt.subplot(3, 1, 1)
plt.plot(r, L)
plt.subplot(3, 1, 2)
plt.plot(r, dL1)
plt.grid(True)
plt.subplot(3, 1, 3)
plt.plot(r, dL2)
plt.grid(True)
plt.figure()
r = np.linspace(x[1] * 0.95, x[1] * 1.05, 100)
L = []
dL1 = []
dL2 = []
for y in r:
a, b = f.evaluateS1([x[0], y])
L.append(a)
dL1.append(b[0])
dL2.append(b[1])
plt.subplot(3, 1, 1)
plt.plot(r, L)
plt.subplot(3, 1, 2)
plt.plot(r, dL1)
plt.grid(True)
plt.subplot(3, 1, 3)
plt.plot(r, dL2)
plt.grid(True)
plt.show()
# value-based tests (single output tests are above)
# multiple outputs
model = pints.toy.ConstantModel(3)
parameters = [0, 0, 0]
times = [1, 2, 3, 4]
values = model.simulate(parameters, times)
org_values = [[10.7, 3.5, 3.8],
[1.1, 3.2, -1.4],
[9.3, 0.0, 4.5],
[1.2, -3, -10]]
problem = pints.MultiOutputProblem(model, times, org_values)
sigma = [3.5, 1, 12]
log_likelihood = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
# Test Gaussian_logpdf((10.7, 1.1, 9.3, 1.2)|mean=0, sigma=3.5) +
# Gaussian_logpdf((3.5, 3.2, 0.0, -3)|mean=0, sigma=1) +
# Gaussian_logpdf((3.8, -1.4, 4.5, -10)|mean=0, sigma=12)
# = -50.5088...
self.assertAlmostEqual(
log_likelihood(parameters),
-50.508848609684783
)
l, dl = log_likelihood.evaluateS1(parameters)
self.assertAlmostEqual(l, -50.508848609684783)
self.assertAlmostEqual(dl[0], 1.820408163265306)
self.assertAlmostEqual(dl[1], 3.7000000000000002)
self.assertAlmostEqual(dl[2], -0.021527777777777774)
def test_student_t_log_likelihood_single(self):
"""
Single-output test for Student-t noise log-likelihood methods
"""
model = pints.toy.ConstantModel(1)
parameters = [0]
times = np.asarray([1, 2, 3])
model.simulate(parameters, times)
values = np.asarray([1.0, -10.7, 15.5])
problem = pints.SingleOutputProblem(model, times, values)
log_likelihood = pints.StudentTLogLikelihood(problem)
# Test Student-t_logpdf(values|mean=0, df = 3, scale = 10) = -11.74..
self.assertAlmostEqual(log_likelihood([0, 3, 10]), -11.74010919785115)
def test_student_t_log_likelihood_multi(self):
"""
Multi-output test for Student-t noise log-likelihood methods
"""
model = pints.toy.ConstantModel(4)
parameters = [0, 0, 0, 0]
times = np.arange(1, 4)
model.simulate(parameters, times)
values = np.asarray([[3.5, 7.6, 8.5, 3.4],
[1.1, -10.3, 15.6, 5.5],
[-10, -30.5, -5, 7.6]])
problem = pints.MultiOutputProblem(model, times, values)
log_likelihood = pints.StudentTLogLikelihood(problem)
# Test Student-t_logpdf((3.5,1.1,-10)|mean=0, df=2, scale=13) +
# Student-t_logpdf((7.6,-10.3,-30.5)|mean=0, df=1, scale=8) +
# Student-t_logpdf((8.5,15.6,-5)|mean=0, df=2.5, scale=13.5) +
# Student-t_logpdf((3.4,5.5,7.6)|mean=0, df=3.4, scale=10.5)
# = -47.83....
self.assertAlmostEqual(
log_likelihood(parameters + [2, 13, 1, 8, 2.5, 13.5, 3.4, 10.5]),
-47.83720347766945)
def test_cauchy_log_likelihood_single(self):
"""
Single-output test for Cauchy noise log-likelihood methods
"""
model = pints.toy.ConstantModel(1)
parameters = [0]
times = np.asarray([1, 2, 3])
model.simulate(parameters, times)
values = np.asarray([1.0, -10.7, 15.5])
problem = pints.SingleOutputProblem(model, times, values)
log_likelihood = pints.CauchyLogLikelihood(problem)
# Test Cauchy_logpdf(values|mean=0, scale = 10) = -12.34..
self.assertAlmostEqual(log_likelihood([0, 10]), -12.3394986541736)
def test_cauchy_log_likelihood_multi(self):
"""
Multi-output test for Cauchy noise log-likelihood methods
"""
model = pints.toy.ConstantModel(4)
parameters = [0, 0, 0, 0]
times = np.arange(1, 4)
model.simulate(parameters, times)
values = np.asarray([[3.5, 7.6, 8.5, 3.4],
[1.1, -10.3, 15.6, 5.5],
[-10, -30.5, -5, 7.6]])
problem = pints.MultiOutputProblem(model, times, values)
log_likelihood = pints.CauchyLogLikelihood(problem)
# Test Cauchy_logpdf((3.5,1.1,-10)|mean=0, scale=13) +
# Cauchy_logpdf((7.6,-10.3,-30.5)|mean=0, scale=8) +
# Cauchy_logpdf((8.5,15.6,-5)|mean=0, scale=13.5) +
# Cauchy_logpdf((3.4,5.5,7.6)|mean=0, scale=10.5)
# = -49.51....
self.assertAlmostEqual(
log_likelihood(parameters + [13, 8, 13.5, 10.5]),
-49.51182454195375)
def test_gaussian_noise_multi(self):
"""
Multi-output test for known/unknown Gaussian noise log-likelihood
methods.
"""
model = pints.toy.FitzhughNagumoModel()
parameters = [0.5, 0.5, 0.5]
sigma = 0.1
times = np.linspace(0, 100, 100)
values = model.simulate(parameters, times)
values += np.random.normal(0, sigma, values.shape)
problem = pints.MultiOutputProblem(model, times, values)
# Test if known/unknown give same result
l1 = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
l2 = pints.GaussianKnownSigmaLogLikelihood(problem, [sigma, sigma])
l3 = pints.GaussianLogLikelihood(problem)
self.assertAlmostEqual(
l1(parameters),
l2(parameters),
l3(parameters + [sigma, sigma]))
# Test invalid constructors
self.assertRaises(
ValueError, pints.GaussianKnownSigmaLogLikelihood, problem, 0)
self.assertRaises(
ValueError, pints.GaussianKnownSigmaLogLikelihood, problem, -1)
self.assertRaises(
ValueError, pints.GaussianKnownSigmaLogLikelihood, problem, [1])
self.assertRaises(
ValueError, pints.GaussianKnownSigmaLogLikelihood, problem,
[1, 2, 3, 4])
self.assertRaises(
ValueError, pints.GaussianKnownSigmaLogLikelihood, problem,
[1, 2, -3])
def test_known_noise_gaussian_single_and_multi(self):
"""
Tests the output of single-series against multi-series known noise
log-likelihoods.
"""
# Define boring 1-output and 2-output models
class NullModel1(pints.ForwardModel):
def n_parameters(self):
return 1
def simulate(self, x, times):
return np.zeros(times.shape)
class NullModel2(pints.ForwardModel):
def n_parameters(self):
return 1
def n_outputs(self):
return 2
def simulate(self, x, times):
return np.zeros((len(times), 2))
# Create two single output problems
times = np.arange(10)
np.random.seed(1)
sigma1 = 3
sigma2 = 5
values1 = np.random.uniform(0, sigma1, times.shape)
values2 = np.random.uniform(0, sigma2, times.shape)
model1d = NullModel1()
problem1 = pints.SingleOutputProblem(model1d, times, values1)
problem2 = pints.SingleOutputProblem(model1d, times, values2)
log1 = pints.GaussianKnownSigmaLogLikelihood(problem1, sigma1)
log2 = pints.GaussianKnownSigmaLogLikelihood(problem2, sigma2)
# Create one multi output problem
values3 = np.array([values1, values2]).swapaxes(0, 1)
model2d = NullModel2()
problem3 = pints.MultiOutputProblem(model2d, times, values3)
log3 = pints.GaussianKnownSigmaLogLikelihood(
problem3, [sigma1, sigma2])
# Check if we get the right output
self.assertAlmostEqual(log1(0) + log2(0), log3(0))
def test_sum_of_independent_log_pdfs(self):
# Test single output
model = pints.toy.LogisticModel()
x = [0.015, 500]
sigma = 0.1
times = np.linspace(0, 1000, 100)
values = model.simulate(x, times) + 0.1
problem = pints.SingleOutputProblem(model, times, values)
l1 = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
l2 = pints.GaussianLogLikelihood(problem)
ll = pints.SumOfIndependentLogPDFs([l1, l1, l1])
self.assertEqual(l1.n_parameters(), ll.n_parameters())
self.assertEqual(3 * l1(x), ll(x))
# Test single output derivatives
y, dy = ll.evaluateS1(x)
self.assertEqual(y, ll(x))
self.assertEqual(dy.shape, (2, ))
y1, dy1 = l1.evaluateS1(x)
self.assertTrue(np.all(3 * dy1 == dy))
# Wrong number of arguments
self.assertRaises(TypeError, pints.SumOfIndependentLogPDFs)
self.assertRaises(
ValueError, pints.SumOfIndependentLogPDFs, [l1])
# Wrong types
self.assertRaises(
ValueError, pints.SumOfIndependentLogPDFs, [l1, 1])
self.assertRaises(
ValueError, pints.SumOfIndependentLogPDFs, [problem, l1])
# Mismatching dimensions
self.assertRaises(
ValueError, pints.SumOfIndependentLogPDFs, [l1, l2])
# Test multi-output
model = pints.toy.FitzhughNagumoModel()
x = model.suggested_parameters()
nt = 10
nx = model.n_parameters()
times = np.linspace(0, 10, nt)
values = model.simulate(x, times) + 0.01
problem = pints.MultiOutputProblem(model, times, values)
sigma = 0.01
l1 = pints.GaussianKnownSigmaLogLikelihood(problem, sigma)
ll = pints.SumOfIndependentLogPDFs([l1, l1, l1])
self.assertEqual(l1.n_parameters(), ll.n_parameters())
self.assertEqual(3 * l1(x), ll(x))
# Test multi-output derivatives
y, dy = ll.evaluateS1(x)
# Note: y and ll(x) differ a bit, because the solver acts slightly
# different when evaluating with and without sensitivities!
self.assertAlmostEqual(y, ll(x), places=3)
self.assertEqual(dy.shape, (nx, ))
y1, dy1 = l1.evaluateS1(x)
self.assertTrue(np.all(3 * dy1 == dy))
def test_ar1(self):
# single outputs
model = pints.toy.ConstantModel(1)
parameters = [0]
times = np.asarray([1, 2, 3])
model.simulate(parameters, times)
values = np.asarray([1.0, -10.7, 15.5])
problem = pints.SingleOutputProblem(model, times, values)
log_likelihood = pints.AR1LogLikelihood(problem)
self.assertAlmostEqual(
log_likelihood([0, 0.5, 5]), -19.706737485492436)
# multiple outputs
model = pints.toy.ConstantModel(4)
parameters = [0, 0, 0, 0]
times = np.arange(1, 5)
model.simulate(parameters, times)
values = np.asarray([[3.5, 7.6, 8.5, 3.4],
[1.1, -10.3, 15.6, 5.5],
[-10, -30.5, -5, 7.6],
[-12, -10.1, -4, 2.3]])
problem = pints.MultiOutputProblem(model, times, values)
log_likelihood = pints.AR1LogLikelihood(problem)
# Test AR1Logpdf((3.5,1.1,-10, -12)|mean=0, rho=0.5, sigma=1) +
# AR1Logpdf((7.6,-10.3,-30.5, -10.1)|mean=0, rho=-0.25, sigma=3) +
# AR1Logpdf((8.5,15.6,-5, -4)|mean=0, rho=0.9, sigma=10) +
# AR1Logpdf((3.4,5.5,7.6, 2.3)|mean=0, rho=0.0, sigma=2)
# = -109.4752924909364 -93.58199 - 18.3833..
# -16.4988
self.assertAlmostEqual(
log_likelihood(parameters + [0.5, 1.0,
-0.25, 3.0,
0.9, 10.0,
0.0, 2.0]),
-237.93936126949615)
def test_arma11(self):
model = pints.toy.ConstantModel(1)
parameters = [0]
times = np.asarray([1, 2, 3, 4])
model.simulate(parameters, times)
values = np.asarray([3, -4.5, 10.5, 0.3])
problem = pints.SingleOutputProblem(model, times, values)
log_likelihood = pints.ARMA11LogLikelihood(problem)
self.assertAlmostEqual(
log_likelihood([0, 0.9, -0.4, 1]), -171.53031588534171)
# multiple outputs
model = pints.toy.ConstantModel(4)
parameters = [0, 0, 0, 0]
times = np.arange(1, 5)
model.simulate(parameters, times)
values = np.asarray([[3.5, 7.6, 8.5, 3.4],
[1.1, -10.3, 15.6, 5.5],
[-10, -30.5, -5, 7.6],
[-12, -10.1, -4, 2.3]])
problem = pints.MultiOutputProblem(model, times, values)
log_likelihood = pints.ARMA11LogLikelihood(problem)
# ARMA1Logpdf((3.5,1.1,-10, -12)|mean=0, rho=0.5, phi=0.34 sigma=1) +
# ARMA1Logpdf((7.6,-10.3,-30.5, -10.1)|
# mean=0, rho=-0.25, phi=0.1, sigma=3) +
# ARMA1Logpdf((8.5,15.6,-5, -4)|mean=0, rho=0.9, phi=0.0, sigma=10) +
# ARMA1Logpdf((3.4,5.5,7.6, 2.3)|mean=0, rho=0.0, phi=0.9, sigma=2)
# = -116.009 -74.94 -14.32 -8.88
self.assertAlmostEqual(
log_likelihood(parameters + [0.5, 0.34, 1.0,
-0.25, 0.1, 3.0,
0.9, 0.0, 10.0,
0.0, 0.9, 2.0]),
-214.17034137601107)
def test_multiplicative_gaussian(self):
# Test single output
model = pints.toy.ConstantModel(1)
parameters = [2]
times = np.asarray([1, 2, 3, 4])
model.simulate(parameters, times)
values = np.asarray([1.9, 2.1, 1.8, 2.2])
problem = pints.SingleOutputProblem(model, times, values)
log_likelihood = pints.MultiplicativeGaussianLogLikelihood(problem)
self.assertAlmostEqual(log_likelihood(parameters + [2.0, 1.0]),
-9.224056577298253)
# Test multiple output
model = pints.toy.ConstantModel(2)
parameters = [1, 2]
times = np.asarray([1, 2, 3])
model.simulate(parameters, times)
values = np.asarray([[1.1, 0.9, 1.5], [1.5, 2.5, 2.0]]).transpose()
problem = pints.MultiOutputProblem(model, times, values)
log_likelihood = pints.MultiplicativeGaussianLogLikelihood(problem)
self.assertAlmostEqual(
log_likelihood(parameters + [1.0, 2.0, 1.0, 1.0]),
-12.176330824267543)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
52fc0a314b8c7900c41339efe131dd5f2bc66806 | 8c69736d7ac2541be761d728284d315cefa90d28 | /nodes/1.x/python/Element.Category+.py | 1d9538abcbb641afdd57cccc319aef7a91d65b65 | [
"MIT"
] | permissive | ptrklk/ClockworkForDynamo | 21ae4ab4ab70d02b6d706f16f312865cd73c4ace | 90293d8fb74e6b3339acd6ca4ff69f695b6a02ac | refs/heads/master | 2020-03-26T07:50:47.053264 | 2018-07-28T07:55:10 | 2018-07-28T07:55:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
def GetCategory(item):
objtype = item.GetType().ToString()
if objtype == "Autodesk.Revit.DB.ViewSchedule": return Revit.Elements.Category.ById(item.Definition.CategoryId.IntegerValue)
elif objtype == "Autodesk.Revit.DB.Family": return Revit.Elements.Category.ById(item.FamilyCategoryId.IntegerValue)
elif objtype == "Autodesk.Revit.DB.GraphicsStyle": return Revit.Elements.Category.ById(item.GraphicsStyleCategory.Id.IntegerValue)
elif objtype == "Autodesk.Revit.DB.Category":
if item.Parent: return Revit.Elements.Category.ById(item.Parent.Id.IntegerValue)
else: return None
elif hasattr(item, "Category"): return Revit.Elements.Category.ById(item.Category.Id.IntegerValue)
else: return None
items = UnwrapElement(IN[0])
if isinstance(IN[0], list): OUT = [GetCategory(x) for x in items]
else: OUT = GetCategory(items) | [
"[email protected]"
] | |
f63cba311274c50550ab3646cb08e9203bacea0f | 9d961bd6a590cc96db0c1f9c72d84e3a66636edf | /심심풀이땅콩/[백준]2920.py | 66cc611eced9e2f94957f850a848db3dff36e886 | [] | no_license | 0equal2/Python_Programming | bae65338929e8e1a88247b8d23de805caa026702 | 2ac1d0262320220f49cbdb45e787e55e994d0b0f | refs/heads/master | 2023-05-14T22:13:41.583214 | 2021-06-09T03:04:51 | 2021-06-09T03:04:51 | 304,628,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | ###[백준]2920
info=list(map(str,input().split()))
info="".join(info)
if info=="12345678":
print("ascending")
elif info=="87654321":
print("descending")
else:
print("mixed")
| [
"[email protected]"
] | |
5cc176da258299c6ddf7a86b3875331dc23f0c4a | 2f418a0f2fcca40f84ec0863b31ff974b574350c | /scripts/addons_extern/BlenderGIS-master/misc/view3d_setCamFromExif.py | af1522a46f970af7b694e7e71938eb00d01e6db1 | [] | no_license | JT-a/blenderpython279 | 57a81b55564218f3b1417c2ffa97f5161897ec79 | 04846c82f794c22f87d677d9eb8cec1d05c48cda | refs/heads/master | 2021-06-25T06:58:07.670613 | 2017-09-11T11:14:36 | 2017-09-11T11:14:36 | 103,723,697 | 4 | 2 | null | 2017-09-16T04:09:31 | 2017-09-16T04:09:31 | null | UTF-8 | Python | false | false | 9,569 | py | # -*- coding:utf-8 -*-
# ***** GPL LICENSE BLOCK *****
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# All rights reserved.
# ***** GPL LICENSE BLOCK *****
import os
from math import pi
import bpy
from bpy.props import StringProperty, CollectionProperty, EnumProperty
from bpy.types import Panel, Operator, OperatorFileListElement
#bgis
from ..geoscene import GeoScene
from ..utils.proj import reprojPt
from ..utils.img import getImgFormat
#deps
from ..lib import Tyf
def newEmpty(scene, name, location):
"""Create a new empty"""
target = bpy.data.objects.new(name, None)
target.empty_draw_size = 10
target.empty_draw_type = 'PLAIN_AXES'
target.location = location
scene.objects.link(target)
return target
def newCamera(scene, name, location, focalLength):
"""Create a new camera"""
cam = bpy.data.cameras.new(name)
cam.sensor_width = 35
cam.lens = focalLength
cam.draw_size = 10
cam_obj = bpy.data.objects.new(name,cam)
cam_obj.location = location
cam_obj.rotation_euler[0] = pi/2
cam_obj.rotation_euler[2] = pi
scene.objects.link(cam_obj)
return cam, cam_obj
def newTargetCamera(scene, name, location, focalLength):
"""Create a new camera.target"""
cam, cam_obj = newCamera(scene, name, location, focalLength)
x, y, z = location[:]
target = newEmpty(scene, name+".target", (x, y - 50, z))
constraint = cam_obj.constraints.new(type='TRACK_TO')
constraint.track_axis = 'TRACK_NEGATIVE_Z'
constraint.up_axis = 'UP_Y'
constraint.target = target
return cam, cam_obj
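# Minimal usage sketch (illustrative only, not part of the add-on): assumes it
# runs inside Blender with a scene available, e.g. bpy.context.scene.
#   cam, cam_obj = newTargetCamera(bpy.context.scene, "IMG_0001", (10.0, 5.0, 2.0), 35)
#   # cam is the camera datablock; cam_obj tracks the "IMG_0001.target" empty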
class SetGeophotosCam(Operator):
bl_idname = "camera.geophotos"
bl_description = "Create cameras from geotagged photos"
bl_label = "Exif cam"
bl_options = {"REGISTER"}
files = CollectionProperty(
name="File Path",
type=OperatorFileListElement,
)
directory = StringProperty(
subtype='DIR_PATH',
)
filter_glob = StringProperty(
default="*.jpg;*.jpeg;*.tif;*.tiff",
options={'HIDDEN'},
)
filename_ext = ""
exifMode = EnumProperty(
attr="exif_mode",
name="Action",
description="Choose an action",
items=[('TARGET_CAMERA','Target Camera','Create a camera with target helper'),('CAMERA','Camera','Create a camera'),('EMPTY','Empty','Create an empty helper'),('CURSOR','Cursor','Move cursor')],
default="TARGET_CAMERA"
)
def invoke(self, context, event):
scn = context.scene
geoscn = GeoScene(scn)
if not geoscn.isGeoref:
self.report({'ERROR'},"The scene must be georeferenced.")
return {'CANCELLED'}
#File browser
context.window_manager.fileselect_add(self)
return {'RUNNING_MODAL'}
def execute(self, context):
scn = context.scene
geoscn = GeoScene(scn)
directory = self.directory
for file_elem in self.files:
filepath = os.path.join(directory, file_elem.name)
if not os.path.isfile(filepath):
self.report({'ERROR'},"Invalid file")
return {'FINISHED'}
imgFormat = getImgFormat(filepath)
if imgFormat not in ['JPEG', 'TIFF']:
self.report({'ERROR'},"Invalid format " + str(imgFormat))
return {'FINISHED'}
try:
exif = Tyf.open(filepath)
#tags = {t.key:exif[t.key] for t in exif.exif.tags() if t.key != 'Unknown' }
except Exception as e:
self.report({'ERROR'},"Unable to open file. " + str(e))
return {'FINISHED'}
try:
lat = exif["GPSLatitude"] * exif["GPSLatitudeRef"]
lon = exif["GPSLongitude"] * exif["GPSLongitudeRef"]
except:
self.report({'ERROR'},"Can't find gps longitude or latitude")
return {'FINISHED'}
try:
alt = exif["GPSAltitude"]
except:
alt = 0
try:
x, y = reprojPt(4326, geoscn.crs, lon, lat)
except Exception as e:
self.report({'ERROR'},"Reprojection error. " + str(e))
return {'FINISHED'}
try:
print(exif["FocalLengthIn35mmFilm"])
focalLength = exif["FocalLengthIn35mmFilm"]
except:
focalLength = 35
try:
location = (x-geoscn.crsx, y-geoscn.crsy, alt)
name = bpy.path.display_name_from_filepath(filepath)
if self.exifMode == "TARGET_CAMERA":
cam, cam_obj = newTargetCamera(scn, name, location, focalLength)
elif self.exifMode == "CAMERA":
cam, cam_obj = newCamera(scn, name, location, focalLength)
elif self.exifMode == "EMPTY":
newEmpty(scn,name,location)
else:
scn.cursor_location = location
except Exception as e:
self.report({'ERROR'},"Can't perform action. " + str(e))
return {'FINISHED'}
if self.exifMode in ["TARGET_CAMERA","CAMERA"]:
cam['background'] = filepath
'''
try:
cam['imageWidth'] = exif["PixelXDimension"] #for jpg, in tif tag is named imageWidth...
cam['imageHeight'] = exif["PixelYDimension"]
except:
pass
'''
img = bpy.data.images.load(filepath)
w, h = img.size
cam['imageWidth'] = w #exif["PixelXDimension"] #for jpg, in tif tag is named imageWidth...
cam['imageHeight'] = h
try:
cam['orientation'] = exif["Orientation"]
except:
cam['orientation'] = 1 #no rotation
#Set camera rotation (NOT TESTED)
if cam['orientation'] == 8: #90° CCW
cam_obj.rotation_euler[1] -= pi/2
if cam['orientation'] == 6: #90° CW
cam_obj.rotation_euler[1] += pi/2
if cam['orientation'] == 3: #180°
cam_obj.rotation_euler[1] += pi
if scn.camera is None:
bpy.ops.camera.geophotos_setactive('EXEC_DEFAULT', camLst=cam_obj.name)
return {'FINISHED'}
class SetActiveGeophotoCam(Operator):
bl_idname = "camera.geophotos_setactive"
bl_description = "Switch active geophoto camera"
bl_label = "Switch geophoto camera"
bl_options = {"REGISTER"}
def listGeoCam(self, context):
scn = context.scene
#put each object in a tuple (key, label, tooltip)
return [(obj.name, obj.name, obj.name) for obj in scn.objects if obj.type == 'CAMERA' and 'background' in obj.data]
camLst = EnumProperty(name='Camera', description='Select camera', items=listGeoCam)
def draw(self, context):
layout = self.layout
layout.prop(self, 'camLst')#, text='')
def invoke(self, context, event):
if len(self.camLst) == 0:
self.report({'ERROR'},"No valid camera")
return {'CANCELLED'}
return context.window_manager.invoke_props_dialog(self)#, width=200)
def execute(self, context):
if context.space_data.type != 'VIEW_3D':
self.report({'ERROR'},"Wrong context")
return {'CANCELLED'}
scn = context.scene
view3d = context.space_data
#Get cam
cam_obj = scn.objects[self.camLst]
cam_obj.select = True
scn.objects.active = cam_obj
cam = cam_obj.data
scn.camera = cam_obj
#Set render size
scn.render.resolution_x = cam['imageWidth']
scn.render.resolution_y = cam['imageHeight']
scn.render.resolution_percentage = 100
#Get or load bpy image
filepath = cam['background']
try:
img = [img for img in bpy.data.images if img.filepath == filepath][0]
except:
img = bpy.data.images.load(filepath)
#Activate view3d background
view3d.show_background_images = True
#Hide all existing camera background
for bkg in view3d.background_images:
if bkg.view_axis == 'CAMERA':
bkg.show_background_image = False
#Get or load background image
bkgs = [bkg for bkg in view3d.background_images if bkg.image is not None]
try:
bkg = [bkg for bkg in bkgs if bkg.image.filepath == filepath][0]
except:
bkg = view3d.background_images.new()
bkg.image = img
#Set some props
bkg.show_background_image = True
bkg.view_axis = 'CAMERA'
bkg.opacity = 1
return {'FINISHED'}
| [
"[email protected]"
] | |
f90413e5f6f9b7b27188bad677b9dc723922f3fd | 1f71f4e41c6aa789f7f5481bc369b852f9ac8eab | /cvat/apps/lambda_manager/tests/test_lambda.py | 9409f2f9c400538428dfa6d7506be196ac1fa939 | [
"MIT",
"LGPL-2.0-or-later",
"GPL-1.0-or-later"
] | permissive | shalevy1/cvat | 2c7e041fa4c8b9a5166894b3ae5e7c28df5d8ae3 | 912e47e56c772eb6c2fb5b32f898b029a985fdfc | refs/heads/develop | 2023-02-13T23:05:15.428038 | 2023-02-08T16:08:52 | 2023-02-08T16:08:52 | 200,596,810 | 0 | 0 | MIT | 2023-01-27T08:48:21 | 2019-08-05T06:39:24 | TypeScript | UTF-8 | Python | false | false | 46,793 | py | # Copyright (C) 2021-2022 Intel Corporation
# Copyright (C) 2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
import json
from collections import OrderedDict
from io import BytesIO
from typing import Dict, Optional
from unittest import mock, skip
import os
import requests
from django.contrib.auth.models import Group, User
from django.http import HttpResponseNotFound, HttpResponseServerError
from PIL import Image
from rest_framework import status
from rest_framework.test import APIClient, APITestCase
from cvat.apps.engine.tests.utils import get_paginated_collection
LAMBDA_ROOT_PATH = '/api/lambda'
LAMBDA_FUNCTIONS_PATH = f'{LAMBDA_ROOT_PATH}/functions'
LAMBDA_REQUESTS_PATH = f'{LAMBDA_ROOT_PATH}/requests'
id_function_detector = "test-openvino-omz-public-yolo-v3-tf"
id_function_reid_response_data = "test-openvino-omz-intel-person-reidentification-retail-0300"
id_function_reid_response_no_data = "test-openvino-omz-intel-person-reidentification-retail-1234"
id_function_interactor = "test-openvino-dextr"
id_function_tracker = "test-pth-foolwood-siammask"
id_function_non_type = "test-model-has-non-type"
id_function_wrong_type = "test-model-has-wrong-type"
id_function_unknown_type = "test-model-has-unknown-type"
id_function_non_unique_labels = "test-model-has-non-unique-labels"
id_function_state_building = "test-model-has-state-building"
id_function_state_error = "test-model-has-state-error"
expected_keys_in_response_all_functions = ["id", "kind", "labels", "description", "framework", "name"]
expected_keys_in_response_function_interactor = expected_keys_in_response_all_functions + ["min_pos_points", "startswith_box"]
expected_keys_in_response_function_tracker = expected_keys_in_response_all_functions + ["state"]
expected_keys_in_response_requests = ["id", "function", "status", "progress", "enqueued", "started", "ended", "exc_info"]
path = os.path.join(os.path.dirname(__file__), 'assets', 'tasks.json')
with open(path) as f:
tasks = json.load(f)
# the json assets are trimmed copies of real API data (unnecessary fields removed)
path = os.path.join(os.path.dirname(__file__), 'assets', 'functions.json')
with open(path) as f:
functions = json.load(f)
def generate_image_file(filename, size=(100, 100)):
f = BytesIO()
image = Image.new('RGB', size=size)
image.save(f, 'jpeg')
f.name = filename
f.seek(0)
return f
class ForceLogin:
def __init__(self, user, client):
self.user = user
self.client = client
def __enter__(self):
if self.user:
self.client.force_login(self.user, backend='django.contrib.auth.backends.ModelBackend')
return self
def __exit__(self, exception_type, exception_value, traceback):
if self.user:
self.client.logout()
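# ForceLogin is used as a context manager so each helper request below runs
# under a specific account and the session is dropped afterwards, e.g.
# (illustrative):
#   with ForceLogin(user, client):
#       response = client.get('/api/tasks')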
class _LambdaTestCaseBase(APITestCase):
def setUp(self):
self.client = APIClient()
http_patcher = mock.patch('cvat.apps.lambda_manager.views.LambdaGateway._http', side_effect = self.__get_data_from_lambda_manager_http)
self.addCleanup(http_patcher.stop)
http_patcher.start()
invoke_patcher = mock.patch('cvat.apps.lambda_manager.views.LambdaGateway.invoke', side_effect = self.__invoke_function)
self.addCleanup(invoke_patcher.stop)
invoke_patcher.start()
def __get_data_from_lambda_manager_http(self, **kwargs):
url = kwargs["url"]
if url == "/api/functions":
return functions["positive"]
else:
func_id = url.split("/")[-1]
if func_id in functions["positive"]:
if func_id in [id_function_state_building, id_function_state_error]:
r = requests.RequestException()
r.response = HttpResponseServerError()
raise r # raise 500 Internal_Server error
return functions["positive"][func_id]
else:
r = requests.HTTPError()
r.response = HttpResponseNotFound()
raise r # raise 404 Not Found error
def __invoke_function(self, func, payload):
data = []
func_id = func.id
type_function = functions["positive"][func_id]["metadata"]["annotations"]["type"]
if type_function == "reid":
if func_id == id_function_reid_response_data:
data = [0, 1]
else:
data = []
elif type_function == "tracker":
data = {
"shape": [12.34, 34.0, 35.01, 41.99],
"state": {"key": "value"},
}
elif type_function == "interactor":
data = [
[8, 12],
[34, 56],
[77, 77],
]
elif type_function == "detector":
data = [
{'confidence': '0.9959098', 'label': 'car', 'points': [3, 3, 15, 15], 'type': 'rectangle'},
{'confidence': '0.89535173', 'label': 'car', 'points': [20, 25, 30, 35], 'type': 'rectangle'},
{'confidence': '0.59464583', 'label': 'car', 'points': [12.17, 45.0, 69.80, 18.99], 'type': 'polygon'},
]
return data
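    # __invoke_function replaces LambdaGateway.invoke with canned payloads whose
    # shape matches each function kind (reid, tracker, interactor, detector), so
    # the tests can run without a real serverless deployment behind the gateway.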
@classmethod
def _create_db_users(cls):
(group_admin, _) = Group.objects.get_or_create(name="admin")
(group_user, _) = Group.objects.get_or_create(name="business")
user_admin = User.objects.create_superuser(username="admin", email="",
password="admin")
user_admin.groups.add(group_admin)
user_dummy = User.objects.create_user(username="user", password="user",
email="[email protected]")
user_dummy.groups.add(group_user)
cls.admin = user_admin
cls.user = user_dummy
def _create_task(self, data, image_data, *, owner=None, org_id=None):
with ForceLogin(owner or self.admin, self.client):
response = self.client.post('/api/tasks', data=data, format="json",
QUERY_STRING=f'org_id={org_id}' if org_id is not None else None)
assert response.status_code == status.HTTP_201_CREATED, response.status_code
tid = response.data["id"]
response = self.client.post("/api/tasks/%s/data" % tid,
data=image_data,
QUERY_STRING=f'org_id={org_id}' if org_id is not None else None)
assert response.status_code == status.HTTP_202_ACCEPTED, response.status_code
response = self.client.get("/api/tasks/%s" % tid,
QUERY_STRING=f'org_id={org_id}' if org_id is not None else None)
task = response.data
return task
def _generate_task_images(self, count): # pylint: disable=no-self-use
images = {
"client_files[%d]" % i: generate_image_file("image_%d.jpg" % i)
for i in range(count)
}
images["image_quality"] = 75
return images
@classmethod
def setUpTestData(cls):
cls._create_db_users()
def _get_request(self, path, user, *, org_id=None):
with ForceLogin(user, self.client):
response = self.client.get(path,
QUERY_STRING=f'org_id={org_id}' if org_id is not None else '')
return response
def _delete_request(self, path, user, *, org_id=None):
with ForceLogin(user, self.client):
response = self.client.delete(path,
QUERY_STRING=f'org_id={org_id}' if org_id is not None else '')
return response
def _post_request(self, path, user, data, *, org_id=None):
data = json.dumps(data)
with ForceLogin(user, self.client):
response = self.client.post(path, data=data, content_type='application/json',
QUERY_STRING=f'org_id={org_id}' if org_id is not None else '')
return response
def _patch_request(self, path, user, data, *, org_id=None):
data = json.dumps(data)
with ForceLogin(user, self.client):
response = self.client.patch(path, data=data, content_type='application/json',
QUERY_STRING=f'org_id={org_id}' if org_id is not None else '')
return response
def _check_expected_keys_in_response_function(self, data):
kind = data["kind"]
if kind == "interactor":
for key in expected_keys_in_response_function_interactor:
self.assertIn(key, data)
elif kind == "tracker":
for key in expected_keys_in_response_function_tracker:
self.assertIn(key, data)
else:
for key in expected_keys_in_response_all_functions:
self.assertIn(key, data)
class LambdaTestCases(_LambdaTestCaseBase):
def setUp(self):
super().setUp()
images_main_task = self._generate_task_images(3)
images_assigneed_to_user_task = self._generate_task_images(3)
self.main_task = self._create_task(tasks["main"], images_main_task)
self.assigneed_to_user_task = self._create_task(
tasks["assigneed_to_user"], images_assigneed_to_user_task
)
def test_api_v2_lambda_functions_list(self):
response = self._get_request(LAMBDA_FUNCTIONS_PATH, self.admin)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for data in response.data:
self._check_expected_keys_in_response_function(data)
response = self._get_request(LAMBDA_FUNCTIONS_PATH, self.user)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for data in response.data:
self._check_expected_keys_in_response_function(data)
response = self._get_request(LAMBDA_FUNCTIONS_PATH, None)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
@mock.patch('cvat.apps.lambda_manager.views.LambdaGateway._http', return_value = {})
def test_api_v2_lambda_functions_list_empty(self, mock_http):
response = self._get_request(LAMBDA_FUNCTIONS_PATH, self.admin)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 0)
response = self._get_request(LAMBDA_FUNCTIONS_PATH, self.user)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 0)
response = self._get_request(LAMBDA_FUNCTIONS_PATH, None)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
@mock.patch('cvat.apps.lambda_manager.views.LambdaGateway._http', return_value = functions["negative"])
def test_api_v2_lambda_functions_list_wrong(self, mock_http):
response = self._get_request(LAMBDA_FUNCTIONS_PATH, self.admin)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_api_v2_lambda_functions_read(self):
ids_functions = [id_function_detector, id_function_interactor,\
id_function_tracker, id_function_reid_response_data, \
id_function_non_type, id_function_wrong_type, id_function_unknown_type]
for id_func in ids_functions:
path = f'{LAMBDA_FUNCTIONS_PATH}/{id_func}'
response = self._get_request(path, self.admin)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self._check_expected_keys_in_response_function(response.data)
response = self._get_request(path, self.user)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self._check_expected_keys_in_response_function(response.data)
response = self._get_request(path, None)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_api_v2_lambda_functions_read_wrong_id(self):
id_wrong_function = "test-functions-wrong-id"
response = self._get_request(f'{LAMBDA_FUNCTIONS_PATH}/{id_wrong_function}', self.admin)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
response = self._get_request(f'{LAMBDA_FUNCTIONS_PATH}/{id_wrong_function}', self.user)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
response = self._get_request(f'{LAMBDA_FUNCTIONS_PATH}/{id_wrong_function}', None)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
@mock.patch('cvat.apps.lambda_manager.views.LambdaGateway._http', return_value = functions["negative"][id_function_non_unique_labels])
def test_api_v2_lambda_functions_read_non_unique_labels(self, mock_http):
response = self._get_request(f'{LAMBDA_FUNCTIONS_PATH}/{id_function_non_unique_labels}', self.admin)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
@skip("Fail: add mock")
def test_api_v2_lambda_requests_list(self):
response = self._get_request(LAMBDA_REQUESTS_PATH, self.admin)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for key in expected_keys_in_response_requests:
self.assertIn(key, response.data[0])
response = self._get_request(LAMBDA_REQUESTS_PATH, self.user)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for key in expected_keys_in_response_requests:
self.assertIn(key, response.data[0])
response = self._get_request(LAMBDA_REQUESTS_PATH, None)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_api_v2_lambda_requests_list_empty(self):
response = self._get_request(LAMBDA_REQUESTS_PATH, self.admin)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 0)
response = self._get_request(LAMBDA_REQUESTS_PATH, self.user)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 0)
response = self._get_request(LAMBDA_REQUESTS_PATH, None)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_api_v2_lambda_requests_read(self):
# create request
data_main_task = {
"function": id_function_detector,
"task": self.main_task["id"],
"cleanup": True,
"threshold": 55,
"quality": "original",
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(LAMBDA_REQUESTS_PATH, self.admin, data_main_task)
self.assertEqual(response.status_code, status.HTTP_200_OK)
id_request = response.data["id"]
response = self._get_request(f'{LAMBDA_REQUESTS_PATH}/{id_request}', self.admin)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for key in expected_keys_in_response_requests:
self.assertIn(key, response.data)
response = self._get_request(f'{LAMBDA_REQUESTS_PATH}/{id_request}', self.user)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for key in expected_keys_in_response_requests:
self.assertIn(key, response.data)
response = self._get_request(f'{LAMBDA_REQUESTS_PATH}/{id_request}', None)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_api_v2_lambda_requests_read_wrong_id(self):
id_request = "cf343b95-afeb-475e-ab53-8d7e64991d30-wrong-id"
response = self._get_request(f'{LAMBDA_REQUESTS_PATH}/{id_request}', self.admin)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
response = self._get_request(f'{LAMBDA_REQUESTS_PATH}/{id_request}', self.user)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
response = self._get_request(f'{LAMBDA_REQUESTS_PATH}/{id_request}', None)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_api_v2_lambda_requests_delete_finished_request(self):
data = {
"function": id_function_detector,
"task": self.main_task["id"],
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(f'{LAMBDA_REQUESTS_PATH}', self.admin, data)
id_request = response.data["id"]
response = self._delete_request(f'{LAMBDA_REQUESTS_PATH}/{id_request}', None)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self._delete_request(f'{LAMBDA_REQUESTS_PATH}/{id_request}', self.admin)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
response = self._get_request(f'{LAMBDA_REQUESTS_PATH}/{id_request}', self.admin)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
response = self._post_request(f'{LAMBDA_REQUESTS_PATH}', self.admin, data)
id_request = response.data["id"]
response = self._delete_request(f'{LAMBDA_REQUESTS_PATH}/{id_request}', self.user)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
response = self._get_request(f'{LAMBDA_REQUESTS_PATH}/{id_request}', self.user)
self.assertEqual(response.status_code, status.HTTP_200_OK)
@skip("Fail: add mock")
def test_api_v2_lambda_requests_delete_not_finished_request(self):
pass
def test_api_v2_lambda_requests_create(self):
ids_functions = [id_function_detector, id_function_interactor, id_function_tracker, \
id_function_reid_response_data, id_function_detector, id_function_reid_response_no_data, \
id_function_non_type, id_function_wrong_type, id_function_unknown_type]
for id_func in ids_functions:
data_main_task = {
"function": id_func,
"task": self.main_task["id"],
"cleanup": True,
"threshold": 55,
"quality": "original",
"mapping": {
"car": { "name": "car" },
},
}
data_assigneed_to_user_task = {
"function": id_func,
"task": self.assigneed_to_user_task["id"],
"cleanup": False,
"quality": "compressed",
"max_distance": 70,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(LAMBDA_REQUESTS_PATH, self.admin, data_main_task)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for key in expected_keys_in_response_requests:
self.assertIn(key, response.data)
response = self._post_request(LAMBDA_REQUESTS_PATH, self.user, data_assigneed_to_user_task)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for key in expected_keys_in_response_requests:
self.assertIn(key, response.data)
response = self._post_request(LAMBDA_REQUESTS_PATH, self.user, data_main_task)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
response = self._post_request(LAMBDA_REQUESTS_PATH, None, data_main_task)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
@mock.patch('cvat.apps.lambda_manager.views.LambdaGateway._http', return_value = functions["negative"]["test-model-has-non-unique-labels"])
def test_api_v2_lambda_requests_create_non_unique_labels(self, mock_http):
data = {
"function": id_function_non_unique_labels,
"task": self.main_task["id"],
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(LAMBDA_REQUESTS_PATH, self.admin, data)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_api_v2_lambda_requests_create_empty_data(self):
data = {}
response = self._post_request(LAMBDA_REQUESTS_PATH, self.admin, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_api_v2_lambda_requests_create_without_function(self):
data = {
"task": self.main_task["id"],
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(LAMBDA_REQUESTS_PATH, self.admin, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_api_v2_lambda_requests_create_wrong_id_function(self):
data = {
"function": "test-requests-wrong-id",
"task": self.main_task["id"],
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(LAMBDA_REQUESTS_PATH, self.admin, data)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
@skip("Fail: add mock")
def test_api_v2_lambda_requests_create_two_requests(self):
data = {
"function": id_function_detector,
"task": self.main_task["id"],
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
self._post_request(LAMBDA_REQUESTS_PATH, self.admin, data)
response = self._post_request(LAMBDA_REQUESTS_PATH, self.admin, data)
self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
def test_api_v2_lambda_requests_create_empty_mapping(self):
data = {
"function": id_function_detector,
"task": self.main_task["id"],
"cleanup": True,
"mapping": {},
}
response = self._post_request(LAMBDA_REQUESTS_PATH, self.admin, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for key in expected_keys_in_response_requests:
self.assertIn(key, response.data)
def test_api_v2_lambda_requests_create_without_cleanup(self):
data = {
"function": id_function_detector,
"task": self.main_task["id"],
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(LAMBDA_REQUESTS_PATH, self.admin, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for key in expected_keys_in_response_requests:
self.assertIn(key, response.data)
def test_api_v2_lambda_requests_create_without_mapping(self):
data = {
"function": id_function_detector,
"task": self.main_task["id"],
"cleanup": True,
}
response = self._post_request(LAMBDA_REQUESTS_PATH, self.admin, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
for key in expected_keys_in_response_requests:
self.assertIn(key, response.data)
def test_api_v2_lambda_requests_create_without_task(self):
data = {
"function": id_function_detector,
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(LAMBDA_REQUESTS_PATH, self.admin, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_api_v2_lambda_requests_create_wrong_id_task(self):
data = {
"function": id_function_detector,
"task": 12345,
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(LAMBDA_REQUESTS_PATH, self.admin, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_api_v2_lambda_requests_create_is_not_ready(self):
ids_functions = [id_function_state_building, id_function_state_error]
for id_func in ids_functions:
data = {
"function": id_func,
"task": self.main_task["id"],
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(LAMBDA_REQUESTS_PATH, self.admin, data)
self.assertEqual(response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)
def test_api_v2_lambda_functions_create_detector(self):
data_main_task = {
"task": self.main_task["id"],
"frame": 0,
"cleanup": True,
"threshold": 0.55,
"mapping": {
"car": { "name": "car" },
},
}
data_assigneed_to_user_task = {
"task": self.assigneed_to_user_task["id"],
"frame": 0,
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_detector}", self.admin, data_main_task)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_detector}", self.user, data_assigneed_to_user_task)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_detector}", None, data_main_task)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
@skip("Fail: expected result != actual result") # TODO move test to test_api_v2_lambda_functions_create
def test_api_v2_lambda_functions_create_user_assigned_to_no_user(self):
data = {
"task": self.main_task["id"],
"frame": 0,
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_detector}", self.user, data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_api_v2_lambda_functions_create_interactor(self):
data_main_task = {
"task": self.main_task["id"],
"frame": 0,
"pos_points": [
[3.45, 6.78],
[12.1, 12.1],
[34.1, 41.0],
[43.01, 43.99],
],
"neg_points": [
[3.25, 6.58],
[11.1, 11.0],
[35.5, 44.44],
[45.01, 45.99],
],
}
data_assigneed_to_user_task = {
"task": self.assigneed_to_user_task["id"],
"frame": 0,
"threshold": 0.1,
"pos_points": [
[3.45, 6.78],
[12.1, 12.1],
[34.1, 41.0],
[43.01, 43.99],
],
"neg_points": [
[3.25, 6.58],
[11.1, 11.0],
[35.5, 44.44],
[45.01, 45.99],
],
}
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_interactor}", self.admin, data_main_task)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_interactor}", self.user, data_assigneed_to_user_task)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_interactor}", None, data_main_task)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_api_v2_lambda_functions_create_tracker(self):
data_main_task = {
"task": self.main_task["id"],
"frame": 0,
"shape": [
12.12,
34.45,
54.0,
76.12,
],
}
data_assigneed_to_user_task = {
"task": self.assigneed_to_user_task["id"],
"frame": 0,
"shape": [
12.12,
34.45,
54.0,
76.12,
],
}
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_tracker}", self.admin, data_main_task)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_tracker}", self.user, data_assigneed_to_user_task)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_tracker}", None, data_main_task)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_api_v2_lambda_functions_create_reid(self):
data_main_task = {
"task": self.main_task["id"],
"frame0": 0,
"frame1": 1,
"boxes0": [
OrderedDict([('attributes', []), ('frame', 0), ('group', None), ('id', 11258), ('label_id', 8), ('occluded', False), ('path_id', 0), ('points', [137.0, 129.0, 457.0, 676.0]), ('source', 'auto'), ('type', 'rectangle'), ('z_order', 0)]),
OrderedDict([('attributes', []), ('frame', 0), ('group', None), ('id', 11259), ('label_id', 8), ('occluded', False), ('path_id', 1), ('points', [1511.0, 224.0, 1537.0, 437.0]), ('source', 'auto'), ('type', 'rectangle'), ('z_order', 0)]),
],
"boxes1": [
OrderedDict([('attributes', []), ('frame', 1), ('group', None), ('id', 11260), ('label_id', 8), ('occluded', False), ('points', [1076.0, 199.0, 1218.0, 593.0]), ('source', 'auto'), ('type', 'rectangle'), ('z_order', 0)]),
OrderedDict([('attributes', []), ('frame', 1), ('group', None), ('id', 11261), ('label_id', 8), ('occluded', False), ('points', [924.0, 177.0, 1090.0, 615.0]), ('source', 'auto'), ('type', 'rectangle'), ('z_order', 0)]),
],
"quality": None,
"threshold": 0.5,
"max_distance": 55,
}
data_assigneed_to_user_task = {
"task": self.assigneed_to_user_task["id"],
"frame0": 0,
"frame1": 1,
"boxes0": [
OrderedDict([('attributes', []), ('frame', 0), ('group', None), ('id', 11258), ('label_id', 8), ('occluded', False), ('path_id', 0), ('points', [137.0, 129.0, 457.0, 676.0]), ('source', 'auto'), ('type', 'rectangle'), ('z_order', 0)]),
OrderedDict([('attributes', []), ('frame', 0), ('group', None), ('id', 11259), ('label_id', 8), ('occluded', False), ('path_id', 1), ('points', [1511.0, 224.0, 1537.0, 437.0]), ('source', 'auto'), ('type', 'rectangle'), ('z_order', 0)]),
],
"boxes1": [
OrderedDict([('attributes', []), ('frame', 1), ('group', None), ('id', 11260), ('label_id', 8), ('occluded', False), ('points', [1076.0, 199.0, 1218.0, 593.0]), ('source', 'auto'), ('type', 'rectangle'), ('z_order', 0)]),
OrderedDict([('attributes', []), ('frame', 1), ('group', 0), ('id', 11398), ('label_id', 8), ('occluded', False), ('points', [184.3935546875, 211.5048828125, 331.64968722073354, 97.27792672028772, 445.87667560321825, 126.17873100983161, 454.13404825737416, 691.8087578194827, 180.26452189455085]), ('source', 'manual'), ('type', 'polygon'), ('z_order', 0)]),
],
"quality": None,
}
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_reid_response_data}", self.admin, data_main_task)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_reid_response_data}", self.user, data_assigneed_to_user_task)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_reid_response_data}", None, data_main_task)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_reid_response_no_data}", self.admin, data_main_task)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_reid_response_no_data}", self.user, data_assigneed_to_user_task)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_reid_response_no_data}", None, data_main_task)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_api_v2_lambda_functions_create_non_type(self):
data = {
"task": self.main_task["id"],
"frame": 0,
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_non_type}", self.admin, data)
self.assertEqual(response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)
def test_api_v2_lambda_functions_create_wrong_type(self):
data = {
"task": self.main_task["id"],
"frame": 0,
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_wrong_type}", self.admin, data)
self.assertEqual(response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)
def test_api_v2_lambda_functions_create_unknown_type(self):
data = {
"task": self.main_task["id"],
"frame": 0,
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_unknown_type}", self.admin, data)
self.assertEqual(response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)
@mock.patch('cvat.apps.lambda_manager.views.LambdaGateway._http', return_value = functions["negative"]["test-model-has-non-unique-labels"])
def test_api_v2_lambda_functions_create_non_unique_labels(self, mock_http):
data = {
"task": self.main_task["id"],
"frame": 0,
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_non_unique_labels}", self.admin, data)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_api_v2_lambda_functions_create_quality(self):
qualities = [None, "original", "compressed"]
for quality in qualities:
data = {
"task": self.main_task["id"],
"frame": 0,
"cleanup": True,
"quality": quality,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_detector}", self.admin, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
data = {
"task": self.main_task["id"],
"frame": 0,
"cleanup": True,
"quality": "test-error-quality",
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_detector}", self.admin, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_api_v2_lambda_functions_create_empty_data(self):
data = {}
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_detector}", self.admin, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_api_v2_lambda_functions_create_detector_empty_mapping(self):
data = {
"task": self.main_task["id"],
"frame": 0,
"cleanup": True,
"mapping": {},
}
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_detector}", self.admin, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_api_v2_lambda_functions_create_detector_without_cleanup(self):
data = {
"task": self.main_task["id"],
"frame": 0,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_detector}", self.admin, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_api_v2_lambda_functions_create_detector_without_mapping(self):
data = {
"task": self.main_task["id"],
"frame": 0,
"cleanup": True,
}
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_detector}", self.admin, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_api_v2_lambda_functions_create_detector_without_task(self):
data = {
"frame": 0,
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_detector}", self.admin, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_api_v2_lambda_functions_create_detector_without_id_frame(self):
data = {
"task": self.main_task["id"],
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_detector}", self.admin, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_api_v2_lambda_functions_create_wrong_id_function(self):
data = {
"task": self.main_task["id"],
"frame": 0,
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/test-functions-wrong-id", self.admin, data)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_api_v2_lambda_functions_create_wrong_id_task(self):
data = {
"task": 12345,
"frame": 0,
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_detector}", self.admin, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@skip("Fail: expected result != actual result, issue #2770")
def test_api_v2_lambda_functions_create_detector_wrong_id_frame(self):
data = {
"task": self.main_task["id"],
"frame": 12345,
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_detector}", self.admin, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@skip("Fail: add mock and expected result != actual result")
def test_api_v2_lambda_functions_create_two_functions(self):
data = {
"task": self.main_task["id"],
"frame": 0,
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_detector}", self.admin, data)
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_detector}", self.admin, data)
self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)
def test_api_v2_lambda_functions_create_function_is_not_ready(self):
data = {
"task": self.main_task["id"],
"frame": 0,
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_state_building}", self.admin, data)
self.assertEqual(response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)
response = self._post_request(f"{LAMBDA_FUNCTIONS_PATH}/{id_function_state_error}", self.admin, data)
self.assertEqual(response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)
class Issue4996_Cases(_LambdaTestCaseBase):
# Check regressions for https://github.com/opencv/cvat/issues/4996#issuecomment-1266123032
# We need to check that job assignee can call functions in the assigned jobs
# This requires to pass the job id in the call request.
def _create_org(self, *, owner: int, members: Dict[int, str] = None) -> dict:
org = self._post_request('/api/organizations', user=owner, data={
"slug": "testorg",
"name": "test Org",
})
assert org.status_code == status.HTTP_201_CREATED
org = org.json()
for uid, role in members.items():
user = self._get_request('/api/users/self', user=uid)
assert user.status_code == status.HTTP_200_OK
user = user.json()
invitation = self._post_request('/api/invitations', user=owner, data={
'email': user['email'],
'role': role,
}, org_id=org['id'])
assert invitation.status_code == status.HTTP_201_CREATED
return org
def _set_task_assignee(self, task: int, assignee: Optional[int], *,
org_id: Optional[int] = None):
response = self._patch_request(f'/api/tasks/{task}', user=self.admin, data={
'assignee_id': assignee,
}, org_id=org_id)
assert response.status_code == status.HTTP_200_OK
def _set_job_assignee(self, job: int, assignee: Optional[int], *,
org_id: Optional[int] = None):
response = self._patch_request(f'/api/jobs/{job}', user=self.admin, data={
'assignee': assignee,
}, org_id=org_id)
assert response.status_code == status.HTTP_200_OK
def setUp(self):
self.org = self._create_org(owner=self.admin, members={self.user: 'worker'})
task = self._create_task(data={
'name': 'test_task',
'labels': [{'name': 'cat'}],
'segment_size': 2
},
image_data=self._generate_task_images(6),
owner=self.admin,
org_id=self.org['id'],
)
self.task = task
jobs = get_paginated_collection(lambda page:
self._get_request(
f"/api/jobs?task_id={self.task['id']}&page={page}",
self.admin, org_id=self.org['id']
)
)
self.job = jobs[1]
self.common_data = {
"task": self.task['id'],
"frame": 0,
"cleanup": True,
"mapping": {
"car": { "name": "car" },
},
}
self.function_name = f"{LAMBDA_FUNCTIONS_PATH}/{id_function_detector}"
return super().setUp()
def _get_valid_job_params(self):
return {
"job": self.job['id'],
"frame": 2
}
def _get_invalid_job_params(self):
return {
"job": self.job['id'],
"frame": 0
}
def test_can_call_function_for_job_worker_in_org__deny_unassigned_worker_with_task_request(self):
data = self.common_data.copy()
with self.subTest(job=None, assignee=None):
response = self._post_request(self.function_name, self.user, data,
org_id=self.org['id'])
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_can_call_function_for_job_worker_in_org__deny_unassigned_worker_with_job_request(self):
data = self.common_data.copy()
data.update(self._get_valid_job_params())
with self.subTest(job='defined', assignee=None):
response = self._post_request(self.function_name, self.user, data,
org_id=self.org['id'])
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_can_call_function_for_job_worker_in_org__allow_task_assigned_worker_with_task_request(self):
self._set_task_assignee(self.task['id'], self.user.id, org_id=self.org['id'])
data = self.common_data.copy()
with self.subTest(job=None, assignee='task'):
response = self._post_request(self.function_name, self.user, data,
org_id=self.org['id'])
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_can_call_function_for_job_worker_in_org__deny_job_assigned_worker_with_task_request(self):
self._set_job_assignee(self.job['id'], self.user.id, org_id=self.org['id'])
data = self.common_data.copy()
with self.subTest(job=None, assignee='job'):
response = self._post_request(self.function_name, self.user, data,
org_id=self.org['id'])
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_can_call_function_for_job_worker_in_org__allow_job_assigned_worker_with_job_request(self):
self._set_job_assignee(self.job['id'], self.user.id, org_id=self.org['id'])
data = self.common_data.copy()
data.update(self._get_valid_job_params())
with self.subTest(job='defined', assignee='job'):
response = self._post_request(self.function_name, self.user, data,
org_id=self.org['id'])
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_can_check_job_boundaries_in_function_call__fail_for_frame_outside_job(self):
self._set_job_assignee(self.job['id'], self.user.id, org_id=self.org['id'])
data = self.common_data.copy()
data.update(self._get_invalid_job_params())
with self.subTest(job='defined', frame='outside'):
response = self._post_request(self.function_name, self.user, data,
org_id=self.org['id'])
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_can_check_job_boundaries_in_function_call__ok_for_frame_inside_job(self):
self._set_job_assignee(self.job['id'], self.user.id, org_id=self.org['id'])
data = self.common_data.copy()
data.update(self._get_valid_job_params())
with self.subTest(job='defined', frame='inside'):
response = self._post_request(self.function_name, self.user, data,
org_id=self.org['id'])
self.assertEqual(response.status_code, status.HTTP_200_OK)
| [
"[email protected]"
] | |
6a726761b329c956b3768f904b22bebfcb704b34 | 2359121ebcebba9db2cee20b4e8f8261c5b5116b | /configs_pytorch/f30_pt.py | ab367f0af0238b8e8079026ca000938067363cfa | [] | no_license | EliasVansteenkiste/plnt | 79840bbc9f1518c6831705d5a363dcb3e2d2e5c2 | e15ea384fd0f798aabef04d036103fe7af3654e0 | refs/heads/master | 2021-01-20T00:34:37.275041 | 2017-07-20T18:03:08 | 2017-07-20T18:03:08 | 89,153,531 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,549 | py |
#copy of j25
import numpy as np
from collections import namedtuple
from functools import partial
from PIL import Image
import data_transforms
import data_iterators
import pathfinder
import utils
import app
import torch
import torchvision
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
restart_from_save = None
rng = np.random.RandomState(42)
# transformations
p_transform = {'patch_size': (256, 256),
'channels': 3,
'n_labels': 17}
#only lossless augmentations
p_augmentation = {
'rot90_values': [0,1,2,3],
'flip': [0, 1]
}
# data preparation function
def data_prep_function_train(x, p_transform=p_transform, p_augmentation=p_augmentation, **kwargs):
x = x.convert('RGB')
x = np.array(x)
x = np.swapaxes(x,0,2)
x = x / 255.
x = x.astype(np.float32)
x = data_transforms.lossless(x, p_augmentation, rng)
return x
def data_prep_function_valid(x, p_transform=p_transform, **kwargs):
x = x.convert('RGB')
x = np.array(x)
x = np.swapaxes(x,0,2)
x = x / 255.
x = x.astype(np.float32)
return x
def label_prep_function(x):
#cut out the label
return x
# data iterators
batch_size = 16
nbatches_chunk = 1
chunk_size = batch_size * nbatches_chunk
folds = app.make_stratified_split(no_folds=5)
print(len(folds))
train_ids = folds[0] + folds[1] + folds[2] + folds[3]
valid_ids = folds[4]
all_ids = folds[0] + folds[1] + folds[2] + folds[3] + folds[4]
bad_ids = []
train_ids = [x for x in train_ids if x not in bad_ids]
valid_ids = [x for x in valid_ids if x not in bad_ids]
test_ids = np.arange(40669)
test2_ids = np.arange(20522)
train_data_iterator = data_iterators.DataGenerator(dataset='train-jpg',
batch_size=chunk_size,
img_ids = train_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_train,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=True, random=True, infinite=True)
feat_data_iterator = data_iterators.DataGenerator(dataset='train-jpg',
batch_size=chunk_size,
img_ids = all_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
valid_data_iterator = data_iterators.DataGenerator(dataset='train-jpg',
batch_size=chunk_size,
img_ids = valid_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
test_data_iterator = data_iterators.DataGenerator(dataset='test-jpg',
batch_size=chunk_size,
img_ids = test_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
test2_data_iterator = data_iterators.DataGenerator(dataset='test2-jpg',
batch_size=chunk_size,
img_ids = test2_ids,
p_transform=p_transform,
data_prep_fun = data_prep_function_valid,
label_prep_fun = label_prep_function,
rng=rng,
full_batch=False, random=False, infinite=False)
nchunks_per_epoch = train_data_iterator.nsamples / chunk_size
max_nchunks = nchunks_per_epoch * 40
validate_every = int(0.5 * nchunks_per_epoch)
save_every = int(1 * nchunks_per_epoch)
learning_rate_schedule = {
0: 1e-4,
int(max_nchunks * 0.4): 3e-5,
int(max_nchunks * 0.6): 1e-5,
int(max_nchunks * 0.7): 5e-6,
int(max_nchunks * 0.8): 2e-6,
int(max_nchunks * 0.9): 1e-6
}
# model
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.resnet = torchvision.models.resnet34(pretrained=True)
self.resnet.fc = nn.Linear(self.resnet.fc.in_features, p_transform["n_labels"])
def forward(self, x):
x = self.resnet(x)
x_softmax = F.softmax(x.narrow(1,0,4))
x_sigmoid = F.sigmoid(x.narrow(1,4,p_transform['n_labels']-4))
x = torch.cat([x_softmax,x_sigmoid],1)
return x
def build_model():
net = Net()
return namedtuple('Model', [ 'l_out'])( net )
# loss
class MultiLoss(torch.nn.modules.loss._Loss):
def forward(self, input, target):
torch.nn.modules.loss._assert_no_grad(target)
softmax_loss = torch.sum(-torch.log(input.narrow(1,0,4)+1e-7)*target.narrow(1,0,4))
binary_loss = F.binary_cross_entropy(input.narrow(1,4,p_transform['n_labels']-4),target.narrow(1,4,p_transform['n_labels']-4),weight=None,size_average=False)
return (binary_loss+softmax_loss)/p_transform["n_labels"]
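# Note on the objective: the first four logits are treated as one mutually
# exclusive group (softmax + cross-entropy in MultiLoss), while the remaining
# n_labels - 4 outputs are independent labels (per-class sigmoid + binary
# cross-entropy), mirroring the head split in Net.forward above.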
def build_objective():
return MultiLoss()
def score(gts, preds):
return app.f2_score_arr(gts, preds)
# updates
def build_updates(model, learning_rate):
return optim.Adam(model.parameters(), lr=learning_rate)
| [
"[email protected]"
] | |
4e3dbf1d5e5f1322b18d30e741a4ecefa93b1978 | e7795082c0131682803a09e929a86b2deddeab74 | /common/PackageRequest.py | 55551a9dd599fd01e2d21b48e965acf577cdcab7 | [
"MIT"
] | permissive | liwanlei/FXTest | 01de3ad55849b16c49d93b58d1aae21fd0fdafa0 | aeda58d01c14194290ca149d411c3a8596cca82d | refs/heads/master | 2023-04-01T15:45:26.668688 | 2023-03-19T05:19:54 | 2023-03-19T05:19:54 | 97,098,845 | 807 | 419 | MIT | 2022-04-23T06:52:16 | 2017-07-13T08:27:48 | Python | UTF-8 | Python | false | false | 5,324 | py | # -*- coding: utf-8 -*-
# @Date : 2017-07-20 15:13:25
# @Author : lileilei
'''
A simple wrapper around the requests module.
'''
import requests, json
from config import Interface_Time_Out
from requests import exceptions
from common.systemlog import logger
class reques():
    def get(self, url, headers, parms):  # GET request
try:
self.r = requests.get(url, headers=headers, params=parms, timeout=Interface_Time_Out)
self.r.encoding = 'UTF-8'
spend = self.r.elapsed.total_seconds()
json_response = json.loads(self.r.text)
return json_response, spend
except exceptions.Timeout:
logger.exception(exceptions.Timeout)
return {'get请求出错': "请求超时"}
except exceptions.InvalidURL:
logger.exception(exceptions.InvalidURL)
return {'get请求出错': "非法url"}
except exceptions.HTTPError:
logger.exception(exceptions.HTTPError)
return {'get请求出错': "http请求错误"}
except Exception as e:
logger.exception(e)
return {'get请求出错': "错误原因:%s" % e}
    def post(self, url, params, headers):  # POST request
data = json.dumps(params)
try:
reponse = requests.post(url,
data=data,
timeout=Interface_Time_Out,
headers=headers)
if reponse.status_code != 200:
return {'post请求出错': "状态码返回不是200"}
json_response = json.loads(reponse.text)
spend = reponse.elapsed.total_seconds()
return json_response, spend
except exceptions.Timeout:
logger.info(exceptions.Timeout)
return {'post请求出错': "请求超时"}
except exceptions.InvalidURL:
logger.info(exceptions.InvalidURL)
return {'post请求出错': "非法url"}
except exceptions.HTTPError:
logger.info(exceptions.HTTPError)
return {'post请求出错': "http请求错误"}
except Exception as e:
logger.info(e)
return {'post请求出错': "错误原因:%s" % e}
    def put(self, url, params, headers):  # PUT request
data = json.dumps(params)
try:
reponse = requests.put(url,
data=data,
timeout=Interface_Time_Out,
headers=headers)
if reponse.status_code != 200:
logger.info(reponse.status_code)
return {'put请求出错': "状态码返回不是200"}
json_response = json.loads(reponse.text)
spend = reponse.elapsed.total_seconds()
return json_response, spend
except exceptions.Timeout:
logger.info(exceptions.Timeout)
return {'put请求出错': "请求超时"}
except exceptions.InvalidURL:
logger.info(exceptions.InvalidURL)
return {'put请求出错': "非法url"}
except exceptions.HTTPError:
logger.info(exceptions.HTTPError)
return {'put请求出错': "http请求错误"}
except Exception as e:
logger.info(e)
return {'put请求出错': "错误原因:%s" % e}
    def delfile(self, url, params, headers):  # DELETE request
try:
self.rdel_word = requests.delete(url, data=params,
headers=headers,
timeout=Interface_Time_Out)
json_response = json.loads(self.rdel_word.text)
spend = self.rdel_word.elapsed.total_seconds()
return json_response, spend
except exceptions.Timeout:
logger.info(exceptions.Timeout)
return {'delete请求出错': "请求超时"}
except exceptions.InvalidURL:
logger.info(exceptions.InvalidURL)
return {'delete请求出错': "非法url"}
except exceptions.HTTPError:
logger.info(exceptions.HTTPError)
return {'delete请求出错': "http请求错误"}
except Exception as e:
logger.info(e)
return {'delete请求出错': "错误原因:%s" % e}
    def putfile(self, url, params, headers):  # PUT request
try:
self.rdata = json.dumps(params)
me = requests.put(url, self.rdata, headers=headers,
timeout=Interface_Time_Out)
json_response = json.loads(me.text)
spend = me.elapsed.total_seconds()
return json_response, spend
except exceptions.Timeout:
logger.exception(exceptions.Timeout)
return {'put请求出错': "请求超时"}
except exceptions.InvalidURL:
logger.exception(exceptions.InvalidURL)
return {'put请求出错': "非法url"}
except exceptions.HTTPError:
logger.exception(exceptions.HTTPError)
return {'put请求出错': "http请求错误"}
except Exception as e:
logger.exception(e)
return {'put请求出错': "错误原因:%s" % e}
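# Usage sketch (illustrative; the URL and payload below are placeholders):
#   client = reques()
#   body, seconds = client.get('http://example.com/api/ping', headers={}, parms={})
# On success each helper returns (parsed_json, elapsed_seconds); on failure it
# returns a single dict describing the error instead of a tuple.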
| [
"[email protected]"
] | |
bc02bd5d06610caebebf2ffa034ff85777eb4b1d | d51e67abc7709b17664ee08eb9d68ecb938f00a9 | /utils/farmbot.py | 1ace2bcc577c12cf3db231e381d3c13f6f25e1c8 | [] | no_license | averdier/farmware_watering_farmdesign | bb6d3d2615c8ac5294e3bbd61cea97cf0950e05c | d474d78629504907971c7f7b2ee6b88954f6f4cf | refs/heads/master | 2020-03-30T09:26:40.012409 | 2018-10-02T09:16:04 | 2018-10-02T09:16:04 | 151,075,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,367 | py | # -*- coding: utf-8 -*-
import uuid
from time import time, sleep
from .api import send_celery_script, log, get_resource
from .geometry import Point3D
def prepare_move_absolute_script(position, speed):
return {
'kind': 'rpc_request',
'args': {
'label': 'farmware_circle_' + str(uuid.uuid4())
},
'body': [{
'kind': 'move_absolute',
'args': {
'location': {
'kind': 'coordinate',
'args': {
'x': position.x,
'y': position.y,
'z': position.z
}
},
'offset': {
'kind': 'coordinate',
'args': {
'x': 0,
'y': 0,
'z': 0
}
},
'speed': speed
}
}]
}
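# The dict above is a FarmBot CeleryScript RPC: one move_absolute step wrapped
# in an rpc_request whose label is randomised with uuid4 so individual requests
# can be told apart.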
class FarmBot:
@property
def position(self):
response = get_resource('/api/v1/bot/state')
if response.status_code != 200:
raise RuntimeError('Unable to get position')
data = response.json()['location_data']['position']
return Point3D(data['x'], data['y'], data['z'])
def move(self, position, speed, tolerance, timeout):
target = Point3D(
int(position.x),
int(position.y),
int(position.z)
)
log('target position: ' + str(target), 'debug')
celery_move_script = prepare_move_absolute_script(target, speed)
current_position = self.position
send_celery_script(celery_move_script)
t0 = time()
while not target == current_position:
new_position = self.position
if new_position == current_position:
if new_position.is_near(target, tolerance):
break
else:
t1 = time()
if t1 - t0 > timeout:
if not new_position.is_near(target, tolerance):
raise RuntimeError('Movement timeout')
else:
break
else:
current_position = new_position
t0 = time()
sleep(0.5)
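# Usage sketch (illustrative values; assumes the farmware API helpers in
# utils.api are already configured):
#   bot = FarmBot()
#   bot.move(Point3D(100, 200, 0), speed=100, tolerance=2, timeout=120)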
| [
"[email protected]"
] | |
2b9bacb7050fe1eb24ece4e8eb572e2926d53cb4 | 4f340ed313a67bddf077a9d8da2205b9c2feb29a | /emv/util.py | dd9f8ac0b99ed0b904b87cec4e819cd092561dd2 | [
"MIT"
] | permissive | russss/python-emv | 49b645be9ec60acc8cd23e95e5423a1b2c90a175 | ce316ba05165e9b82026f72b55329b9bc287c0b8 | refs/heads/main | 2023-06-21T19:09:10.124472 | 2023-06-01T18:44:34 | 2023-06-11T15:26:11 | 76,971,486 | 105 | 30 | MIT | 2023-06-11T15:26:12 | 2016-12-20T16:10:14 | Python | UTF-8 | Python | false | false | 1,096 | py | import re
def hex_int(val):
"""Convert an integer into a decimal-encoded hex integer as bytes,
which the EMV spec seems awfully keen on.
>>> hex_int(123456)
[0x12, 0x34, 0x56]
>>> hex_int(65432)
[0x06, 0x54, 0x32]
"""
s = str(val)
if len(s) % 2 != 0:
s = "0" + s
return [int(s[i : i + 2], 16) for i in range(0, len(s), 2)]
def from_hex_int(val):
"""Convert hex digits to decimal.
>>> from_hex_int([0x12, 0x34])
1234
"""
return int("".join(["%02x" % i for i in val]).rstrip("f"))
def from_hex_date(val):
return "%02x/%02x/%02x" % (val[0], val[1], val[2])
def decode_int(val):
result = val[0]
for i in val[1:]:
result = result << 8
result += i
return result
def format_bytes(data):
if type(data) == int:
return "[%02X]" % data
return "[" + " ".join(["%02X" % i for i in data]) + "]"
def unformat_bytes(data):
data = re.split(r"(?:\s+|:)", data)
return [int(i, 16) for i in data]
def bit_set(value, bit):
mask = 1 << bit
return (value & mask) == mask
| [
"[email protected]"
] | |
631b80890ec2de9a99b09a99a04c8962afd187d6 | e1fada3a9846a5593e3d3d2fdc32b23b832e38b4 | /tests/unit/cli/utils/test_hpo.py | a71d48a0cbb6160a41c64b495c9d45ad40b43c95 | [
"Apache-2.0"
] | permissive | GalyaZalesskaya/openvino_training_extensions | fd1ebb189900008b16b85568449e5c62d8edbad5 | 6116639caeff100b06a6c10a96c7e7f5951f20c7 | refs/heads/develop | 2023-09-03T19:32:44.702497 | 2023-03-15T06:48:24 | 2023-03-15T06:48:24 | 202,568,309 | 0 | 0 | Apache-2.0 | 2019-10-28T16:16:27 | 2019-08-15T15:41:59 | Python | UTF-8 | Python | false | false | 27,415 | py | import json
from copy import deepcopy
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List
from unittest.mock import MagicMock
import pytest
import otx
from otx.api.configuration.helper import create as create_conf_hp
from otx.api.entities.model import ModelEntity
from otx.api.entities.model_template import TaskType
from otx.api.entities.subset import Subset
from otx.api.entities.task_environment import TaskEnvironment
from otx.cli.registry import find_and_parse_model_template
from otx.cli.utils.hpo import (
HpoCallback,
HpoDataset,
HpoRunner,
TaskEnvironmentManager,
TaskManager,
Trainer,
get_best_hpo_weight,
run_hpo,
run_trial,
)
from otx.hpo.hpo_base import TrialStatus
from tests.test_suite.e2e_test_system import e2e_pytest_unit
CLASSIFCATION_TASK = {TaskType.CLASSIFICATION}
DETECTION_TASK = {TaskType.DETECTION, TaskType.INSTANCE_SEGMENTATION, TaskType.ROTATED_DETECTION}
SEGMENTATION_TASK = {TaskType.SEGMENTATION}
ANOMALY_TASK = {TaskType.ANOMALY_CLASSIFICATION, TaskType.ANOMALY_DETECTION, TaskType.ANOMALY_SEGMENTATION}
MMCV_TASK = CLASSIFCATION_TASK | DETECTION_TASK | SEGMENTATION_TASK
ALL_TASK = MMCV_TASK | ANOMALY_TASK
OTX_ROOT_PATH = Path(otx.__file__).parent
class TestTaskManager:
@e2e_pytest_unit
@pytest.mark.parametrize("task", MMCV_TASK)
def test_is_mmcv_framework_task(self, task: TaskType):
task_manager = TaskManager(task)
assert task_manager.is_mmcv_framework_task()
@e2e_pytest_unit
@pytest.mark.parametrize("task", ANOMALY_TASK)
def test_is_not_mmcv_framework_task(self, task: TaskType):
task_manager = TaskManager(task)
assert not task_manager.is_mmcv_framework_task()
@e2e_pytest_unit
@pytest.mark.parametrize("task", CLASSIFCATION_TASK)
def test_is_cls_framework_task(self, task: TaskType):
task_manager = TaskManager(task)
assert task_manager.is_cls_framework_task()
@e2e_pytest_unit
@pytest.mark.parametrize("task", ALL_TASK - CLASSIFCATION_TASK)
def test_is_not_cls_framework_task(self, task: TaskType):
task_manager = TaskManager(task)
assert not task_manager.is_cls_framework_task()
@e2e_pytest_unit
@pytest.mark.parametrize("task", DETECTION_TASK)
def test_is_det_framework_task(self, task: TaskType):
task_manager = TaskManager(task)
assert task_manager.is_det_framework_task()
@e2e_pytest_unit
@pytest.mark.parametrize("task", ALL_TASK - DETECTION_TASK)
def test_is_not_det_framework_task(self, task: TaskType):
task_manager = TaskManager(task)
assert not task_manager.is_det_framework_task()
@e2e_pytest_unit
@pytest.mark.parametrize("task", SEGMENTATION_TASK)
def test_is_seg_framework_task(self, task: TaskType):
task_manager = TaskManager(task)
assert task_manager.is_seg_framework_task()
@e2e_pytest_unit
@pytest.mark.parametrize("task", ALL_TASK - SEGMENTATION_TASK)
def test_is_not_seg_framework_task(self, task: TaskType):
task_manager = TaskManager(task)
assert not task_manager.is_seg_framework_task()
@e2e_pytest_unit
@pytest.mark.parametrize("task", ANOMALY_TASK)
def test_is_anomaly_framework_task(self, task: TaskType):
task_manager = TaskManager(task)
assert task_manager.is_anomaly_framework_task()
@e2e_pytest_unit
@pytest.mark.parametrize("task", ALL_TASK - ANOMALY_TASK)
def test_is_not_anomaly_framework_task(self, task: TaskType):
task_manager = TaskManager(task)
assert not task_manager.is_anomaly_framework_task()
@e2e_pytest_unit
@pytest.mark.parametrize("task", MMCV_TASK)
def test_get_mmcv_batch_size_name(self, task: TaskType):
task_manager = TaskManager(task)
assert task_manager.get_batch_size_name() == "learning_parameters.batch_size"
@e2e_pytest_unit
@pytest.mark.parametrize("task", ANOMALY_TASK)
def test_get_anomaly_batch_size_name(self, task: TaskType):
task_manager = TaskManager(task)
assert task_manager.get_batch_size_name() == "learning_parameters.train_batch_size"
@e2e_pytest_unit
def test_get_unknown_task_batch_size_name(self, mocker):
mock_func1 = mocker.patch.object(TaskManager, "is_mmcv_framework_task")
mock_func1.return_value = False
mock_func2 = mocker.patch.object(TaskManager, "is_anomaly_framework_task")
mock_func2.return_value = False
task_manager = TaskManager(mocker.MagicMock())
with pytest.raises(RuntimeError):
task_manager.get_batch_size_name()
@e2e_pytest_unit
@pytest.mark.parametrize("task", MMCV_TASK)
def test_get_mmcv_epoch_name(self, task: TaskType):
task_manager = TaskManager(task)
assert task_manager.get_epoch_name() == "num_iters"
@e2e_pytest_unit
@pytest.mark.parametrize("task", ANOMALY_TASK)
def test_get_anomaly_epoch_name(self, task: TaskType):
task_manager = TaskManager(task)
assert task_manager.get_epoch_name() == "max_epochs"
@e2e_pytest_unit
def test_get_unknown_task_epoch_name(self, mocker):
mock_func1 = mocker.patch.object(TaskManager, "is_mmcv_framework_task")
mock_func1.return_value = False
mock_func2 = mocker.patch.object(TaskManager, "is_anomaly_framework_task")
mock_func2.return_value = False
task_manager = TaskManager(mocker.MagicMock())
with pytest.raises(RuntimeError):
task_manager.get_epoch_name()
@e2e_pytest_unit
@pytest.mark.parametrize("task", MMCV_TASK)
def test_copy_weight(self, task: TaskType):
task_manager = TaskManager(task)
fake_model_weight = Path("temp_epoch_3.pth")
with TemporaryDirectory() as src_dir, TemporaryDirectory() as det_dir:
weight_in_src = src_dir / fake_model_weight
weight_in_det = det_dir / fake_model_weight
weight_in_src.write_text("fake")
task_manager.copy_weight(src_dir, det_dir)
assert weight_in_det.exists()
@e2e_pytest_unit
@pytest.mark.parametrize("task", MMCV_TASK)
def test_get_latest_weight(self, task: TaskType):
task_manager = TaskManager(task)
with TemporaryDirectory() as work_dir:
for i in range(1, 10):
(work_dir / Path(f"epoch_{i}.pth")).write_text("fake")
latest_model_weight = work_dir / Path("epoch_10.pth")
latest_model_weight.write_text("fake")
assert task_manager.get_latest_weight(work_dir) == str(latest_model_weight)
def get_template_path(template_dir: str) -> Path:
task_config_dir = OTX_ROOT_PATH / "algorithms" / template_dir
return list(task_config_dir.glob("**/template.yaml"))[0]
def make_task_env(template_path: str) -> TaskEnvironment:
template = find_and_parse_model_template(template_path)
return TaskEnvironment(template, None, create_conf_hp(template.hyper_parameters.data), MagicMock())
@pytest.fixture(scope="module")
def cls_template_path() -> str:
return str(get_template_path("classification/configs"))
@pytest.fixture(scope="module")
def det_template_path() -> str:
return str(get_template_path("detection/configs/detection"))
@pytest.fixture(scope="module")
def seg_template_path() -> str:
return str(get_template_path("segmentation/configs"))
@pytest.fixture(scope="module")
def anomaly_template_path() -> str:
return str(OTX_ROOT_PATH / "algorithms/anomaly/configs/classification/stfpm/template.yaml")
@pytest.fixture(scope="module")
def cls_task_env(cls_template_path):
return make_task_env(cls_template_path)
@pytest.fixture(scope="module")
def det_task_env(det_template_path) -> TaskEnvironment:
return make_task_env(det_template_path)
@pytest.fixture(scope="module")
def seg_task_env(seg_template_path) -> TaskEnvironment:
return make_task_env(seg_template_path)
@pytest.fixture(scope="module")
def anomaly_task_env(anomaly_template_path) -> TaskEnvironment:
return make_task_env(anomaly_template_path)
@pytest.fixture
def mmcv_task_env(cls_task_env, det_task_env, seg_task_env) -> List[TaskEnvironment]:
return [cls_task_env, det_task_env, seg_task_env]
@pytest.fixture
def all_task_env(cls_task_env, det_task_env, seg_task_env, anomaly_task_env) -> List[TaskEnvironment]:
return [cls_task_env, det_task_env, seg_task_env, anomaly_task_env]
@pytest.fixture
def mock_environment():
MockTaskEnv = MagicMock(spec=TaskEnvironment)
return MockTaskEnv()
@pytest.fixture(scope="module")
def action_template_path() -> str:
return str(get_template_path("action"))
@pytest.fixture(scope="module")
def action_task_env(action_template_path) -> TaskEnvironment:
return make_task_env(action_template_path)
class TestTaskEnvironmentManager:
@pytest.fixture(autouse=True)
def _make_mock_task_env(self, mock_environment):
self._mock_environment = mock_environment
@e2e_pytest_unit
def test_init(self, all_task_env):
for task_env in all_task_env:
TaskEnvironmentManager(task_env)
@e2e_pytest_unit
def test_get_task(self, cls_task_env, det_task_env, seg_task_env):
task_env = TaskEnvironmentManager(cls_task_env)
assert task_env.get_task() == TaskType.CLASSIFICATION
task_env = TaskEnvironmentManager(det_task_env)
assert task_env.get_task() == TaskType.DETECTION
task_env = TaskEnvironmentManager(seg_task_env)
assert task_env.get_task() == TaskType.SEGMENTATION
@e2e_pytest_unit
def test_get_model_template(
self, cls_task_env, det_task_env, seg_task_env, cls_template_path, det_template_path, seg_template_path
):
task_env = TaskEnvironmentManager(cls_task_env)
assert task_env.get_model_template() == find_and_parse_model_template(cls_template_path)
task_env = TaskEnvironmentManager(det_task_env)
assert task_env.get_model_template() == find_and_parse_model_template(det_template_path)
task_env = TaskEnvironmentManager(seg_task_env)
assert task_env.get_model_template() == find_and_parse_model_template(seg_template_path)
@e2e_pytest_unit
def test_get_model_template_path(
self, cls_task_env, det_task_env, seg_task_env, cls_template_path, det_template_path, seg_template_path
):
task_env = TaskEnvironmentManager(cls_task_env)
assert task_env.get_model_template_path() == cls_template_path
task_env = TaskEnvironmentManager(det_task_env)
assert task_env.get_model_template_path() == det_template_path
task_env = TaskEnvironmentManager(seg_task_env)
assert task_env.get_model_template_path() == seg_template_path
@e2e_pytest_unit
def test_set_hyper_parameter_using_str_key(self):
task_env = TaskEnvironmentManager(self._mock_environment)
hyper_parameter = {"a.b.c.d": 1, "e.f.g.h": 2}
task_env.set_hyper_parameter_using_str_key(hyper_parameter)
env_hp = self._mock_environment.get_hyper_parameters()
assert env_hp.a.b.c.d == hyper_parameter["a.b.c.d"]
assert env_hp.e.f.g.h == hyper_parameter["e.f.g.h"]
@e2e_pytest_unit
def test_get_dict_type_hyper_parameter(self):
learning_parameters = self._mock_environment.get_hyper_parameters().learning_parameters
learning_parameters.parameters = ["a", "b"]
learning_parameters.a = 1
learning_parameters.b = 2
task_env = TaskEnvironmentManager(self._mock_environment)
dict_hp = task_env.get_dict_type_hyper_parameter()
assert dict_hp["learning_parameters.a"] == 1
assert dict_hp["learning_parameters.b"] == 2
@e2e_pytest_unit
@pytest.mark.parametrize("task", ALL_TASK)
def test_get_max_epoch(self, task):
max_epoch = 10
self._mock_environment.model_template.task_type = task
learning_parameters = self._mock_environment.get_hyper_parameters().learning_parameters
setattr(learning_parameters, TaskManager(task).get_epoch_name(), max_epoch)
task_env = TaskEnvironmentManager(self._mock_environment)
assert task_env.get_max_epoch() == max_epoch
@e2e_pytest_unit
def test_save_mmcv_initial_weight(self, mmcv_task_env):
for task_env in mmcv_task_env:
task_env.model = None
task_env = TaskEnvironmentManager(task_env)
assert not task_env.save_initial_weight("fake_path")
@e2e_pytest_unit
def test_save_anomaly_initial_weight(self, mocker, anomaly_task_env):
def mock_save_model_data(model, save_path: str):
(Path(save_path) / "weights.pth").write_text("fake")
mocker.patch.object(TaskEnvironmentManager, "get_train_task")
mocker.patch("otx.cli.utils.hpo.save_model_data", mock_save_model_data)
with TemporaryDirectory() as tmp_dir:
anomaly_task_env.model = None
task_env = TaskEnvironmentManager(anomaly_task_env)
save_path = Path(tmp_dir) / "init.pth"
assert task_env.save_initial_weight(str(save_path))
assert save_path.exists()
@e2e_pytest_unit
def test_loaded_inital_weight(self, mocker, all_task_env):
def mock_save_model_data(model, save_path: str):
(Path(save_path) / "weights.pth").write_text("fake")
mocker.patch.object(TaskEnvironmentManager, "get_train_task")
mocker.patch("otx.cli.utils.hpo.save_model_data", mock_save_model_data)
with TemporaryDirectory() as tmp_dir:
for task_env in all_task_env:
task_env.model = mocker.MagicMock()
task_env = TaskEnvironmentManager(task_env)
save_path = Path(tmp_dir) / "init.pth"
assert task_env.save_initial_weight(str(save_path))
assert save_path.exists()
@e2e_pytest_unit
def test_get_train_task(self, mocker, all_task_env):
mock_func = mocker.patch("otx.cli.utils.hpo.get_impl_class")
for task_env in all_task_env:
mock_class = mocker.MagicMock()
            mock_func.return_value = mock_class
task_env = TaskEnvironmentManager(task_env)
task_env.get_train_task()
mock_class.assert_not_called()
@e2e_pytest_unit
def test_get_mmcv_batch_size_name(self, mmcv_task_env):
for task_env in mmcv_task_env:
task_env = TaskEnvironmentManager(task_env)
assert task_env.get_batch_size_name() == "learning_parameters.batch_size"
@e2e_pytest_unit
def test_get_anomaly_batch_size_name(self, anomaly_task_env):
task_env = TaskEnvironmentManager(anomaly_task_env)
assert task_env.get_batch_size_name() == "learning_parameters.train_batch_size"
@e2e_pytest_unit
def test_load_model_weight(self, mocker, all_task_env):
mock_func = mocker.patch("otx.cli.utils.hpo.read_model")
for task_env in all_task_env:
mock_class = mocker.MagicMock()
mock_func.return_value = mock_class
task_manager = TaskEnvironmentManager(task_env)
task_manager.load_model_weight("fake", mocker.MagicMock())
assert task_env.model == mock_class
@e2e_pytest_unit
def test_resume_model_weight(self, mocker, all_task_env):
mock_func = mocker.patch("otx.cli.utils.hpo.read_model")
for task_env in all_task_env:
mock_class = mocker.MagicMock()
mock_func.return_value = mock_class
task_manager = TaskEnvironmentManager(task_env)
task_manager.resume_model_weight("fake", mocker.MagicMock())
assert task_env.model == mock_class
assert mock_class.model_adapters["resume"]
@e2e_pytest_unit
def test_get_new_model_entity(self, all_task_env):
for task_env in all_task_env:
task_manager = TaskEnvironmentManager(task_env)
model_entity = task_manager.get_new_model_entity()
assert isinstance(model_entity, ModelEntity)
@e2e_pytest_unit
def test_set_epoch(self, all_task_env):
epoch = 123
for task_env in all_task_env:
task_manager = TaskEnvironmentManager(task_env)
task_manager.set_epoch(epoch)
assert task_manager.get_max_epoch() == epoch
class TestHpoRunner:
@e2e_pytest_unit
def test_init(self, all_task_env):
for task_env in all_task_env:
HpoRunner(task_env, 100, 10, "fake_path")
@e2e_pytest_unit
@pytest.mark.parametrize("train_dataset_size,val_dataset_size", [(0, 10), (10, 0), (-1, -1)])
def test_init_wrong_dataset_size(self, cls_task_env, train_dataset_size, val_dataset_size):
with pytest.raises(ValueError):
HpoRunner(cls_task_env, train_dataset_size, val_dataset_size, "fake_path", 4)
@e2e_pytest_unit
@pytest.mark.parametrize("hpo_time_ratio", [-3, 0])
def test_init_wrong_hpo_time_ratio(self, cls_task_env, hpo_time_ratio):
with pytest.raises(ValueError):
HpoRunner(cls_task_env, 100, 10, "fake_path", hpo_time_ratio)
@e2e_pytest_unit
def test_run_hpo(self, mocker, cls_task_env):
cls_task_env.model = None
hpo_runner = HpoRunner(cls_task_env, 100, 10, "fake_path")
mock_run_hpo_loop = mocker.patch("otx.cli.utils.hpo.run_hpo_loop")
mock_hb = mocker.patch("otx.cli.utils.hpo.HyperBand")
hpo_runner.run_hpo(mocker.MagicMock(), {"fake", "fake"})
mock_run_hpo_loop.assert_called() # call hpo_loop to run HPO
mock_hb.assert_called() # make hyperband
@e2e_pytest_unit
def test_run_hpo_w_dataset_smaller_than_batch(self, mocker, cls_task_env):
cls_task_env.model = None
hpo_runner = HpoRunner(cls_task_env, 2, 10, "fake_path")
mock_run_hpo_loop = mocker.patch("otx.cli.utils.hpo.run_hpo_loop")
mock_hb = mocker.patch("otx.cli.utils.hpo.HyperBand")
hpo_runner.run_hpo(mocker.MagicMock(), {"fake", "fake"})
mock_run_hpo_loop.assert_called() # call hpo_loop to run HPO
mock_hb.assert_called() # make hyperband
class TestTrainer:
@e2e_pytest_unit
def test_init(self, mocker, cls_template_path):
Trainer(
hp_config={"configuration": {"iterations": 10}},
report_func=mocker.stub(),
model_template=find_and_parse_model_template(cls_template_path),
data_roots={"fake": "fake"},
task_type=TaskType.CLASSIFICATION,
hpo_workdir="fake",
initial_weight_name="fake",
metric="fake",
)
@e2e_pytest_unit
def test_run(self, mocker, cls_template_path):
with TemporaryDirectory() as tmp_dir:
# prepare
trial_id = "1"
weight_format = "epoch_{}.pth"
hpo_workdir = Path(tmp_dir) / "hpo_dir"
            fake_project_path = Path(tmp_dir) / "fake_project"
fake_project_path.mkdir(parents=True)
for i in range(1, 5):
(fake_project_path / weight_format.format(i)).write_text("fake")
mock_get_train_task = mocker.patch.object(TaskEnvironmentManager, "get_train_task")
mock_task = mocker.MagicMock()
mock_task.project_path = str(fake_project_path)
mock_get_train_task.return_value = mock_task
mock_report_func = mocker.MagicMock()
mocker.patch("otx.cli.utils.hpo.get_dataset_adapter")
mocker.patch("otx.cli.utils.hpo.HpoDataset")
# run
trainer = Trainer(
hp_config={"configuration": {"iterations": 10}, "id": trial_id},
report_func=mock_report_func,
model_template=find_and_parse_model_template(cls_template_path),
data_roots=mocker.MagicMock(),
task_type=TaskType.CLASSIFICATION,
hpo_workdir=hpo_workdir,
initial_weight_name="fake",
metric="fake",
)
trainer.run()
# check
            mock_report_func.assert_called_once_with(0, 0, done=True)  # finalize report
assert hpo_workdir.exists() # make a directory to copy weight
for i in range(1, 5): # check model weights are copied
assert (hpo_workdir / "weight" / trial_id / weight_format.format(i)).exists()
mock_task.train.assert_called() # check task.train() is called
class TestHpoCallback:
@e2e_pytest_unit
def test_init(self, mocker):
HpoCallback(mocker.MagicMock(), "fake", 3, mocker.MagicMock())
@e2e_pytest_unit
@pytest.mark.parametrize("max_epoch", [-3, 0])
def test_init_wrong_max_epoch(self, mocker, max_epoch):
with pytest.raises(ValueError):
HpoCallback(mocker.MagicMock(), "fake", max_epoch, mocker.MagicMock())
@e2e_pytest_unit
def test_call(self, mocker):
mock_report_func = mocker.MagicMock()
hpo_call_back = HpoCallback(report_func=mock_report_func, metric="fake", max_epoch=50, task=mocker.MagicMock())
hpo_call_back(progress=20, score=100)
mock_report_func.assert_called_once_with(progress=10, score=100)
@e2e_pytest_unit
def test_call_and_get_stop_flag(self, mocker):
mock_report_func = mocker.MagicMock()
mock_report_func.return_value = TrialStatus.STOP
mock_task = mocker.MagicMock()
hpo_call_back = HpoCallback(report_func=mock_report_func, metric="fake", max_epoch=50, task=mock_task)
hpo_call_back(progress=20, score=100)
mock_task.cancel_training.assert_called_once_with()
@e2e_pytest_unit
def test_not_copy_report_func(self, mocker):
mock_report_func = mocker.MagicMock()
hpo_call_back = HpoCallback(report_func=mock_report_func, metric="fake", max_epoch=50, task=mocker.MagicMock())
new_hpo_call_back = deepcopy(hpo_call_back)
new_hpo_call_back(progress=20, score=100)
mock_report_func.assert_called_once()
class TestHpoDataset:
@e2e_pytest_unit
def test_init(self, mocker):
hpo_dataset = HpoDataset(fullset=mocker.MagicMock(), config={"train_environment": {"subset_ratio": 0.5}})
assert hpo_dataset.subset_ratio == 0.5
@e2e_pytest_unit
@pytest.mark.parametrize("subset_ratio", [0.1, 0.5, 1])
def test_get_subset(self, mocker, subset_ratio):
mock_fullset = mocker.MagicMock()
mock_fullset.get_subset.return_value = [i for i in range(10)]
config = {"train_environment": {"subset_ratio": subset_ratio}}
hpo_dataset = HpoDataset(fullset=mock_fullset, config=config)
hpo_sub_dataset = hpo_dataset.get_subset(Subset.TRAINING)
num_hpo_sub_dataset = len(hpo_sub_dataset)
assert num_hpo_sub_dataset == round(10 * subset_ratio)
for i in range(num_hpo_sub_dataset):
hpo_sub_dataset[i]
@e2e_pytest_unit
def test_len_before_get_subset(self):
hpo_dataset = HpoDataset(fullset=range(10), config={"train_environment": {"subset_ratio": 0.5}})
assert len(hpo_dataset) == 10
@e2e_pytest_unit
def test_getitem_before_get_subset(self):
hpo_dataset = HpoDataset(fullset=range(10), config={"train_environment": {"subset_ratio": 0.5}})
for _ in hpo_dataset:
pass
@e2e_pytest_unit
def test_run_hpo(mocker, mock_environment):
with TemporaryDirectory() as tmp_dir:
# prepare
save_model_to_path = Path(tmp_dir) / "fake"
mock_get_best_hpo_weight = mocker.patch("otx.cli.utils.hpo.get_best_hpo_weight")
mock_get_best_hpo_weight.return_value = "mock_best_weight_path"
def mock_run_hpo(*args, **kwargs):
return {"config": {"a.b": 1, "c.d": 2}, "id": "1"}
mock_hpo_runner_instance = mocker.MagicMock()
mock_hpo_runner_instance.run_hpo.side_effect = mock_run_hpo
mock_hpo_runner_class = mocker.patch("otx.cli.utils.hpo.HpoRunner")
mock_hpo_runner_class.return_value = mock_hpo_runner_instance
def mock_read_model(args1, path, arg2):
return path
mocker.patch("otx.cli.utils.hpo.read_model", mock_read_model)
mock_args = mocker.MagicMock()
mock_args.hpo_time_ratio = "4"
mock_args.save_model_to = save_model_to_path
mock_environment.model_template.task_type = TaskType.CLASSIFICATION
# run
environment = run_hpo(mock_args, mock_environment, mocker.MagicMock(), mocker.MagicMock())
# check
mock_hpo_runner_instance.run_hpo.assert_called() # Check that HpoRunner.run_hpo is called
env_hp = environment.get_hyper_parameters() # Check that best HP is applied well.
assert env_hp.a.b == 1
assert env_hp.c.d == 2
assert environment.model == "mock_best_weight_path" # check that best model weight is used
@e2e_pytest_unit
def test_run_hpo_not_supported_task(mocker, action_task_env):
mock_hpo_runner_instance = mocker.MagicMock()
mock_hpo_runner_class = mocker.patch("otx.cli.utils.hpo.HpoRunner")
mock_hpo_runner_class.return_value = mock_hpo_runner_instance
run_hpo(mocker.MagicMock(), action_task_env, mocker.MagicMock(), mocker.MagicMock())
mock_hpo_runner_instance.run_hpo.assert_not_called()
@e2e_pytest_unit
def test_get_best_hpo_weight():
with TemporaryDirectory() as tmp_dir:
# prepare
hpo_dir = Path(tmp_dir) / "hpo"
weight_path = hpo_dir / "weight"
weight_path.mkdir(parents=True)
score = {"score": {str(i): i for i in range(1, 11)}}
bracket_0_dir = hpo_dir / "0"
bracket_0_dir.mkdir(parents=True)
for trial_num in range(2):
with (bracket_0_dir / f"{trial_num}.json").open("w") as f:
json.dump(score, f)
trial_weight_path = weight_path / str(trial_num)
trial_weight_path.mkdir(parents=True)
for i in range(1, 11):
(trial_weight_path / f"epoch_{i}.pth").write_text("fake")
assert get_best_hpo_weight(hpo_dir, "1") == str(weight_path / "1" / "epoch_10.pth")
@e2e_pytest_unit
def test_get_best_hpo_weight_not_exist():
with TemporaryDirectory() as tmp_dir:
# prepare
hpo_dir = Path(tmp_dir) / "hpo"
weight_path = hpo_dir / "weight"
weight_path.mkdir(parents=True)
score = {"score": {str(i): i for i in range(1, 11)}}
bracket_0_dir = hpo_dir / "0"
bracket_0_dir.mkdir(parents=True)
for trial_num in range(1):
with (bracket_0_dir / f"{trial_num}.json").open("w") as f:
json.dump(score, f)
trial_weight_path = weight_path / str(trial_num)
trial_weight_path.mkdir(parents=True)
for i in range(1, 11):
(trial_weight_path / f"epoch_{i}.pth").write_text("fake")
assert get_best_hpo_weight(hpo_dir, "1") is None
@e2e_pytest_unit
def test_run_trial(mocker):
mock_run = mocker.patch.object(Trainer, "run")
run_trial(
mocker.MagicMock(),
mocker.MagicMock(),
mocker.MagicMock(),
mocker.MagicMock(),
mocker.MagicMock(),
mocker.MagicMock(),
mocker.MagicMock(),
mocker.MagicMock(),
)
mock_run.assert_called()
| [
"[email protected]"
] | |
c2d01b5d527ad76dc88ed38af49bfe7bbfe6dcf6 | 711756b796d68035dc6a39060515200d1d37a274 | /output_cog/optimized_8838.py | 9260c4cc09a99e6f082dc00d995fd8e8dba08d41 | [] | no_license | batxes/exocyst_scripts | 8b109c279c93dd68c1d55ed64ad3cca93e3c95ca | a6c487d5053b9b67db22c59865e4ef2417e53030 | refs/heads/master | 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,842 | py | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((493.911, 488.674, 431.968), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_0" not in marker_sets:
s=new_marker_set('Cog2_0')
marker_sets["Cog2_0"]=s
s= marker_sets["Cog2_0"]
mark=s.place_marker((475.508, 455.302, 376.997), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_1" not in marker_sets:
s=new_marker_set('Cog2_1')
marker_sets["Cog2_1"]=s
s= marker_sets["Cog2_1"]
mark=s.place_marker((450.332, 428.101, 303.269), (0.89, 0.1, 0.1), 17.1475)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((511.721, 553.784, 312.087), (0.89, 0.1, 0.1), 18.4716)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((370.467, 331.485, 153.352), (0.89, 0.1, 0.1), 18.4716)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((470.44, 472.737, 395.274), (1, 1, 0), 18.4716)
if "Cog3_0" not in marker_sets:
s=new_marker_set('Cog3_0')
marker_sets["Cog3_0"]=s
s= marker_sets["Cog3_0"]
mark=s.place_marker((470.035, 473.456, 396.133), (1, 1, 0.2), 17.1475)
if "Cog3_1" not in marker_sets:
s=new_marker_set('Cog3_1')
marker_sets["Cog3_1"]=s
s= marker_sets["Cog3_1"]
mark=s.place_marker((455.899, 497.328, 393.871), (1, 1, 0.2), 17.1475)
if "Cog3_2" not in marker_sets:
s=new_marker_set('Cog3_2')
marker_sets["Cog3_2"]=s
s= marker_sets["Cog3_2"]
mark=s.place_marker((440.406, 520.657, 391.983), (1, 1, 0.2), 17.1475)
if "Cog3_3" not in marker_sets:
s=new_marker_set('Cog3_3')
marker_sets["Cog3_3"]=s
s= marker_sets["Cog3_3"]
mark=s.place_marker((416.798, 530.615, 380.803), (1, 1, 0.2), 17.1475)
if "Cog3_4" not in marker_sets:
s=new_marker_set('Cog3_4')
marker_sets["Cog3_4"]=s
s= marker_sets["Cog3_4"]
mark=s.place_marker((390.731, 526.037, 389.988), (1, 1, 0.2), 17.1475)
if "Cog3_5" not in marker_sets:
s=new_marker_set('Cog3_5')
marker_sets["Cog3_5"]=s
s= marker_sets["Cog3_5"]
mark=s.place_marker((397.479, 526.84, 417.217), (1, 1, 0.2), 17.1475)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((486.38, 466.908, 418.183), (1, 1, 0.4), 18.4716)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((305.393, 579.582, 418.162), (1, 1, 0.4), 18.4716)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((280.558, 440.831, 273.306), (0, 0, 0.8), 18.4716)
if "Cog4_0" not in marker_sets:
s=new_marker_set('Cog4_0')
marker_sets["Cog4_0"]=s
s= marker_sets["Cog4_0"]
mark=s.place_marker((280.558, 440.831, 273.306), (0, 0, 0.8), 17.1475)
if "Cog4_1" not in marker_sets:
s=new_marker_set('Cog4_1')
marker_sets["Cog4_1"]=s
s= marker_sets["Cog4_1"]
mark=s.place_marker((291.702, 438.689, 299.228), (0, 0, 0.8), 17.1475)
if "Cog4_2" not in marker_sets:
s=new_marker_set('Cog4_2')
marker_sets["Cog4_2"]=s
s= marker_sets["Cog4_2"]
mark=s.place_marker((316.181, 435.887, 313.52), (0, 0, 0.8), 17.1475)
if "Cog4_3" not in marker_sets:
s=new_marker_set('Cog4_3')
marker_sets["Cog4_3"]=s
s= marker_sets["Cog4_3"]
mark=s.place_marker((343.628, 439.822, 320.769), (0, 0, 0.8), 17.1475)
if "Cog4_4" not in marker_sets:
s=new_marker_set('Cog4_4')
marker_sets["Cog4_4"]=s
s= marker_sets["Cog4_4"]
mark=s.place_marker((364.005, 440.152, 340.631), (0, 0, 0.8), 17.1475)
if "Cog4_5" not in marker_sets:
s=new_marker_set('Cog4_5')
marker_sets["Cog4_5"]=s
s= marker_sets["Cog4_5"]
mark=s.place_marker((389.861, 436.923, 353.027), (0, 0, 0.8), 17.1475)
if "Cog4_6" not in marker_sets:
s=new_marker_set('Cog4_6')
marker_sets["Cog4_6"]=s
s= marker_sets["Cog4_6"]
mark=s.place_marker((416.211, 440.512, 364.536), (0, 0, 0.8), 17.1475)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((214.743, 581.915, 287.281), (0, 0, 0.8), 18.4716)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((628.359, 312.906, 444.516), (0, 0, 0.8), 18.4716)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((426.157, 410.539, 339.96), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_0" not in marker_sets:
s=new_marker_set('Cog5_0')
marker_sets["Cog5_0"]=s
s= marker_sets["Cog5_0"]
mark=s.place_marker((426.157, 410.539, 339.96), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_1" not in marker_sets:
s=new_marker_set('Cog5_1')
marker_sets["Cog5_1"]=s
s= marker_sets["Cog5_1"]
mark=s.place_marker((426.739, 435.88, 326.043), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_2" not in marker_sets:
s=new_marker_set('Cog5_2')
marker_sets["Cog5_2"]=s
s= marker_sets["Cog5_2"]
mark=s.place_marker((439.363, 457.884, 311.913), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_3" not in marker_sets:
s=new_marker_set('Cog5_3')
marker_sets["Cog5_3"]=s
s= marker_sets["Cog5_3"]
mark=s.place_marker((464.298, 458.638, 296.806), (0.3, 0.3, 0.3), 17.1475)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((527.358, 512.25, 390.319), (0.3, 0.3, 0.3), 18.4716)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((411.601, 402.62, 198.132), (0.3, 0.3, 0.3), 18.4716)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((490.734, 478.286, 371.063), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_0" not in marker_sets:
s=new_marker_set('Cog6_0')
marker_sets["Cog6_0"]=s
s= marker_sets["Cog6_0"]
mark=s.place_marker((490.987, 478.577, 370.955), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_1" not in marker_sets:
s=new_marker_set('Cog6_1')
marker_sets["Cog6_1"]=s
s= marker_sets["Cog6_1"]
mark=s.place_marker((504.276, 471.752, 395.152), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_2" not in marker_sets:
s=new_marker_set('Cog6_2')
marker_sets["Cog6_2"]=s
s= marker_sets["Cog6_2"]
mark=s.place_marker((492.628, 466.326, 420.445), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_3" not in marker_sets:
s=new_marker_set('Cog6_3')
marker_sets["Cog6_3"]=s
s= marker_sets["Cog6_3"]
mark=s.place_marker((467.831, 473.617, 432.133), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_4" not in marker_sets:
s=new_marker_set('Cog6_4')
marker_sets["Cog6_4"]=s
s= marker_sets["Cog6_4"]
mark=s.place_marker((443.323, 485.645, 424.976), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_5" not in marker_sets:
s=new_marker_set('Cog6_5')
marker_sets["Cog6_5"]=s
s= marker_sets["Cog6_5"]
mark=s.place_marker((419.083, 491.629, 437.962), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_6" not in marker_sets:
s=new_marker_set('Cog6_6')
marker_sets["Cog6_6"]=s
s= marker_sets["Cog6_6"]
mark=s.place_marker((392.367, 497.301, 431.441), (0.21, 0.49, 0.72), 17.1475)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((425.108, 418.691, 419.148), (0.21, 0.49, 0.72), 18.4716)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((361.001, 578.114, 440.176), (0.21, 0.49, 0.72), 18.4716)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((458.594, 399.335, 393.917), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_0" not in marker_sets:
s=new_marker_set('Cog7_0')
marker_sets["Cog7_0"]=s
s= marker_sets["Cog7_0"]
mark=s.place_marker((464.32, 416.134, 374.893), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_1" not in marker_sets:
s=new_marker_set('Cog7_1')
marker_sets["Cog7_1"]=s
s= marker_sets["Cog7_1"]
mark=s.place_marker((478.785, 453.077, 333.308), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_2" not in marker_sets:
s=new_marker_set('Cog7_2')
marker_sets["Cog7_2"]=s
s= marker_sets["Cog7_2"]
mark=s.place_marker((493.557, 489.842, 291.542), (0.7, 0.7, 0.7), 17.1475)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((560.619, 500.107, 336.636), (0.7, 0.7, 0.7), 18.4716)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((464.39, 530.715, 199.96), (0.7, 0.7, 0.7), 18.4716)
if "Cog8_0" not in marker_sets:
s=new_marker_set('Cog8_0')
marker_sets["Cog8_0"]=s
s= marker_sets["Cog8_0"]
mark=s.place_marker((537.143, 467.259, 408.03), (1, 0.5, 0), 17.1475)
if "Cog8_1" not in marker_sets:
s=new_marker_set('Cog8_1')
marker_sets["Cog8_1"]=s
s= marker_sets["Cog8_1"]
mark=s.place_marker((532.028, 454.647, 383.324), (1, 0.5, 0), 17.1475)
if "Cog8_2" not in marker_sets:
s=new_marker_set('Cog8_2')
marker_sets["Cog8_2"]=s
s= marker_sets["Cog8_2"]
mark=s.place_marker((519.593, 441.95, 361.16), (1, 0.5, 0), 17.1475)
if "Cog8_3" not in marker_sets:
s=new_marker_set('Cog8_3')
marker_sets["Cog8_3"]=s
s= marker_sets["Cog8_3"]
mark=s.place_marker((509.019, 427.177, 338.575), (1, 0.5, 0), 17.1475)
if "Cog8_4" not in marker_sets:
s=new_marker_set('Cog8_4')
marker_sets["Cog8_4"]=s
s= marker_sets["Cog8_4"]
mark=s.place_marker((497.054, 412.683, 316.536), (1, 0.5, 0), 17.1475)
if "Cog8_5" not in marker_sets:
s=new_marker_set('Cog8_5')
marker_sets["Cog8_5"]=s
s= marker_sets["Cog8_5"]
mark=s.place_marker((484.007, 398.028, 294.833), (1, 0.5, 0), 17.1475)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((483.226, 433.15, 365.78), (1, 0.6, 0.1), 18.4716)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((483.688, 360.18, 220.403), (1, 0.6, 0.1), 18.4716)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| [
"[email protected]"
] | |
1b392e96dea3bc4c7baebfd90006596440762416 | 30ba09b0a746b1cef4c817fd87f1f310904a6d88 | /test/test_dialogs/test_title_screen.py | 770f143c244c1bfcf20b45cf482ac9f12c0d5667 | [
"MIT",
"Artistic-1.0-Perl"
] | permissive | krother/tilegamelib | 69e610959294ed950f49fefd8e7d50ceb1ba09ed | a8165f27b166acca37f81e8432a70e0b2e028338 | refs/heads/master | 2022-07-21T20:07:35.031330 | 2022-07-10T20:18:19 | 2022-07-10T20:18:19 | 1,045,043 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,172 | py |
from unittest import TestCase, main
from conftest import MENU_RECT, TITLE_IMAGE, TITLE_RECT
from tilegamelib.dialogs.title_screen import TitleScreen
from tilegamelib.events import QUIT_EVENT, EventGenerator
from tilegamelib.menu import VERTICAL_MOVES
class TitleScreenTests(TestCase):
def setUp(self):
self.events = EventGenerator()
self.result = ''
def one(self):
self.result += '1'
def two(self):
self.result += '2'
def three(self):
self.result += '3'
def test_show_title(self, screen):
"""Displays a main menu."""
menu = [
('One', self.one),
('Two', self.two),
('Three', self.three),
]
title = TitleScreen(screen, self.events,
TITLE_RECT, TITLE_IMAGE, menu, MENU_RECT, VERTICAL_MOVES)
self.events.add_scripted_keys([K_UP, K_RETURN, K_DOWN, K_DOWN,
K_SPACE, K_RETURN, K_UP, K_RETURN, K_UP], converter=int)
self.events.add_scripted_event(QUIT_EVENT)
title.run()
self.assertEqual(self.result, '3')
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
3742ff611dc02b777b4f83d39a8aade1e7dc7cfc | a2cd609a52eb5be16a248c054fb014394f12d344 | /build/globalplanner/catkin_generated/pkg.installspace.context.pc.py | a04cf5b45702ff3cf2cd28d70712504d7506b0de | [] | no_license | rfzeg/simon_thesis_ws | c5e6d6b20ee63010ffede91d17ba144527e5f6c5 | dc79635f628dade14cab1a631cc4eb24aee1762c | refs/heads/master | 2021-09-16T12:43:41.270235 | 2018-06-20T12:40:57 | 2018-06-20T12:40:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 550 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/simoneforno/simon_ws/install/include".split(';') if "/home/simoneforno/simon_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;pluginlib;costmap_2d;base_local_planner;nav_core".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lglobalplanner".split(';') if "-lglobalplanner" != "" else []
PROJECT_NAME = "globalplanner"
PROJECT_SPACE_DIR = "/home/simoneforno/simon_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
] | |
6ba857f45be0a377186287819331c0f00d600dc2 | 411eff94020c192d5e5f657fa6012232ab1d051c | /game/src/coginvasion/attack/LinearProjectile.py | f5e3003258b6aada6e2dd3d56c10f9ceea59b384 | [] | no_license | xMakerx/cio-src | 48c9efe7f9a1bbf619a4c95a4198aaace78b8491 | 60b2bdf2c4a24d506101fdab1f51752d0d1861f8 | refs/heads/master | 2023-02-14T03:12:51.042106 | 2021-01-15T14:02:10 | 2021-01-15T14:02:10 | 328,268,776 | 1 | 0 | null | 2021-01-15T15:15:35 | 2021-01-09T23:51:37 | Python | UTF-8 | Python | false | false | 484 | py | from src.coginvasion.attack.BaseProjectile import BaseProjectile
from src.coginvasion.attack.LinearProjectileShared import LinearProjectileShared
class LinearProjectile(BaseProjectile, LinearProjectileShared):
def __init__(self, cr):
BaseProjectile.__init__(self, cr)
LinearProjectileShared.__init__(self)
def onSpawn(self):
self.playLinear()
def disable(self):
LinearProjectileShared.cleanup(self)
BaseProjectile.disable(self)
| [
"[email protected]"
] | |
575dcad18b12804754dbe2c263b5eda9c4da5b29 | 2a4290c36832e7080faa4104d58083c29ed1ea09 | /experiments/model_cat_wgan_old.py | ecc8c9353358f06089f1942e0c62a1566ac4eba8 | [] | no_license | jerryli27/my_dtn | 0380e22fb7892f5d46084339a5edb24c8ed5b8c8 | 54b16f403a480c35d5ae331dbbfd0efed53880b9 | refs/heads/master | 2021-01-19T17:17:19.787195 | 2017-03-05T21:48:06 | 2017-03-05T21:48:06 | 82,433,878 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,037 | py | import tensorflow as tf
import tensorflow.contrib.slim as slim
class DTN(object):
"""Domain Transfer Network
"""
def __init__(self, mode='train', learning_rate=0.0003, num_classes = 10, hw = 32, alpha=15, beta=15):
self.mode = mode
self.learning_rate = learning_rate
self.num_classes = num_classes
self.hw = hw
self.alpha = alpha
self.beta=beta
def content_extractor(self, images, reuse=False):
# images: (batch, 32, 32, 3) or (batch, 32, 32, 1)
if images.get_shape()[3] == 1:
# For mnist dataset, replicate the gray scale image 3 times.
images = tf.image.grayscale_to_rgb(images)
with tf.variable_scope('content_extractor', reuse=reuse):
with slim.arg_scope([slim.conv2d], padding='SAME', activation_fn=None, weights_initializer=tf.contrib.layers.xavier_initializer()):
with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True,
activation_fn=tf.nn.relu, is_training=(self.mode=='train' or self.mode=='pretrain')):
net = slim.conv2d(images, 64, [3, 3], stride=1, scope='conv1_1') # (batch_size, 32, 32, 64)
net = slim.batch_norm(net, scope='bn1_1')
net = slim.conv2d(net, 64, [3, 3], stride=2, scope='conv1_2') # (batch_size, 16, 16, 64)
net = slim.batch_norm(net, scope='bn1_2')
net = slim.conv2d(net, 128, [3, 3], stride=1, scope='conv2_1') # (batch_size, 16, 16, 128)
net = slim.batch_norm(net, scope='bn2_1')
net = slim.conv2d(net, 128, [3, 3], stride=2, scope='conv2_2') # (batch_size, 8, 8, 128)
net = slim.batch_norm(net, scope='bn2_2')
net = slim.conv2d(net, 256, [3, 3], stride=1, scope='conv3_1') # (batch_size, 8, 8, 256)
net = slim.batch_norm(net, scope='bn3_1')
net = slim.conv2d(net, 256, [3, 3], stride=2, scope='conv3_2') # (batch_size, 4, 4, 256)
net = slim.batch_norm(net, scope='bn3_2')
net = slim.conv2d(net, 512, [3, 3], stride=1, scope='conv4_1') # (batch_size, 4, 4, 512)
net = slim.batch_norm(net, scope='bn4_1')
net = slim.conv2d(net, 512, [4, 4], stride=2, padding='VALID', scope='conv4_2') # (batch_size, 1, 1, 512)
net = slim.batch_norm(net, activation_fn=tf.nn.tanh, scope='bn4_2')
if self.mode == 'pretrain':
net = slim.conv2d(net, self.num_classes, [1, 1], padding='VALID', scope='out')
net = slim.flatten(net)
return net
def generator(self, inputs, reuse=False):
# inputs: (batch, 1, 1, 128)
with tf.variable_scope('generator', reuse=reuse):
with slim.arg_scope([slim.conv2d_transpose], padding='SAME', activation_fn=None,
stride=2, weights_initializer=tf.contrib.layers.xavier_initializer()):
with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True,
activation_fn=tf.nn.relu, is_training=(self.mode=='train')):
with slim.arg_scope([slim.conv2d], padding='SAME', activation_fn=None,
stride=1, weights_initializer=tf.contrib.layers.xavier_initializer()):
net = slim.conv2d_transpose(inputs, 512, [4, 4], padding='VALID',
scope='conv_transpose1_1') # (batch_size, 4, 4, 512)
net = slim.batch_norm(net, scope='bn1_1')
net = slim.conv2d(net, 512, [3, 3], scope='conv_transpose1_2') # (batch_size, 4, 4, 512)
net = slim.batch_norm(net, scope='bn1_2')
net = slim.conv2d_transpose(net, 256, [3, 3], scope='conv_transpose2_1') # (batch_size, 8, 8, 256)
net = slim.batch_norm(net, scope='bn2')
net = slim.conv2d(net, 256, [3, 3], scope='conv_transpose2_2') # (batch_size, 4, 4, 512)
net = slim.batch_norm(net, scope='bn2_2')
net = slim.conv2d_transpose(net, 128, [3, 3], scope='conv_transpose3_1') # (batch_size, 16, 16, 128)
net = slim.batch_norm(net, scope='bn3')
net = slim.conv2d(net, 128, [3, 3], scope='conv_transpose3_2') # (batch_size, 4, 4, 512)
net = slim.batch_norm(net, scope='bn3_2')
net = slim.conv2d_transpose(net, 3, [3, 3], activation_fn=tf.nn.tanh, scope='conv_transpose4') # (batch_size, 32, 32, 3)
return net
def discriminator(self, images, reuse=False):
# images: (batch, 32, 32, 3)
with tf.variable_scope('discriminator', reuse=reuse):
with slim.arg_scope([slim.conv2d], padding='SAME', activation_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer()):
with slim.arg_scope([slim.batch_norm], decay=0.95, center=True, scale=True,
activation_fn=tf.nn.relu, is_training=(self.mode=='train')):
net = slim.conv2d(images, 128, [3, 3], stride=1, activation_fn=tf.nn.relu,
scope='conv1_1') # (batch_size, 32, 32, 128)
net = slim.batch_norm(net, scope='bn1_1')
net = slim.conv2d(net, 128, [3, 3], stride=2, scope='conv1_2') # (batch_size, 16, 16, 128)
net = slim.batch_norm(net, scope='bn1_2')
net = slim.conv2d(net, 256, [3, 3], stride=1, scope='conv2_1') # (batch_size, 16, 16, 256)
net = slim.batch_norm(net, scope='bn2_1')
net = slim.conv2d(net, 256, [3, 3], stride=2, scope='conv2_2') # (batch_size, 8, 8, 256)
net = slim.batch_norm(net, scope='bn2_2')
net = slim.conv2d(net, 512, [3, 3], stride=1, scope='conv3_1') # (batch_size, 8, 8, 512)
net = slim.batch_norm(net, scope='bn3_1')
net = slim.conv2d(net, 512, [3, 3], stride=2, scope='conv3_2') # (batch_size, 4, 4, 512)
net = slim.batch_norm(net, scope='bn3_2')
net = slim.conv2d(net, 1, [4, 4], padding='VALID', scope='conv4') # (batch_size, 1, 1, 1)
net = slim.flatten(net)
return net
def build_model(self):
if self.mode == 'pretrain':
self.images = tf.placeholder(tf.float32, [None, self.hw , self.hw , 3], 'svhn_images')
self.labels = tf.placeholder(tf.int64, [None], 'svhn_labels')
# logits and accuracy
self.logits = self.content_extractor(self.images)
self.pred = tf.argmax(self.logits, 1)
self.correct_pred = tf.equal(self.pred, self.labels)
self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32))
# loss and train op
self.loss = slim.losses.sparse_softmax_cross_entropy(self.logits, self.labels)
self.optimizer = tf.train.AdamOptimizer(self.learning_rate)
self.train_op = slim.learning.create_train_op(self.loss, self.optimizer)
# summary op
loss_summary = tf.summary.scalar('classification_loss', self.loss)
accuracy_summary = tf.summary.scalar('accuracy', self.accuracy)
self.summary_op = tf.summary.merge([loss_summary, accuracy_summary])
elif self.mode == 'eval':
self.images = tf.placeholder(tf.float32, [None, self.hw , self.hw , 3], 'svhn_images')
# source domain (svhn to mnist)
self.fx = self.content_extractor(self.images)
self.sampled_images = self.generator(self.fx)
elif self.mode == 'train':
self.src_images = tf.placeholder(tf.float32, [None, self.hw , self.hw , 3], 'svhn_images')
self.trg_images = tf.placeholder(tf.float32, [None, self.hw , self.hw , 3], 'mnist_images')
# source domain (svhn to mnist)
self.fx = self.content_extractor(self.src_images)
self.fake_images = self.generator(self.fx)
self.logits = self.discriminator(self.fake_images)
self.fgfx = self.content_extractor(self.fake_images, reuse=True)
# loss
# self.d_loss_src = slim.losses.sigmoid_cross_entropy(self.logits, tf.zeros_like(self.logits))
# self.g_loss_src = slim.losses.sigmoid_cross_entropy(self.logits, tf.ones_like(self.logits))
self.d_loss_src = tf.reduce_mean(self.logits)
self.g_loss_src = - tf.reduce_mean(self.logits)
self.f_loss_src = tf.reduce_mean(tf.square(self.fx - self.fgfx)) * self.alpha
# optimizer
self.d_optimizer_src = tf.train.AdamOptimizer(self.learning_rate)
self.g_optimizer_src = tf.train.AdamOptimizer(self.learning_rate)
self.f_optimizer_src = tf.train.AdamOptimizer(self.learning_rate)
t_vars = tf.trainable_variables()
d_vars = [var for var in t_vars if 'discriminator' in var.name]
g_vars = [var for var in t_vars if 'generator' in var.name]
f_vars = [var for var in t_vars if 'content_extractor' in var.name]
# TODO: add weight clipping
self.d_clip_ops = [var.assign(tf.clip_by_value(var, -0.01, 0.01)) for var in d_vars]
# train op
with tf.name_scope('source_train_op'):
self.d_train_op_src = slim.learning.create_train_op(self.d_loss_src, self.d_optimizer_src, variables_to_train=d_vars)
self.g_train_op_src = slim.learning.create_train_op(self.g_loss_src, self.g_optimizer_src, variables_to_train=g_vars)
self.f_train_op_src = slim.learning.create_train_op(self.f_loss_src, self.f_optimizer_src, variables_to_train=f_vars)
# summary op
d_loss_src_summary = tf.summary.scalar('src_d_loss', self.d_loss_src)
g_loss_src_summary = tf.summary.scalar('src_g_loss', self.g_loss_src)
f_loss_src_summary = tf.summary.scalar('src_f_loss', self.f_loss_src)
origin_images_summary = tf.summary.image('src_origin_images', self.src_images)
sampled_images_summary = tf.summary.image('src_sampled_images', self.fake_images)
self.summary_op_src = tf.summary.merge([d_loss_src_summary, g_loss_src_summary,
f_loss_src_summary, origin_images_summary,
sampled_images_summary])
# target domain (mnist)
self.fx = self.content_extractor(self.trg_images, reuse=True)
self.reconst_images = self.generator(self.fx, reuse=True)
self.logits_fake = self.discriminator(self.reconst_images, reuse=True)
self.logits_real = self.discriminator(self.trg_images, reuse=True)
# loss
# self.d_loss_fake_trg = slim.losses.sigmoid_cross_entropy(self.logits_fake, tf.zeros_like(self.logits_fake))
# self.d_loss_real_trg = slim.losses.sigmoid_cross_entropy(self.logits_real, tf.ones_like(self.logits_real))
self.d_loss_fake_trg = tf.reduce_mean(self.logits_fake)
self.d_loss_real_trg = - tf.reduce_mean(self.logits_real)
self.d_loss_trg = self.d_loss_fake_trg + self.d_loss_real_trg
self.g_loss_fake_trg = - tf.reduce_mean(self.logits_fake)
self.g_loss_const_trg = tf.reduce_mean(tf.square(self.trg_images - self.reconst_images)) * self.beta
self.g_loss_trg = self.g_loss_fake_trg + self.g_loss_const_trg
# optimizer
self.d_optimizer_trg = tf.train.AdamOptimizer(self.learning_rate)
self.g_optimizer_trg = tf.train.AdamOptimizer(self.learning_rate)
# train op
# TODO: add weight clipping
with tf.name_scope('target_train_op'):
self.d_train_op_trg = slim.learning.create_train_op(self.d_loss_trg, self.d_optimizer_trg, variables_to_train=d_vars)
self.g_train_op_trg = slim.learning.create_train_op(self.g_loss_trg, self.g_optimizer_trg, variables_to_train=g_vars)
# summary op
d_loss_fake_trg_summary = tf.summary.scalar('trg_d_loss_fake', self.d_loss_fake_trg)
d_loss_real_trg_summary = tf.summary.scalar('trg_d_loss_real', self.d_loss_real_trg)
d_loss_trg_summary = tf.summary.scalar('trg_d_loss', self.d_loss_trg)
g_loss_fake_trg_summary = tf.summary.scalar('trg_g_loss_fake', self.g_loss_fake_trg)
g_loss_const_trg_summary = tf.summary.scalar('trg_g_loss_const', self.g_loss_const_trg)
g_loss_trg_summary = tf.summary.scalar('trg_g_loss', self.g_loss_trg)
origin_images_summary = tf.summary.image('trg_origin_images', self.trg_images)
sampled_images_summary = tf.summary.image('trg_reconstructed_images', self.reconst_images)
self.summary_op_trg = tf.summary.merge([d_loss_trg_summary, g_loss_trg_summary,
d_loss_fake_trg_summary, d_loss_real_trg_summary,
g_loss_fake_trg_summary, g_loss_const_trg_summary,
origin_images_summary, sampled_images_summary])
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
| [
"[email protected]"
] | |
a6cac7d77145dd89deb708f5be1407ffe096d320 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_suffocated.py | 8b356fe7d1d8ac09890ee20e1e5dc5a7719a80ba | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py |
from xai.brain.wordbase.verbs._suffocate import _SUFFOCATE
#calss header
class _SUFFOCATED(_SUFFOCATE, ):
def __init__(self,):
_SUFFOCATE.__init__(self)
self.name = "SUFFOCATED"
self.specie = 'verbs'
self.basic = "suffocate"
self.jsondata = {}
| [
"[email protected]"
] | |
280b60d0e3b097e45c49ec6db0db8f323a43d3c5 | 9568dee77459304ad0f7e01c9dea9432c11377d0 | /maxlike_sim_filehandler.py | c91d4799c888691d93117cefde6a1784acc6f66d | [
"MIT"
] | permissive | lbaumo/wtgpipeline | c101c7e7ec1491a1c40cbe14102662770641bb9a | 73de01736e33769c09c4467e3c040545d7070407 | refs/heads/master | 2021-06-20T14:40:38.263891 | 2017-08-14T21:08:24 | 2017-08-14T21:08:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,688 | py | #############################
# Handles loading files for a simulation run
#############################
import ldac, cPickle, numpy as np
import astropy.io.fits as pyfits
import pdzfile_utils, nfwutils, varcontainer
#############################
__cvs_id__ = "$Id$"
#############################
class SimFilehandler(object):
###############
def addCLOps(self, parser):
parser.add_option('-i', '--inputcat', dest='inputCatFile',
help='Simulation format cat containing shape info')
parser.add_option('-b', '--bpz', dest='inputBPZ',
help='BPZ file objects were drawn from')
parser.add_option('-p', '--pdzfile', dest='inputPDZ',
help='Simulation PDZ file')
#############################
def createOptions(self, inputCatFile, inputBPZ, inputPDZ, options = None, args = None):
if options is None:
options = varcontainer.VarContainer()
options.inputCatFile = inputCatFile
options.inputBPZ = inputBPZ
options.inputPDZ = inputPDZ
return options, args
#############################
def readData(self, manager):
options = manager.options
manager.open('inputcat', options.inputCatFile, ldac.openObjectFile)
manager.concentration = manager.inputcat.hdu.header['CONCEN']
manager.zcluster = manager.inputcat.hdu.header['Z']
manager.store('r500', nfwutils.rdelta, manager.inputcat.hdu.header['R_S'],
manager.concentration, 500)
bpz = ldac.openObjectFile(options.inputBPZ, 'STDTAB')
if bpz is None:
bpz = ldac.openObjectFile(options.inputBPZ, 'COS30PHOTZ')
manager.matchedBPZ = bpz.matchById(manager.inputcat, 'z_id')
bpz = manager.matchedBPZ
newcols = [pyfits.Column(name = 'z_b', format = 'E', array = bpz['BPZ_Z_B']),
pyfits.Column(name='odds', format = 'E', array = bpz['BPZ_ODDS']),
pyfits.Column(name='z_t', format = 'E', array = bpz['BPZ_T_B'])]
inputcat = ldac.LDACCat(pyfits.new_table(pyfits.ColDefs(newcols) + manager.inputcat.hdu.columns))
manager.replace('inputcat', inputcat)
manager.open('pdzmanager', options.inputPDZ, pdzfile_utils.PDZManager.open)
pdzrange, associatedPDZs = manager.pdzmanager.associatePDZ(manager.inputcat['z_id'])
pdzrange = pdzrange.astype(np.float64)
associatedPDZs = associatedPDZs.astype(np.float64)
manager.pdzrange = pdzrange
manager.pdz = associatedPDZs
manager.replace('pdzmanager', None)
| [
"[email protected]"
] | |
adeb59d010bdfae9169f837cf998303af2c9df2e | 463c053bcf3f4a7337b634890720ea9467f14c87 | /rllib/models/jax/misc.py | 02ebd98fd261b684769013ef6b2f7fd3ba31bc62 | [
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] | permissive | pdames/ray | e8faddc4440976211a6bcead8f8b6e62c1dcda01 | 918d3601c6519d333f10910dc75eb549cbb82afa | refs/heads/master | 2023-01-23T06:11:11.723212 | 2022-05-06T22:55:59 | 2022-05-06T22:55:59 | 245,515,407 | 1 | 1 | Apache-2.0 | 2023-01-14T08:02:21 | 2020-03-06T20:59:04 | Python | UTF-8 | Python | false | false | 2,287 | py | import time
from typing import Callable, Optional
from ray.rllib.utils.framework import get_activation_fn, try_import_jax
jax, flax = try_import_jax()
nn = np = None
if flax:
import flax.linen as nn
import jax.numpy as np
class SlimFC:
"""Simple JAX version of a fully connected layer."""
def __init__(
self,
in_size,
out_size,
initializer: Optional[Callable] = None,
activation_fn: Optional[str] = None,
use_bias: bool = True,
prng_key: Optional[jax.random.PRNGKey] = None,
name: Optional[str] = None,
):
"""Initializes a SlimFC instance.
Args:
in_size (int): The input size of the input data that will be passed
into this layer.
out_size (int): The number of nodes in this FC layer.
            initializer (Optional[Callable]): Weight initializer for the
                underlying flax Dense layer's kernel; defaults to Xavier
                (Glorot) uniform when None.
            activation_fn (str): An activation string specifier, e.g. "relu".
            use_bias (bool): Whether to add biases to the dot product or not.
prng_key (Optional[jax.random.PRNGKey]): An optional PRNG key to
use for initialization. If None, create a new random one.
name (Optional[str]): An optional name for this layer.
"""
# By default, use Glorot uniform initializer.
if initializer is None:
initializer = nn.initializers.xavier_uniform()
self.prng_key = prng_key or jax.random.PRNGKey(int(time.time()))
_, self.prng_key = jax.random.split(self.prng_key)
# Create the flax dense layer.
self._dense = nn.Dense(
out_size,
use_bias=use_bias,
kernel_init=initializer,
name=name,
)
# Initialize it.
dummy_in = jax.random.normal(self.prng_key, (in_size,), dtype=np.float32)
_, self.prng_key = jax.random.split(self.prng_key)
self._params = self._dense.init(self.prng_key, dummy_in)
# Activation function (if any; default=None (linear)).
self.activation_fn = get_activation_fn(activation_fn, "jax")
def __call__(self, x):
out = self._dense.apply(self._params, x)
if self.activation_fn:
out = self.activation_fn(out)
return out
| [
"[email protected]"
] | |
08b869e8d8559db4d477d360515bc7c627507ca5 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03129/s302066927.py | 0887597072b00c7f90e5a7d87138c543cb18fc6f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | import sys
N, K = map(int, input().split())
ans = 0
for i in range(1,N+1,2):
ans = ans + 1
if ans >= K:
print("YES")
sys.exit()
print("NO")
| [
"[email protected]"
] | |
77ec1a307afebd5162f2ea1d4eaaece759d3fd15 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5753053697277952_0/Python/BIPUL/a.py | 3b8209e6b92d12243001a0707948776e81fe2d81 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,955 | py | ############### Author: Bipul Ranjan @ranjanbipul ###############
import sys
import time
import os
import math
import operator
import random
from functools import lru_cache
from decimal import Decimal as D
from fractions import Fraction as F
#sys.setrecursionlimit(10000)
#@lru_cache(maxsize=None)
MOD = 1000000007
################################################################
QNO = 'a' #SET QUESTION NUMBER
FIN,FOUT = QNO+'.in.txt',QNO+'.out.txt'
FIN = QNO.capitalize()+'-small-attempt0.in'
#FIN = QNO+'.sample.txt'
#FIN = QNO.capitalize()+'-large.in'
fin = open(FIN)
fout = open(FOUT,'w')
sys.stdin = fin
######################## PROGRAM START ##########################
def solve(a,n):
return len(a)
for nu in range(int(input())):
n = int(input())
a = [int(i) for i in input().strip().split(" ")]
t = 0
for i in a: t+=i
print("Case #{0}:".format(nu+1),file=fout,end=" ")
while t>0:
#print(t)
s = []
if t==2:
for i in range(n):
if a[i]==1:
s.append(i)
t-=1
a[i]-=1
if len(s)==2: break
elif t==3:
for i in range(n):
if a[i]==1:
s.append(i)
t-=1
a[i]-=1
break
else:
m = 0
for i in range(1,n):
if a[i]>a[m]: m =i
s.append(m)
t-=1
a[m]-=1
m = 0
for i in range(1,n):
if a[i]>a[m]: m =i
s.append(m)
t-=1
a[m]-=1
s = [chr(i+65) for i in s]
print("{0}".format("".join(s)),file=fout,end=" ")
print("",file=fout)
######################## PROGRAM END #############################
fin.close()
fout.close()
print("Program complete")
| [
"[email protected]"
] | |
ac6d4db8c9c186c23b51f9690ba1d838c76936cc | b9662213e143acea87e9af6f41143c50f23bec95 | /main.py | 3bcc258a67bac8bb863e85ccfce47cd210cce045 | [] | no_license | elprofedotti/python-mvc | 3442680dc20d7cb0ec9c3bf09442daecda4c8e4e | 9beee06de2e7a04ad872e037157a08dd2bb3dcc6 | refs/heads/main | 2023-08-04T08:17:03.886448 | 2021-04-20T12:57:17 | 2021-04-20T12:58:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | """Entry point."""
from app.app import Application
from app.views import MainPage
from app.models import Book
if __name__ == "__main__":
app = Application(MainPage(Book.list()))
app.run()
| [
"[email protected]"
] | |
235c672831f5d93373e32f38e38f5655cf5ab225 | fd48fba90bb227017ac2da9786d59f9b9130aaf0 | /digsby/src/contacts/BuddyListElement.py | f33b3380035eee14ea46a75e7f98d16ccdad3ccf | [
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | niterain/digsby | bb05b959c66b957237be68cd8576e3a7c0f7c693 | 16a62c7df1018a49eaa8151c0f8b881c7e252949 | refs/heads/master | 2021-01-18T10:07:10.244382 | 2013-11-03T02:48:25 | 2013-11-03T02:48:25 | 5,991,568 | 1 | 0 | null | 2013-11-03T02:48:26 | 2012-09-28T02:24:50 | Python | UTF-8 | Python | false | false | 630 | py | from util.primitives.funcs import isiterable
from common.actions import ActionMeta
class BuddyListElement(object):
__metaclass__ = ActionMeta
@property
def num_online(self):
from Contact import Contact
if isiterable(self) and not isinstance(self, Contact):
return sum(elt.num_online for elt in self)
else:
return int(self.online)
def find(self, obj):
assert isinstance(self, list)
return list.find(self, obj)
def chat(self):
import gui.imwin, wx
wx.CallAfter(lambda: gui.imwin.begin_conversation(self))
| [
"[email protected]"
] | |
0a67f283ef42dca44daf76bbc4a66abe4d8c48dd | 951a84f6fafa763ba74dc0ad6847aaf90f76023c | /Solu86.py | e7e90402710c612a3e2d5f06db28be58e7c62afc | [] | no_license | SakuraGo/leetcodepython3 | 37258531f1994336151f8b5c8aec5139f1ba79f8 | 8cedddb997f4fb6048b53384ac014d933b6967ac | refs/heads/master | 2020-09-27T15:55:28.353433 | 2020-02-15T12:00:02 | 2020-02-15T12:00:02 | 226,550,406 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,225 | py | # 86. Partition List
# Given a linked list and a value x, partition it so that all nodes with values
# less than x come before nodes with values greater than or equal to x.
#
# You should preserve the original relative order of the nodes within each of the two partitions.
#
# Input:  head = 1->4->3->2->5->2, x = 3
# Output: 1->2->2->4->3->5
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
    def partition(self, head: ListNode, x: int) -> ListNode:
        dummy = ListNode(-1)
        pre = dummy
        dummy.next = head
        if head is None:
            return head
        # Advance pre past the leading run of nodes that are already < x.
        while pre.next is not None and pre.next.val < x:
            pre = pre.next
        # Every node is < x, so the list is already partitioned.
        if pre.next is None:
            return head
        flag = pre.next    # first node with value >= x
        pre11 = flag       # last visited node in the >= x run
        cur = flag.next
        while cur is not None:
            if cur.val >= x:
                pre11 = cur
                cur = cur.next
            else:
                # Splice cur out of the >= x run and insert it just before flag.
                pre.next = cur
                pre11.next = cur.next
                cur.next = flag
                cur = pre11.next
                pre = pre.next
        return dummy.next
| [
"[email protected]"
] | |
782ce93a0a256fec41cbd6777146f5c3cd2c5695 | b3586235dc1e1acbd49fab996f581269a808480b | /sistema/planeamento/migrations/0013_ordemproducao_num_paletes_stock_in.py | 1fcbb090362b7154911b2ca6ccc20eb7e102cd64 | [] | no_license | gonfersilva/Sistema | 37ad1cd03dfbb7889fa0b0367c6ebd9044712ae3 | 4c6d9ade22040972efbe892eae0130939d7b5c46 | refs/heads/master | 2021-10-23T23:21:51.262723 | 2021-10-13T19:45:49 | 2021-10-13T19:45:49 | 155,545,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | # Generated by Django 2.2.7 on 2020-08-06 14:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('planeamento', '0012_auto_20200805_1401'),
]
operations = [
migrations.AddField(
model_name='ordemproducao',
name='num_paletes_stock_in',
field=models.IntegerField(default=0, verbose_name='Nº de paletes de stock inseridas'),
),
]
| [
"[email protected]"
] | |
095c6844b796a1aa2773a69bc20447cb7d6c0cd5 | efe67da7ca1092e033fba7b0a251a43d9a165022 | /get_city_from_id/views.py | 5d69da18fd281507a348b06021c952f6c6f4bd2e | [] | no_license | poojapauskar/savmytime-api | 0a019077720ab9ec9bd113379e77f0f4c83ef887 | 2bb48d60fce24888c2d4e4ba3b1da8947242a1fd | refs/heads/master | 2021-01-21T13:29:41.086409 | 2016-05-09T06:38:54 | 2016-05-09T06:38:54 | 54,106,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,573 | py | from services.models import Services
from category.models import Category
from sub_category.models import Sub_category
from cities.models import Cities
from get_details.serializers import Get_detailsSerializer
from rest_framework import generics
# from ticket.permissions import IsOwnerOrReadOnly
# from rest_framework import permissions
from django.shortcuts import get_object_or_404
from django.db.models import Count
from django.http import JsonResponse
# class Get_listList(generics.ListCreateAPIView):
# queryset = Ticket.objects.all()
# serializer_class = Get_listSerializer
# permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class StatusCode(object):
OK = 200
NOT_FOUND = 404
# add more status code according to your need
import json
from django.http import HttpResponse
def JSONResponse(data = None, status = StatusCode.OK):
if data is None:
return HttpResponse(status)
if data and type(data) is dict:
return HttpResponse(json.dumps(data, indent = 4, encoding = 'utf-8', sort_keys = True), \
mimetype = 'application/json', status = status)
else:
return HttpResponse(status = StatusCode.NOT_FOUND)
from django.views import generic
from django.views.generic import ListView
class CustomListView(ListView):
#paginate_by = 2
def get(self, request, *args, **kwargs):
import sys
# print >> sys.stderr, service_id
id1=self.kwargs['id']
objects=list(Cities.objects.filter(id=id1).values('city'))
return JsonResponse(objects,safe=False)
| [
"[email protected]"
] | |
1b6e511387f944e0ce53157aa60363c6551647e9 | 2a4290c36832e7080faa4104d58083c29ed1ea09 | /prepro.py | 244c70dc13f9e7ef94ec033204e0662e264afae7 | [] | no_license | jerryli27/my_dtn | 0380e22fb7892f5d46084339a5edb24c8ed5b8c8 | 54b16f403a480c35d5ae331dbbfd0efed53880b9 | refs/heads/master | 2021-01-19T17:17:19.787195 | 2017-03-05T21:48:06 | 2017-03-05T21:48:06 | 82,433,878 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,292 | py | """
The cat face data comes from https://sites.google.com/site/catdatacollection/data
"""
import numpy as np
import pickle
from PIL import Image
from tensorflow.examples.tutorials.mnist import input_data
def resize_images(image_arrays, size=[32, 32]):
# convert float type to integer
image_arrays = (image_arrays * 255).astype('uint8')
resized_image_arrays = np.zeros([image_arrays.shape[0]]+size)
for i, image_array in enumerate(image_arrays):
image = Image.fromarray(image_array)
resized_image = image.resize(size=size, resample=Image.ANTIALIAS)
resized_image_arrays[i] = np.asarray(resized_image)
return np.expand_dims(resized_image_arrays, 3)
def save_pickle(data, path):
with open(path, 'wb') as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
print ('Saved %s..' %path)
def main():
mnist = input_data.read_data_sets(train_dir='mnist')
train = {'X': resize_images(mnist.train.images.reshape(-1, 28, 28)),
'y': mnist.train.labels}
test = {'X': resize_images(mnist.test.images.reshape(-1, 28, 28)),
'y': mnist.test.labels}
save_pickle(train, 'mnist/train.pkl')
save_pickle(test, 'mnist/test.pkl')
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
573a193c39d9439dca5d7bd30a379c92a02b9329 | 4a81e33fe6d214f2efaeb97b03b5b05fae12b0d8 | /demos/great-expectations/venv/lib/python3.8/site-packages/great_expectations/expectations/core/expect_column_value_lengths_to_equal.py | 082cd7affc8769938d3c0e6cee1b6a9c66d84c5e | [] | no_license | franciscojavierarceo/Python | 29aaea28642dde151255c5b4a813158e975a073d | 02715ca6f19fd3c76cefa12de92deeae4ddf9684 | refs/heads/main | 2023-08-27T14:23:04.376095 | 2023-08-27T10:30:37 | 2023-08-27T10:30:37 | 33,146,755 | 7 | 9 | null | 2023-02-16T06:40:35 | 2015-03-30T20:38:00 | Jupyter Notebook | UTF-8 | Python | false | false | 9,388 | py | from typing import Optional
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.expectations.expectation import (
ColumnMapExpectation,
InvalidExpectationConfigurationError,
)
from great_expectations.expectations.util import render_evaluation_parameter_string
from great_expectations.render.renderer.renderer import renderer
from great_expectations.render.types import RenderedStringTemplateContent
from great_expectations.render.util import (
num_to_str,
parse_row_condition_string_pandas_engine,
substitute_none_for_missing,
)
class ExpectColumnValueLengthsToEqual(ColumnMapExpectation):
"""Expect column entries to be strings with length equal to the provided value.
This expectation only works for string-type values. Invoking it on ints or floats will raise a TypeError.
    expect_column_value_lengths_to_equal is a \
:func:`column_map_expectation <great_expectations.execution_engine.execution_engine.MetaExecutionEngine
.column_map_expectation>`.
Args:
column (str): \
The column name.
value (int or None): \
The expected value for a column entry length.
Keyword Args:
mostly (None or a float between 0 and 1): \
Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_value_lengths_to_be_between \
<great_expectations.execution_engine.execution_engine.ExecutionEngine
.expect_column_value_lengths_to_be_between>`
"""
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"maturity": "production",
"tags": ["core expectation", "column map expectation"],
"contributors": ["@great_expectations"],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
map_metric = "column_values.value_length.equals"
success_keys = ("value", "mostly", "parse_strings_as_datetimes")
default_kwarg_values = {
"row_condition": None,
"condition_parser": None, # we expect this to be explicitly set whenever a row_condition is passed
"mostly": 1,
"parse_strings_as_datetimes": False,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": False,
}
args_keys = (
"column",
"value",
)
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration]
) -> None:
super().validate_configuration(configuration)
if configuration is None:
configuration = self.configuration
try:
assert (
"value" in configuration.kwargs
), "The length parameter 'value' is required"
assert isinstance(
configuration.kwargs["value"], (float, int, dict)
), "given value must be numerical"
if isinstance(configuration.kwargs["value"], dict):
assert (
"$PARAMETER" in configuration.kwargs["value"]
), 'Evaluation Parameter dict for value kwarg must have "$PARAMETER" key.'
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
@classmethod
def _atomic_prescriptive_template(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "value", "mostly", "row_condition", "condition_parser"],
)
params_with_json_schema = {
"column": {"schema": {"type": "string"}, "value": params.get("column")},
"value": {
"schema": {"type": "number"},
"value": params.get("value"),
},
"mostly": {
"schema": {"type": "number"},
"value": params.get("mostly"),
},
"mostly_pct": {
"schema": {"type": "string"},
"value": params.get("mostly_pct"),
},
"row_condition": {
"schema": {"type": "string"},
"value": params.get("row_condition"),
},
"condition_parser": {
"schema": {"type": "string"},
"value": params.get("condition_parser"),
},
}
if params.get("value") is None:
template_str = "values may have any length."
else:
template_str = "values must be $value characters long"
if params["mostly"] is not None and params["mostly"] < 1.0:
params_with_json_schema["mostly_pct"]["value"] = num_to_str(
params["mostly"] * 100, precision=15, no_scientific=True
)
# params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if include_column_name:
template_str = f"$column {template_str}"
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(
params["row_condition"], with_schema=True
)
template_str = f"{conditional_template_str}, then {template_str}"
params_with_json_schema.update(conditional_params)
return (template_str, params_with_json_schema, styling)
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_evaluation_parameter_string
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "value", "mostly", "row_condition", "condition_parser"],
)
if params.get("value") is None:
template_str = "values may have any length."
else:
template_str = "values must be $value characters long"
if params["mostly"] is not None and params["mostly"] < 1.0:
params["mostly_pct"] = num_to_str(
params["mostly"] * 100, precision=15, no_scientific=True
)
# params["mostly_pct"] = "{:.14f}".format(params["mostly"]*100).rstrip("0").rstrip(".")
template_str += ", at least $mostly_pct % of the time."
else:
template_str += "."
if include_column_name:
template_str = f"$column {template_str}"
if params["row_condition"] is not None:
(
conditional_template_str,
conditional_params,
) = parse_row_condition_string_pandas_engine(params["row_condition"])
template_str = f"{conditional_template_str}, then {template_str}"
params.update(conditional_params)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
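
# Illustrative usage sketch (an assumption, not taken from this module): validating a
# pandas DataFrame through great_expectations' pandas dataset wrapper. The exact entry
# point and result type can differ between great_expectations versions.
if __name__ == "__main__":
    import pandas as pd
    import great_expectations as ge

    # Two of the three zip codes are exactly 5 characters long, so with mostly=0.5
    # the expectation should pass.
    df = ge.from_pandas(pd.DataFrame({"zip_code": ["02134", "90210", "1234"]}))
    result = df.expect_column_value_lengths_to_equal(column="zip_code", value=5, mostly=0.5)
    print(result)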
| [
"[email protected]"
] | |
d0ecd64d2fe16d6c50d3a51d49b2fca1bdfdb0cd | e70b6032024b290e2ba11fa5266ef912d9ac14a2 | /crits/comments/urls.py | bdb50ae5c327a6f98d12bcca1f9b041ce9ffffc0 | [
"MIT"
] | permissive | ckane/crits | 105b45c00b6ad85064b8e33ecd12e7474fc84fd6 | e2f2b71927d08e6432a0e540a0f4634da675ce53 | refs/heads/master | 2021-01-17T07:57:01.495393 | 2016-04-11T04:29:35 | 2016-04-11T04:29:35 | 20,622,493 | 2 | 0 | null | 2016-04-11T04:29:36 | 2014-06-08T17:45:07 | JavaScript | UTF-8 | Python | false | false | 558 | py | from django.conf.urls import patterns
urlpatterns = patterns('crits.comments.views',
(r'^remove/(?P<obj_id>\S+)/$', 'remove_comment'),
(r'^(?P<method>\S+)/(?P<obj_type>\S+)/(?P<obj_id>\S+)/$', 'add_update_comment'),
(r'^activity/$', 'activity'),
(r'^activity/(?P<atype>\S+)/(?P<value>\S+)/$', 'activity'),
(r'^activity/get_new_comments/$', 'get_new_comments'),
(r'^search/(?P<stype>[A-Za-z0-9\-\._]+)/(?P<sterm>.+?)/$', 'comment_search'),
(r'^list/$', 'comments_listing'),
(r'^list/(?P<option>\S+)/$', 'comments_listing'),
)
| [
"[email protected]"
] | |
03d3d6b6062452e771dd0f53277a1ff0b3f6c1e9 | 1df7ba55c4b61772c1a31c503e6b8881f1456dc5 | /untitled9/apps/courses/migrations/0010_auto_20170209_1147.py | dfa9d9ea221e6a9ea9b645c7a86fa3dfcace7f37 | [] | no_license | fzk466569/python-django-pro | 35918756060fcae375d3c99ea1a6934949b6d605 | 9add086b7a910f255df5b192268f1e117057e053 | refs/heads/master | 2021-01-19T13:18:14.141880 | 2017-02-19T12:16:29 | 2017-02-19T12:16:29 | 82,374,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 545 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-02-09 11:47
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0009_auto_20170209_1146'),
]
operations = [
migrations.AlterField(
model_name='course',
name='learn_about',
field=models.CharField(default='', max_length=200, verbose_name='\u901a\u8fc7\u672c\u8bfe\u7a0b\u540e\u80fd\u5b66\u5230\u7684'),
),
]
| [
"fzk466569"
] | fzk466569 |
637c9052ec6cfa09517634c6b68099e9a6470ff8 | 7a8fcae483d18e87481443f4476d56b5180459e6 | /statemachine/registry.py | c1632ebc937664e2f22b410a9f4334a197abc83f | [
"MIT"
] | permissive | Gariagi/python-statemachine | 63c67696f57eeda75054df399999bfe7fb21c783 | 7dddc714752ef56dd9e54fe246c0050f40c0ad2d | refs/heads/develop | 2020-03-09T01:04:15.124145 | 2018-04-29T17:53:36 | 2018-04-29T17:53:36 | 128,504,334 | 0 | 0 | MIT | 2018-04-29T17:53:37 | 2018-04-07T06:56:05 | Python | UTF-8 | Python | false | false | 1,206 | py | # coding: utf-8
_REGISTRY = {}
_initialized = False
def register(cls):
_REGISTRY[cls.__name__] = cls
return cls
def get_machine_cls(name):
init_registry()
return _REGISTRY[name]
def init_registry():
global _initialized
if not _initialized:
load_modules(['statemachine', 'statemachines'])
_initialized = True
def load_modules(modules=None):
try:
import django # noqa
except ImportError:
# Not a django project
return
try: # pragma: no cover
from django.utils.module_loading import autodiscover_modules
except ImportError: # pragma: no cover
# Django 1.6 compat to provide `autodiscover_modules`
def autodiscover_modules(module_name):
from django.conf import settings
from django.utils.importlib import import_module
for app in settings.INSTALLED_APPS:
# Attempt to import the app's `module_name`.
try:
import_module('{app}.{module}'.format(app=app, module=module_name))
except Exception:
pass
for module in modules:
autodiscover_modules(module)
| [
"[email protected]"
] | |
14f178cebd12871b2e29d5b54a2c6d71d47622a4 | 3a1bae5b4a49c8f3050f37c3875a5a1dfd6f8316 | /bookzen_flask/bookzen.py | 7c91da763601eb07dd5749b3d1277ba732dc04ae | [] | no_license | meomap/bookzen | 9e86f5d3e63bfbd1e87fae41465115d27ebdd321 | e764dea45c7a5174b728d225ef2aaef3ed09bc9b | refs/heads/master | 2021-01-19T13:25:23.704102 | 2017-02-17T01:52:26 | 2017-02-17T01:52:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,555 | py | # -*- coding: iso-8859-15 -*-
import json
from email.header import Header
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formataddr
import smtplib
from flask import Flask, render_template, redirect, url_for
from flask_mongoengine import MongoEngine
from flask_wtf import FlaskForm as Form
from wtforms import StringField, SubmitField, TextAreaField
from wtforms.validators import DataRequired, Email
app = Flask(__name__)
app.config.from_pyfile('settings.py')
db = MongoEngine(app)
class Books(db.Document):
name = db.StringField()
name_unidecode = db.StringField()
author = db.StringField()
description = db.StringField()
image_uri = db.StringField()
price = db.StringField()
url = db.StringField()
spider = db.StringField()
server = db.StringField()
project = db.StringField()
date = db.DateTimeField()
meta = {'indexes': [
{'fields': ['$name', "$name_unidecode"]}]}
class SearchForm(Form):
flash_msg = "Please search something so we can serve you"
search = StringField("Search book\'s title", validators=[DataRequired(flash_msg)])
submit = SubmitField()
class ContactForm(Form):
flash_msg = "Oops, look like you forget to fill this field."
name = StringField("Name", [DataRequired(flash_msg)])
email = StringField("Email", [Email(flash_msg)])
subject = StringField("Subject", [DataRequired(flash_msg)])
message = TextAreaField("Message", [DataRequired(flash_msg)])
submit = SubmitField()
def str_handler(string):
if isinstance(string, str):
return json.dumps(string)
elif isinstance(string, unicode):
return '''\"{0}\"'''.format(string.encode('utf-8'))
@app.route('/', methods=["GET", "POST"])
def index():
form = SearchForm()
if form.validate_on_submit():
keyword = form.search.data
return redirect(url_for('search', keyword=keyword))
else:
return render_template('index.html', form=form)
@app.route('/search/<keyword>')
def search(keyword):
form = SearchForm()
if form.validate_on_submit():
keyword = form.search.data
return redirect(url_for('search', keyword=keyword))
query = Books.objects.search_text(str_handler(keyword))
books = [dict(json.loads(i.to_json())) for i in query.order_by('+price')]
if books:
return render_template('results.html', form=form, books=books)
else:
return render_template('not_found.html', form=form)
@app.route('/contact/', methods=["GET", "POST"])
def contact():
form = ContactForm()
if form.validate_on_submit():
msg = MIMEMultipart()
fromaddr = form.email.data
toaddr = app.config["MY_EMAIL_ADDRESS"]
msg['subject'] = form.subject.data
msg['from'] = formataddr((str(Header(form.name.data, 'utf-8')), fromaddr))
msg['to'] = toaddr
msg['reply-to'] = fromaddr
body = form.message.data
msg.attach(MIMEText(body, 'plain', 'utf-8'))
server = smtplib.SMTP('smtp.gmail.com', 587)
server.starttls()
server.login(app.config['EMAIL_ACCOUNT'], app.config["EMAIL_PASSWORD"])
text = msg.as_string()
server.sendmail(fromaddr, toaddr, text)
server.quit()
return render_template('thanks.html')
else:
return render_template('contact.html', form=form)
@app.route('/about/')
def about():
return render_template('about.html')
if __name__ == "__main__":
app.run(host='0.0.0.0', debug=True)
| [
"[email protected]"
] | |
0f260f1dbdd72e3b8c0677de922da98c7967e15b | bb7ebdd990d3265a585700e1083b3e916842aec6 | /scrapper/article.py | c5d9ffb192e8310d3615e63aaf29c76355f4090d | [] | no_license | c24b/clean_crawtext | 2c2a6d6292f5bb00afa45ebb28ba16bf2f0a229e | f1dc20298da418680b1b96dd0468846cbf09a112 | refs/heads/master | 2020-05-18T00:58:55.308025 | 2014-08-19T11:50:52 | 2014-08-19T11:50:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,889 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
from copy import deepcopy
from parsers import Parser
from cleaners import StandardDocumentCleaner
from formatters import StandardOutputFormatter
from extractors import StandardContentExtractor
import datetime
from BeautifulSoup import BeautifulSoup as bs
class Extractor(object):
'''Generic Extractor'''
@staticmethod
def run( url, raw_html,type, lang="en"):
if type == "article":
content = Article(url, raw_html, lang)
elif type == "defaut":
raise NotImplementedError
else:
raise NotImplementedError
return content.get()
class Article(Extractor):
'''Article'''
def __init__(self, url, raw_html, lang):
self.status = True
self.url = url
self.lang = lang
# title of the article
self.title = None
#text
self.article = u""
self.cleaned_text = u""
# meta
self.meta_description = u""
self.meta_lang = u""
self.meta_favicon = u""
self.meta_keywords = u""
#link and domain
self.canonical_link = u""
self.domain = u""
# cleaned text
self.top_node = None
self.tags = set()
self.final_url = url
self.raw_html = raw_html
# the lxml Document object
self.parser = Parser()
self.raw_doc = u""
self.publish_date = None
self.additional_data = {}
self.links = []
self.outlinks = []
self.inlinks = []
self.start_date = datetime.datetime.today()
def get(self):
try:
self.doc = self.parser.fromstring(self.raw_html)
#init extractor method
extractor = StandardContentExtractor(self,"en")
# init the document cleaner
cleaner = StandardDocumentCleaner(self)
# init the output formatter
formatter = StandardOutputFormatter(self, stopwords_class="en")
#doc
#self.doc = doc
self.raw_doc = deepcopy(self.raw_html)
self.title = extractor.get_title()
#self.title = self.title
#meta
self.meta_lang = extractor.get_meta_lang()
#self.meta_favicon = extractor.get_favicon()
#self.meta_description = extractor.get_meta_description()
#self.meta_description = self.meta_description.decode("utf-8")
#self.meta_keywords = extractor.get_meta_keywords()
#domain and url
self.canonical_link = extractor.get_canonical_link()
self.domain = extractor.get_domain()
#~
#~ #tag
#self.tags = extractor.extract_tags()
#~ #text
self.doc = cleaner.clean()
self.top_node = extractor.calculate_best_node()
if self.top_node is not None:
# post cleanup
self.top_node = extractor.post_cleanup(self.top_node)
# clean_text
#self.cleaned_text = formatter.get_formatted_text()
#self.content = self.content.decode("utf-8")
self.links = extractor.get_links()
self.outlinks = extractor.get_outlinks()
try:
self.content = formatter.get_formatted_text()
except Exception as e:
self.content = bs(self.raw_html).text
#self.inlinks, self.inlinks_err = extractor.get_outlinks(self.links)
# TODO
# self.article.publish_date = self.extractor.get_pub_date(doc)
# self.article.additional_data = self.extractor.more(doc)
return self
except Exception as e:
self.status = False
self.logs = {
"url": self.url,
"scope": "article extraction",
"msg": e.args,
"status": False,
"code": -2
}
return self
def repr(self):
return {
"url": self.canonical_link,
"domain": self.domain,
"title": self.title,
"content": self.content,
"description": self.meta_description,
"outlinks": self.outlinks,
"crawl_date": self.start_date,
"raw_html": self.raw_html,
}
def is_relevant(self, query):
self.content = {"title":unicode(self.title), "content": unicode(self.content)}
if query.match(self.content) is False:
self.status = {"url":self.url, "code": -1, "msg": "Not Relevant","status": False, "title": self.title, "content": self.content}
return False
else:
return True
| [
"[email protected]"
] | |
61b4081b28ae6a8a0b66826389ed5b2bf8f6a8bd | 82b728e805d887102c0b8c415731b353877690cd | /samples/generated_samples/aiplatform_generated_aiplatform_v1_job_service_list_data_labeling_jobs_async.py | ff63ecabec79d5b44b37a6226ced8498752c4070 | [
"Apache-2.0"
] | permissive | geraint0923/python-aiplatform | 90c7742c9bdbde05b9688b117e8e59c0406d6f85 | 7ab05d5e127636d96365b7ea408974ccd6c2f0fe | refs/heads/main | 2023-08-24T05:30:38.519239 | 2021-10-27T20:38:25 | 2021-10-27T20:38:25 | 370,803,114 | 0 | 0 | Apache-2.0 | 2021-05-25T19:15:47 | 2021-05-25T19:15:46 | null | UTF-8 | Python | false | false | 1,627 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListDataLabelingJobs
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_JobService_ListDataLabelingJobs_async]
from google.cloud import aiplatform_v1
async def sample_list_data_labeling_jobs():
"""Snippet for list_data_labeling_jobs"""
# Create a client
client = aiplatform_v1.JobServiceAsyncClient()
# Initialize request argument(s)
request = aiplatform_v1.ListDataLabelingJobsRequest(
parent="projects/{project}/locations/{location}",
)
# Make the request
page_result = client.list_data_labeling_jobs(request=request)
async for response in page_result:
print(response)
# [END aiplatform_generated_aiplatform_v1_JobService_ListDataLabelingJobs_async]
| [
"[email protected]"
] | |
90229bbbe4785a5b999ee5d8722e20f28827c56f | c05ed32f1ef7e1eb7d73efd674e7d1fd710ad171 | /daily-coding-problems/problem395.py | bf3a13cf875b08ed3da6182d02891d6d297eafa6 | [] | no_license | carlhinderer/python-exercises | c8367517fdf835fa1117f96dbfee3dccc596afa6 | 4e09bbb4c4e2bd5644ed50e997db9f3c289a18f7 | refs/heads/master | 2021-06-01T16:17:00.389134 | 2021-02-09T18:21:01 | 2021-02-09T18:21:01 | 150,902,917 | 0 | 0 | null | 2021-04-20T20:33:11 | 2018-09-29T21:03:36 | Python | UTF-8 | Python | false | false | 275 | py | # Problem 395
# Medium
# Asked by Robinhood
#
# Given an array of strings, group anagrams together.
#
# For example, given the following array:
#
# ['eat', 'ate', 'apt', 'pat', 'tea', 'now']
#
# Return:
#
# [['eat', 'ate', 'tea'],
# ['apt', 'pat'],
# ['now']]
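#
# A minimal sketch of one common approach (not part of the original file): bucket the
# words by their sorted-character signature. The function name group_anagrams and the
# use of collections.defaultdict are illustrative assumptions, not from the problem text.

from collections import defaultdict


def group_anagrams(words):
    # Anagrams share the same multiset of letters, so the sorted letters form a key.
    groups = defaultdict(list)
    for word in words:
        groups[''.join(sorted(word))].append(word)
    return list(groups.values())


if __name__ == '__main__':
    # Group order may vary depending on the insertion order of the keys.
    print(group_anagrams(['eat', 'ate', 'apt', 'pat', 'tea', 'now']))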
# | [
"[email protected]"
] |