ext (string, 9 classes) | sha (string, 40 chars) | content (string, 3 to 1.04M chars)
---|---|---|
py | b401dfe189ca23e7c1a0796839f440e141d29fd8 | # -*- coding: utf-8 -*-
"""
Tests use of IsoCurve item displayed with image
"""
import initExample ## Add path to library (just for examples; you do not need this)
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
app = QtGui.QApplication([])
## make pretty looping data
frames = 200
data = np.random.normal(size=(frames,30,30), loc=0, scale=100)
data = np.concatenate([data, data], axis=0)
data = pg.gaussianFilter(data, (10, 10, 10))[frames//2:frames + frames//2]
data[:, 15:16, 15:17] += 1
win = pg.GraphicsWindow()
win.setWindowTitle('pyqtgraph example: Isocurve')
vb = win.addViewBox()
img = pg.ImageItem(data[0])
vb.addItem(img)
vb.setAspectLocked()
## generate empty curves
curves = []
levels = np.linspace(data.min(), data.max(), 10)
for i in range(len(levels)):
v = levels[i]
## generate isocurve with automatic color selection
c = pg.IsocurveItem(level=v, pen=(i, len(levels)*1.5))
c.setParentItem(img) ## make sure isocurve is always correctly displayed over image
c.setZValue(10)
curves.append(c)
## animate!
ptr = 0
imgLevels = (data.min(), data.max() * 2)
def update():
global data, curves, img, ptr, imgLevels
ptr = (ptr + 1) % data.shape[0]
img.setImage(data[ptr], levels=imgLevels)
for c in curves:
c.setData(data[ptr])
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(50)
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
|
py | b401e013f19f87ffb9051bdbc41adf2d2613ef48 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "caracole.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
py | b401e093688b62794124ec1b31acf38791007e82 | from coeffs_routine import get_Coeffs
import pandas as pd
from matplotlib import pyplot as plt
folderpath = "Domingo (31-03-2019)"
velocities = [50]
incidencies = [0,3,6,9,12]
with_load = True
CL_alpha = {}
CD_alpha = {}
CM_alpha = {}
for velocity in velocities:
for incidency in incidencies:
if with_load:
filename = "2. {} kmh - {} graus - Com Asa.txt".format(velocity, incidency)
CL, CD, CM = get_Coeffs(folderpath, filename)
CL_alpha[incidency] = CL
CD_alpha[incidency] = CD
CM_alpha[incidency] = CM
else:
filename = "1. {} kmh - {} graus - Sem Asa.txt".format(velocity, incidency)
CL, CD, CM = get_Coeffs(folderpath, filename)
CL_alpha[incidency] = CL
CD_alpha[incidency] = CD
CM_alpha[incidency] = CM
CL_alpha_df = pd.DataFrame.from_dict(CL_alpha, orient='index', columns=['CL'])
CD_alpha_df = pd.DataFrame.from_dict(CD_alpha, orient='index', columns=['CD'])
CM_alpha_df = pd.DataFrame.from_dict(CM_alpha, orient='index', columns=['CM'])
print(CL_alpha_df)
print(CD_alpha_df)
print(CM_alpha_df)
fig1, ax1 = plt.subplots()
plt.plot(CL_alpha_df, label='CL')
plt.legend()
plt.grid()
fig2, ax2 = plt.subplots()
plt.plot(CD_alpha_df, label='CD')
plt.plot(CM_alpha_df, label='CM')
plt.legend()
plt.grid()
plt.show() |
py | b401e1302dc6d26f5705d0f141dc30b143b9607c | # encoding=utf-8
import numpy as np
import csv
import pandas as pd
from sklearn import metrics
import sys
import argparse
def parse_args():
parser = argparse.ArgumentParser(description="""['label', 'uid', 'aid', 'time', 'siteid', 'slotid', 'cid', 'net',
'age', 'gender', 'city', 'province', 'phoneType', 'carrier',
'billid', 'primid', 'creative', 'inter', 'app', 'c1', 'c2']""")
parser.add_argument('path')
    parser.add_argument('--tab', '-t', action='store_true', help='csv file uses \\t as the separator')
parser.add_argument('--save', '-s', action='store_true', help='save result to local file')
parser.add_argument('--group', '-g', action='store_true', help='group final result')
parser.add_argument('--all', '-a', action='store_true', help='show all')
parser.add_argument('--descend', '-d', action='store_true', help='descend sort')
parser.add_argument('--field', '-f', default='uid', type=str, help="required. default='uid'")
parser.add_argument('--field2', '-f2', default='', type=str,
help="field2, use for cross feature, empty for single feature")
parser.add_argument('--limit', '-l', default=10, type=int, help='show items limit')
parser.add_argument('--mode', '-m', help='statistics mode, default=count',
choices=['count', 'ratio', 'mean', 'unique'],
default='count')
args = parser.parse_args()
return args
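# Example invocations (hypothetical script and file names), based on the
# argparse options defined above:
#     python field_stats.py train.csv -f uid -m count -g
#     python field_stats.py train.csv -f aid -f2 gender -m ratio -g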
if __name__ == '__main__':
args = parse_args()
path = args.path
limit = args.limit
mode = args.mode
field = args.field
field2 = args.field2
group = args.group
all = args.all
descend = args.descend
save = args.save
train_df = pd.read_csv(path, header=None)
train_df.columns = ['label', 'uid', 'aid', 'time', 'siteid', 'slotid', 'cid', 'net',
'age', 'gender', 'city', 'province', 'phoneType', 'carrier',
'billid', 'primid', 'creative', 'inter', 'app', 'c1', 'c2']
#test.columns = ['index', 'uid', 'aid', 'time', 'siteid', 'slotid', 'cid', 'net']
if mode == 'count':
if field2: # cross feature
cross_field = field + '_' + field2
new_field = train_df[field].astype(str).values + '_' + train_df[field2].astype(str).values
train_df[cross_field] = new_field
c = train_df.groupby(cross_field).size().reset_index(name=cross_field + '_count')
train = pd.merge(train_df, c, on=cross_field, how='left')
if group:
gp = [field, field2, cross_field + '_count']
train = train.groupby(gp).size().reset_index(name='count')
else:
if group:
c = train_df[field].value_counts().reset_index(name=field + "_count")
#gp = [field, field + "_count"]
#train = train.groupby(gp).size().reset_index(name='count')
train = c
else:
c = train_df.groupby(field).size().reset_index(name=field + "_count")
train = pd.merge(train_df, c, on=field, how='left')
if save: # save
pass
else: # preview
#train_df['new_field'] = train_df['aid'].astype(str).values + '_' + train_df['gender'].astype(str).values
#c2 = train_df.groupby('new_field').size().reset_index(name='new_field' + "_count")
#train = pd.merge(train_df, c2, on='new_field', how='left')
print(train)
#print(train_df[field].value_counts()[:limit])
#train_df['count'] = train_df[field].values
#print(train_df[:limit])
elif mode == 'ratio':
if field2: # cross feature
if field2 == 'label':
train_1 = train_df[train_df.label == 1]
b = train_1.groupby([field]).size().reset_index(name=field + '_1')
train_df = pd.merge(train_df, b, on=field, how='left')
#del b
train_df = train_df.fillna(0)
c = train_df.groupby(field).size().reset_index(name=field + "_count")
train = pd.merge(train_df, c, on=field, how='left')
del c
train[field + '_1_ratio'] = train[field + '_1'] / train[field + "_count"] / 0.061935
train = train.fillna(1.00000)
del train_df
if descend:
train = train.sort_values(field + '_1_ratio', axis=0,ascending=False, inplace=False)
if group:
train = train.groupby([field, field + '_1_ratio']).size().reset_index(name='count')
#################
'''
c = train_df['label'].value_counts(normalize=True).reset_index(name=field2 + "_ratio")
r_1 = train = c[field2 + "_ratio"].max()
train_df['label_1'] = r_1
train = train_df
'''
#train = b[b.label == 1]
#b[field2 + "_ratio"] = b[field2 + "_ratio"].max()
else:
norm = True
relative = [('primid', 'aid')]
if (field, field2) in relative :
norm = False
cross_field = field + '_' + field2
new_field = train_df[field].astype(str).values + '_' + train_df[field2].astype(str).values
train_df[cross_field] = new_field
b = train_df.groupby(cross_field).size().reset_index(name=cross_field + '_count')
train_df = pd.merge(train_df, b, on=cross_field, how='left')
train_df.drop([cross_field], axis=1, inplace=True)
del b
c = train_df.groupby(field).size().reset_index(name=field + "_count")
train = pd.merge(train_df, c, on=field, how='left')
del c
del train_df
if norm:
d = train[field2].value_counts(normalize=True).reset_index(name=field2 + "_ratio")
d.columns = [field2, field2 + '_ratio']
train = pd.merge(train, d, on=field2, how='left')
train[field + '_' + field2 + '_ratio'] = train[cross_field + '_count'] / train[field + "_count"]
if norm:
train[field + '_' + field2 + '_norm'] = train[field + '_' + field2 + '_ratio'] / train[field2 + '_ratio']
train.drop([cross_field + '_count', field + "_count"], axis=1, inplace=True)
if group:
gp = [field, field2, field + '_' + field2 + '_ratio']
if norm:
gp.append(field + '_' + field2 + '_norm')
train = train.groupby(gp).size().reset_index(name='count')
else:
c = train_df[field].value_counts(normalize=True).reset_index(name=field + "_ratio")
c.columns = [field, field + '_ratio']
train = pd.merge(train_df, c, on=field, how='left')
if all:
pd.set_option('display.max_rows', None)
print(train)
elif mode == 'mean':
pass
elif mode == 'unique':
pass
print()
# print(train_df[a>2]) train_df['uid'].isin
|
py | b401e149e76f131e15f0b6634c6acd73bde7e62b | from ._pdeep_constant import BasicpDeepInfo
from ._pdeep_constant import MOD
import re
import os
from collections import defaultdict
import pandas as pd
from deep_phospho.proteomics_utils import rapid_kit
from deep_phospho.proteomics_utils.post_analysis.spectronaut import SpectronautLibrary
def intprec_to_pdeep_test(intprec_list):
"""
    Convert intprec entries into a pDeep2 test input table.
    intprec example: DKEAIQA4SESLMTSAPK.2
    The pDeep2 test input format is
peptide modification charge
FRTPSFLK 3,Phospho[T];5,Phospho[S]; 2
...
"""
title = ['peptide', 'modification', 'charge']
pdeep_test_data_list = []
for each_intprec in intprec_list:
intprec_result = intprec_to_pdeep(each_intprec)
if intprec_result is not None:
stripped_pep, mod_info, charge = intprec_result
else:
continue
pdeep_test_data_list.append([stripped_pep, mod_info, charge])
pdeep_test_df = pd.DataFrame(pdeep_test_data_list, columns=title)
return pdeep_test_df
def intprec_to_pdeep(intprec: str):
int_to_pdeep2_mod = {
'C': 'Carbamidomethyl[C]',
'1': 'Oxidation[M]',
'2': 'Phospho[S]',
'3': 'Phospho[T]',
'4': 'Phospho[Y]',
}
intseq, charge = intprec.split('.')
if intseq.startswith('@'):
intseq = intseq[1:]
elif intseq.startswith('*'):
return None
else:
pass
stripped_pep = intseq.replace('1', 'M').replace('2', 'S').replace('3', 'T').replace('4', 'Y')
mod_info = ''
for _ in re.finditer('[C1234]', intseq):
site = _.end()
mod_char = _.group()
mod = int_to_pdeep2_mod[mod_char]
mod_info += f'{site},{mod};'
return stripped_pep, mod_info, charge
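# Example, using the precursor from the docstring above: '4' encodes a
# phosphorylated Y, so
#     intprec_to_pdeep('DKEAIQA4SESLMTSAPK.2')
# returns ('DKEAIQAYSESLMTSAPK', '8,Phospho[Y];', '2').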
def mod_extraction_for_pdeep(mod_pep):
"""
    Extract pDeep-style modification info ('site,Mod[Residue];...') from a
    Spectronaut modified peptide such as '_C[Carbamidomethyl (C)]DMEDER_'.
    Returns 'Unsupport' if a modification has no preceding residue (N-terminal modification).
"""
mod_pep = mod_pep.replace('_', '')
if '[' not in mod_pep:
return ''
else:
modinfo = ''
mod_start = [left_bracket.start() for left_bracket in re.finditer('\[', mod_pep)]
mod_end = [right_bracket.start() for right_bracket in re.finditer(']', mod_pep)]
mod_len = 0
for mod_site in zip(mod_start, mod_end):
if mod_site[0] == 0: # or mod_site[1] == len(mod_pep) - 1:
return 'Unsupport'
else:
mod_residu = mod_pep[mod_site[0] - 1]
mod_type = mod_pep[mod_site[0] + 1: mod_site[1]].replace(' ', '')
mod_type = re.sub(r'\(.+?\)', f'[{mod_residu}]', mod_type)
modinfo += '{mod_site},{mod_type};'.format(mod_site=mod_site[0] - mod_len, mod_type=mod_type)
mod_len += (mod_site[1] - mod_site[0] + 1)
return modinfo
def inten_dict_to_plabel(inten_dict: dict):
"""
:param inten_dict: The input dict should have the k[v] pairs as 'Prec': {'Frag_1': Inten_1, 'Frag_2': Inten_2, ...}
"""
plabel_rows = []
for prec, ion_inten_dict in inten_dict.items():
intprec_trans = intprec_to_pdeep(prec)
if intprec_trans is None:
continue
stripped_pep, mod_info, charge = intprec_trans
spec = f'Unknown.{charge}.0.0'
plabel_ion_str = plabel_one_ion_row(ion_inten_dict, return_type='str')
plabel_rows.append(f'{spec}\t{stripped_pep}\t{mod_info}\t{plabel_ion_str}')
return plabel_rows
def write_plabel_with_inten_dict(inten_dict: dict, output_path: str):
plabel_rows = inten_dict_to_plabel(inten_dict)
with open(output_path, 'w') as f:
f.write('spec\tpeptide\tmodinfo\tb\tb-NH3\tb-H2O\tb-ModLoss\ty\ty-NH3\ty-H2O\ty-ModLoss\n')
for row in plabel_rows:
f.write(row + '\n')
def plabel_to_pred_input(plabel_path):
plabel_df = pd.read_csv(plabel_path, sep='\t', low_memory=False)
plabel_df['charge'] = plabel_df['spec'].apply(lambda x: x.split('.')[-3])
plabel_df = plabel_df[['peptide', 'modinfo', 'charge']]
plabel_df.columns = ['peptide', 'modification', 'charge']
return plabel_df
def plabel_one_ion_row(ion_inten_dict: dict,
ion_type=('b', 'b-NH3', 'b-H2O', 'b-ModLoss', 'y', 'y-NH3', 'y-H2O', 'y-ModLoss'),
return_type='str'):
    # defaultdict(list) creates missing ion-type keys on demand, so no explicit
    # key initialization is needed
    ion_dict = defaultdict(list)
loss_trans = {'1,H3PO4': 'ModLoss',
'1,H2O': 'H2O',
'1,NH3': 'NH3'}
for frag, inten in ion_inten_dict.items():
frag_type, frag_num, frag_charge, frag_loss = re.findall(r'([abcxyz])(\d+)\+(\d)-(.+)', frag)[0]
if frag_loss == 'Noloss':
ion_name = f'{frag_type}'
elif frag_loss in ['1,H2O', '1,NH3', '1,H3PO4']:
ion_name = f'{frag_type}-{loss_trans[frag_loss]}'
else:
continue
ion_dict[ion_name].append((f'{frag_type}{frag_num}{ion_name[1:]}+{frag_charge},{inten};',
int(frag_num),
int(frag_charge)))
if return_type == 'dict':
return ion_dict
elif return_type == 'str':
ion_info = []
for each_ion_type in ion_type:
if each_ion_type[0] in ['a', 'b', 'c']:
sorted_ions = sorted(ion_dict[each_ion_type], key=lambda x: (x[2], x[1]), reverse=False)
elif each_ion_type[0] in ['x', 'y', 'z']:
sorted_ions = sorted(ion_dict[each_ion_type], key=lambda x: (-x[2], x[1]), reverse=True)
else:
                raise ValueError(f'Unsupported ion type: {each_ion_type}')
ions = [_[0] for _ in sorted_ions]
ion_info.append(''.join(ions))
return '\t'.join(ion_info)
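# Example (illustrative intensities): fragment keys follow the
# '<ion type><number>+<charge>-<loss>' pattern parsed above, e.g.
#     plabel_one_ion_row({'b2+1-Noloss': 0.3, 'y3+1-Noloss': 1.0, 'y3+1-1,H3PO4': 0.15})
# returns a tab-separated row with 'b2+1,0.3;' in the b column, 'y3+1,1.0;' in
# the y column and 'y3-ModLoss+1,0.15;' in the y-ModLoss column.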
def plabel_ion_info(one_psm_df, return_type):
ion_info = {'b': '', 'b-NH3': '', 'b-H2O': '', 'b-ModLoss': '', 'y': [], 'y-NH3': [], 'y-H2O': [], 'y-ModLoss': []}
for row_index, each_row in one_psm_df.iterrows():
fragment_type = each_row['FragmentType']
fragment_num = each_row['FragmentNumber']
fragment_charge = each_row['FragmentCharge']
fragment_relative_intensity = each_row['RelativeIntensity']
fragment_losstype = each_row['FragmentLossType']
if fragment_type == 'b':
if fragment_losstype == 'noloss':
ion_info['b'] += 'b{num}+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity)
elif fragment_losstype == 'NH3':
ion_info['b-NH3'] += 'b{num}-NH3+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity)
elif fragment_losstype == 'H2O':
ion_info['b-H2O'] += 'b{num}-H2O+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity)
elif fragment_losstype == 'H3PO4':
ion_info['b-ModLoss'] += 'b{num}-ModLoss+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity)
else:
continue
elif fragment_type == 'y':
if fragment_losstype == 'noloss':
ion_info['y'].append('y{num}+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity))
elif fragment_losstype == 'NH3':
ion_info['y-NH3'].append('y{num}-NH3+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity))
elif fragment_losstype == 'H2O':
ion_info['y-H2O'].append('y{num}-H2O+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity))
elif fragment_losstype == 'H3PO4':
ion_info['y-ModLoss'].append('y{num}-ModLoss+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity))
else:
continue
if return_type == 'dict':
return ion_info
elif return_type == 'str':
str_ion_info = ''
b_ion_order = ['b', 'b-NH3', 'b-H2O', 'b-ModLoss']
# ion_order = ['b', 'y']
for ion_losstype in b_ion_order:
str_ion_info += ion_info[ion_losstype]
str_ion_info += '\t'
y_ion_order = ['y', 'y-NH3', 'y-H2O', 'y-ModLoss']
for ion_losstype in y_ion_order:
str_ion_info += ''.join(ion_info[ion_losstype][::-1])
if ion_losstype != 'y-ModLoss':
str_ion_info += '\t'
# if ion_losstype != 'y':
# str_ion_info += '\t'
return str_ion_info
def sn_lib_to_plabel(lib, plabel_output):
if isinstance(lib, pd.DataFrame):
lib_df = lib
else:
if os.path.exists(lib):
lib_df = pd.read_csv(lib, sep='\t', low_memory=False)
else:
raise FileNotFoundError
lib_df['Prec'] = lib_df['ModifiedPeptide'] + '.' + lib_df['PrecursorCharge'].astype(str)
with open(plabel_output, 'w') as plabel_handle:
plabel_handle.write('spec\tpeptide\tmodinfo\tb\tb-NH3\tb-H2O\tb-ModLoss\ty\ty-NH3\ty-H2O\ty-ModLoss\n')
# handle_plabel.write('spec\tpeptide\tmodinfo\tb\ty\n')
for psm_index, (each_prec, each_psm_df) in enumerate(lib_df.groupby('Prec')):
first_row = each_psm_df.iloc[0]
spec = '{title}.{charge}.0.0'.format(title=first_row['ReferenceRun'], charge=first_row['PrecursorCharge'])
# spec = '{charge}.0.0'.format(charge=first_fragment[1])
stripped_pep = first_row['StrippedPeptide']
mod_pep = first_row['ModifiedPeptide']
modinfo = mod_extraction_for_pdeep(mod_pep)
if modinfo == 'Unsupport':
continue
ion_info = plabel_ion_info(each_psm_df, 'str')
plabel_handle.write('{spec}\t{pep}\t{mod}\t{ioninfo}\n'.format(
spec=spec, pep=stripped_pep, mod=modinfo, ioninfo=ion_info))
def sn_lib_to_pdeep_test(test_lib, test_set_output):
if isinstance(test_lib, pd.DataFrame):
lib_df = test_lib
else:
if os.path.exists(test_lib):
lib_df = pd.read_csv(test_lib, sep='\t', low_memory=False)
else:
raise FileNotFoundError
lib_df['Prec'] = lib_df['ModifiedPeptide'] + '.' + lib_df['PrecursorCharge'].astype(str)
lib_df = lib_df.drop_duplicates('Prec')
with open(test_set_output, 'w') as test_handle:
test_handle.write('peptide\tmodification\tcharge\n')
for row_index, each_lib_row in lib_df.iterrows():
mod_pep = each_lib_row['ModifiedPeptide']
charge = str(each_lib_row['PrecursorCharge'])
stripped_pep = each_lib_row['StrippedPeptide']
mod = mod_extraction_for_pdeep(mod_pep)
if mod == 'Unsupport':
continue
test_handle.write('{}\t{}\t{}\n'.format(stripped_pep, mod, charge))
def extract_pdeep_mod(mod_pep, mod_ident='bracket', mod_trans=True):
"""
input: '_C[Carbamidomethyl (C)]DM[Oxidation (M)]EDER_'
output: 'CDMEDER', '1,Carbamidomethyl[C];3,Oxidation[M];'
"""
stripped_pep, mod = rapid_kit.split_mod(modpep=mod_pep, mod_ident=mod_ident)
if mod_trans:
mod = trans_sn_mod(mod)
return stripped_pep, mod
def trans_sn_mod(mod):
for sn_mod, pdeep_mod in MOD.items():
mod = mod.replace(sn_mod, pdeep_mod)
if '(' not in mod:
break
if '(' in mod:
return None
return mod
def restore_pdeep_mod_site(stripped_pep, mod_content, mod_processor):
"""
This will restore the modification to stripped peptide.
EXAMPLE: restore_pdeep_mod_site('MPALAIMGLSLAAFLELGMGASLCLSQQFK', '24,Carbamidomethyl[C];')
-> 'MPALAIMGLSLAAFLELGMGASLC[Carbamidomethyl (C)]LSQQFK'
"""
return rapid_kit.add_mod(stripped_pep, mod_content, mod_processor)
def pdeep_input(output_path, prec_list):
with open(output_path, 'w') as out_file:
pred_title = ['peptide', 'modification', 'charge']
out_file.write('\t'.join(pred_title) + '\n')
for _prec in prec_list:
modpep, charge = rapid_kit.split_prec(_prec)
strip_pep, mod = extract_pdeep_mod(modpep)
out_file.write(f'{strip_pep}\t{mod}\t{charge}\n')
def pdeep_trainset(output_path, prec_inten_dict):
with open(output_path, 'w') as out_file:
plabel_title_list = BasicpDeepInfo.pDeepTrainsetTitle
plabel_title = '\t'.join(plabel_title_list)
out_file.write(plabel_title + '\n')
for _prec, inten_dict in prec_inten_dict.items():
plabel_row_dict = plabel_one_row_dict(_prec, inten_dict)
if not plabel_row_dict:
continue
one_row_list = [plabel_row_dict[_] for _ in plabel_title_list]
out_file.write('\t'.join(one_row_list) + '\n')
def plabel_one_row_dict(prec, inten_dict: dict):
plabel_row_dict = defaultdict(str)
modpep, charge = rapid_kit.split_prec(prec)
strip_pep, mod = extract_pdeep_mod(modpep, mod_ident='bracket', mod_trans=True)
if not mod:
return None
plabel_row_dict['spec'] = f'{charge}.0.0'
plabel_row_dict['peptide'] = strip_pep
plabel_row_dict['modinfo'] = mod
for frag, inten in inten_dict.items():
frag_type, frag_num, frag_charge, frag_loss = rapid_kit.split_fragment_name(frag)
if frag_loss == 'noloss':
plabel_type = frag_type
plabel_frag = f'{frag_type}{frag_num}+{frag_charge}'
elif frag_loss == 'NH3' or frag_loss == 'H2O':
plabel_type = f'{frag_type}-{frag_loss}'
plabel_frag = f'{frag_type}{frag_num}-{frag_loss}+{frag_charge}'
else:
plabel_type = f'{frag_type}-ModLoss'
plabel_frag = f'{frag_type}{frag_num}-ModLoss+{frag_charge}'
plabel_row_dict[plabel_type] += f'{plabel_frag},{inten};'
return plabel_row_dict
def read_pdeep_result(pdeep_result, modloss_name='H3PO4',
require_mz=True, min_inten_ratio=0.01, min_frag_num=3,
exclude_frag_num=(1, 2), exclude_modloss=False):
mod_dict = {'Carbamidomethyl[C]': '[Carbamidomethyl (C)]',
'Oxidation[M]': '[Oxidation (M)]',
'Phospho[S]': '[Phospho (STY)]',
'Phospho[T]': '[Phospho (STY)]',
'Phospho[Y]': '[Phospho (STY)]',
}
with open(os.path.abspath(pdeep_result), 'r') as pdeep_handle:
predicted_fragment_data = dict()
for each_line in pdeep_handle:
each_line = each_line.strip('\n')
if each_line == 'BEGIN IONS':
fragment_dict = dict()
elif each_line == 'END IONS':
if len(fragment_dict) >= min_frag_num:
predicted_fragment_data[prec] = fragment_dict
else:
pass
else:
if each_line.startswith('TITLE'):
split_pep_title = each_line.replace('TITLE=', '').split('|')
stripped_pep = split_pep_title[0]
mod = split_pep_title[1].strip(';')
charge = split_pep_title[2]
if not mod:
prec = '_{}_.{}'.format(stripped_pep, charge)
else:
mod_pep = ''
previous_mod_site = 0
for each_mod in mod.split(';'):
each_mod_info = each_mod.split(',')
mod_site = int(each_mod_info[0])
mod_type = mod_dict[each_mod_info[1]]
mod_pep += stripped_pep[previous_mod_site: mod_site] + mod_type
previous_mod_site = mod_site
mod_pep += stripped_pep[previous_mod_site:]
prec = '_{}_.{}'.format(mod_pep, charge)
elif each_line[0].isdigit():
split_frag_inten_line = each_line.split(' ')
frag_inten = round(float(split_frag_inten_line[1]), 5) * 100
if frag_inten < min_inten_ratio:
continue
frag_mz = split_frag_inten_line[0]
if float(frag_mz) < 10:
continue
frag_name = split_frag_inten_line[2]
frag_type, frag_num, loss_type, frag_c = re.findall('([by])(\d+)-?(.+)?\+(\d)', frag_name)[0]
if int(frag_num) in exclude_frag_num:
continue
if exclude_modloss and loss_type == 'ModLoss':
continue
new_frag_name = f'{frag_type}{frag_num}+{frag_c}'
if not loss_type:
new_frag_name += '-noloss'
else:
new_frag_name += f'-{loss_type}' if loss_type != 'ModLoss' else f'-{modloss_name}'
if require_mz:
fragment_dict[new_frag_name] = (frag_mz, frag_inten)
else:
fragment_dict[new_frag_name] = frag_inten
else:
continue
return predicted_fragment_data
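# The pDeep prediction output parsed above is an MGF-like text file; a sketch
# of the expected layout (illustrative m/z and intensity values):
#     BEGIN IONS
#     TITLE=DKEAIQAYSESLMTSAPK|8,Phospho[Y];|2
#     502.281 0.8312 y5+1
#     771.342 0.0541 b7-ModLoss+2
#     END IONS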
def trans_pdeep2_result_to_df(result: dict, frag_trans=None, pep_trans=None, pep_trans_col='IntPep') -> pd.DataFrame:
df_rows = []
for prec, inten_dict in result.items():
if frag_trans is not None:
inten_dict = {frag_trans[frag]: inten for frag, inten in inten_dict.items()}
one_row = [prec, inten_dict]
if pep_trans is not None:
modpep, charge = prec.split('.')
transed_pep = pep_trans(modpep)
            one_row.append(transed_pep)
        df_rows.append(one_row)
    # Column names for the assembled frame are assumed here
    columns = ['Prec', 'Inten'] + ([pep_trans_col] if pep_trans is not None else [])
    return pd.DataFrame(df_rows, columns=columns)
def read_inten_from_plabel(_plabel_file):
ion_type_list = ['b', 'b-NH3', 'b-H2O', 'b-ModLoss', 'y', 'y-NH3', 'y-H2O', 'y-ModLoss']
_p_df = pd.read_csv(_plabel_file, sep='\t')
_p_df = _p_df.fillna('')
_p_df['prec'] = _p_df.apply(lambda x: '|'.join([x['peptide'], x['modinfo'], x['spec'].split('.')[-3]]), axis=1)
_p_inten_dict = dict()
def _merge_plabel_inten(x):
_one_prec = x['prec']
_one_inten_info = ''.join(x[ion_type_list].tolist()).split(';')[:-1]
_p_inten_dict[_one_prec] = dict([(_o_f.split(',')[0], float(_o_f.split(',')[1])) for _o_f in _one_inten_info])
    # progress_apply requires tqdm's pandas integration; register it if available,
    # otherwise fall back to a plain apply
    try:
        from tqdm import tqdm
        tqdm.pandas()
        _p_df.progress_apply(_merge_plabel_inten, axis=1)
    except ImportError:
        _p_df.apply(_merge_plabel_inten, axis=1)
return _p_inten_dict
class pDeepSpectronaut(SpectronautLibrary):
def __init__(self, spectronaut_version=12):
super(pDeepSpectronaut, self).__init__(spectronaut_version)
self.plabel_title_list = BasicpDeepInfo.pDeepTrainsetTitle
def prec_ion_info(self, one_psm_df: pd.DataFrame, spectronaut_run_name=True):
"""
For pDeep trainset preparation.
        Receives a dataframe for one PSM block and assembles a pd.Series as one row of the plabel dataframe.
        :param one_psm_df: Must contain the columns ['PrecursorCharge', 'StrippedPeptide', 'ModifiedPeptide',
        'FragmentType', 'FragmentNumber', 'FragmentCharge', 'RelativeIntensity', 'FragmentLossType']
        :param spectronaut_run_name: True or False; does not affect the result, but True adds the run name to the plabel spec field
        :return: A pd.Series representing one row of the plabel dataframe
"""
first_row = one_psm_df.iloc[0]
prec_charge = first_row['PrecursorCharge']
if spectronaut_run_name:
run_title = first_row['ReferenceRun']
spec = '{title}.{charge}.0.0'.format(title=run_title, charge=prec_charge)
else:
spec = '{charge}.0.0'.format(charge=prec_charge)
stripped_pep = first_row['StrippedPeptide']
mod_pep = first_row['ModifiedPeptide']
stripped_pep, modinfo = extract_pdeep_mod(mod_pep)
if modinfo == 'Unsupport':
return 'Unsupport'
current_prec_info = pd.Series(data=[spec, stripped_pep, modinfo] + [''] * 8, index=self.plabel_title_list)
for row_index in one_psm_df.index:
line_series = one_psm_df.loc[row_index]
fragment_type = line_series['FragmentType']
fragment_num = line_series['FragmentNumber']
fragment_charge = line_series['FragmentCharge']
fragment_relative_intensity = line_series['RelativeIntensity']
fragment_losstype = line_series['FragmentLossType']
if fragment_type == 'b':
if fragment_losstype == 'noloss':
current_prec_info['b'] += 'b{num}+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity)
elif fragment_losstype == 'NH3':
current_prec_info['b-NH3'] += 'b{num}-NH3+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity)
elif fragment_losstype == 'H2O':
current_prec_info['b-H2O'] += 'b{num}-H2O+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity)
else:
current_prec_info['b-ModLoss'] += 'b{num}-ModLoss+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity)
elif fragment_type == 'y':
if fragment_losstype == 'noloss':
current_prec_info['y'] += 'y{num}+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity)
elif fragment_losstype == 'NH3':
current_prec_info['y-NH3'] += 'y{num}-NH3+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity)
elif fragment_losstype == 'H2O':
current_prec_info['y-H2O'] += 'y{num}-H2O+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity)
else:
current_prec_info['y-ModLoss'] += 'y{num}-ModLoss+{charge},{relative_intensity};'.format(num=fragment_num, charge=fragment_charge, relative_intensity=fragment_relative_intensity)
return current_prec_info
def plabel_trainset(self, output_path, spectronaut_run_name=True):
"""
        Write a pDeep trainset file by calling prec_ion_info to process the library dataframe
"""
trainset_df = pd.DataFrame(columns=self.plabel_title_list)
for each_psm_index in self.get_psm_block_index(self._lib_df):
current_prec_info = self.prec_ion_info(self._lib_df.loc[each_psm_index[0]: each_psm_index[1]], spectronaut_run_name)
            if not isinstance(current_prec_info, pd.Series):
continue
trainset_df = trainset_df.append(current_prec_info, ignore_index=True)
trainset_df.to_csv(output_path, sep='\t', index=False)
def extract_bracket(str_with_bracket):
bracket_start = [left_bracket.start() for left_bracket in re.finditer('\(', str_with_bracket)]
bracket_end = [right_bracket.start() for right_bracket in re.finditer('\)', str_with_bracket)]
return bracket_start, bracket_end
mod_dict = {'M(ox)': 'Oxidation[M]',
'Y(ph)': "Phospho[Y]",
'S(ph)': "Phospho[S]",
'T(ph)': "Phospho[T]",
}
def _plabel_from_mq(x):
def pdeep_mod_extraction(mod_pep):
mod_pep = mod_pep.replace('_', '')
modinfo = ''
mod_start, mod_end = extract_bracket(mod_pep)
mod_len = 0
for mod_site in zip(mod_start, mod_end):
mod_type = mod_pep[mod_site[0] - 1: mod_site[1] + 1].replace(' ', '')
mod_type = mod_dict[mod_type]
modinfo += '{mod_site},{mod_type};'.format(mod_site=mod_site[0] - mod_len, mod_type=mod_type)
mod_len += (mod_site[1] - mod_site[0] + 1)
return modinfo
ion_type_list = ['b', 'b-NH3', 'b-H2O', 'b-ModLoss', 'y', 'y-NH3', 'y-H2O', 'y-ModLoss']
plabel_title = ['spec', 'peptide', 'modinfo', *ion_type_list]
spec_name = '{}.{}.{}.{}.0.dta'.format(x['Raw file'], x['Scan number'], x['Scan number'], x['Charge'])
pep = x['Sequence']
mod_pep = x['Modified sequence']
mod_info = pdeep_mod_extraction(mod_pep)
ions = x['Matches']
intens = x['Intensities']
inten_dict = dict(zip(ion_type_list, [''] * 8))
ion_intens_list = list(zip(ions.split(';'), intens.split(';')))
b_ion_info = [_ for _ in ion_intens_list if _[0].startswith('b')]
y_ion_info = [_ for _ in ion_intens_list if _[0].startswith('y')]
for diff_ion_info in [b_ion_info, y_ion_info]:
current_num = 0
_mod_start = False
_second_mod_start = False
for ion, inten in diff_ion_info:
if '*' in ion:
if not _mod_start:
current_num = 0
_mod_start = True
if '-' in ion:
if _mod_start:
continue
ion_type, ion_num = re.findall('([by])(\d+)', ion)[0]
ion_num = int(ion_num)
re_charge = re.findall('\((\d)\+\)', ion)
if re_charge:
ion_charge = re_charge[0]
else:
ion_charge = '1'
if ion_num <= current_num and '*' in ion:
_second_mod_start = True
continue
if '*' in ion and _second_mod_start:
continue
current_num = ion_num
tag = ion_type
if '*' in ion:
tag += '-ModLoss'
elif '-' in ion:
tag += '-{}'.format(re.findall('-(.+)', ion)[0])
inten_dict[tag] += '{}{}{}+{},{};'.format(ion_type,
ion_num,
'-' + tag.split('-')[1] if '-' in tag else '',
ion_charge,
inten
)
one_psm_data = [spec_name, pep, mod_info, *[inten_dict[_] for _ in ion_type_list]]
return one_psm_data
""" NOTICE This one is for MQ > 1.6, in which the modifications added in the peptide sequence was set as Phospho (STY) but not (ph) in 1.5
def extract_bracket(str_with_bracket):
bracket_start = [left_bracket.start() for left_bracket in re.finditer('\(', str_with_bracket)][::2]
bracket_end = [right_bracket.start() for right_bracket in re.finditer('\)', str_with_bracket)][1::2]
return bracket_start, bracket_end
mod_dict2 = {'M(Oxidation (M))': 'Oxidation[M]',
'Y(Phospho (STY))' : "Phospho[Y]",
'S(Phospho (STY))' : "Phospho[S]",
'T(Phospho (STY))' : "Phospho[T]",}
def pdeep_mod_extraction(mod_pep):
mod_pep = mod_pep.replace('_', '')
modinfo = ''
mod_start, mod_end = extract_bracket(mod_pep)
mod_len = 0
for mod_site in zip(mod_start, mod_end):
mod_type = mod_pep[mod_site[0] - 1: mod_site[1] + 1]# .replace(' ', '')
mod_type = mod_dict2[mod_type]
modinfo += '{mod_site},{mod_type};'.format(mod_site=mod_site[0] - mod_len, mod_type=mod_type)
mod_len += (mod_site[1] - mod_site[0] + 1)
return modinfo
"""
|
py | b401e228653b4aee189fbe9b8926d585732682a8 | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import cvxpy
from cvxpy.expressions.expression import Expression
from cvxpy.atoms.norm_nuc import normNuc
from cvxpy.atoms.sigma_max import sigma_max
from cvxpy.atoms.pnorm import pnorm
from cvxpy.atoms.norm1 import norm1
from cvxpy.atoms.norm_inf import norm_inf
from cvxpy.atoms.affine.vec import vec
def norm(x, p=2, axis=None):
"""Wrapper on the different norm atoms.
Parameters
----------
x : Expression or numeric constant
The value to take the norm of.
p : int or str, optional
The type of norm.
Returns
-------
Expression
An Expression representing the norm.
"""
x = Expression.cast_to_const(x)
# matrix norms take precedence
num_nontrivial_idxs = sum([d > 1 for d in x.shape])
if axis is None and x.ndim == 2:
if p == 1: # matrix 1-norm
return cvxpy.atoms.max(norm1(x, axis=0))
# Frobenius norm
elif p == 'fro' or (p == 2 and num_nontrivial_idxs == 1):
return pnorm(vec(x), 2)
elif p == 2: # matrix 2-norm is largest singular value
return sigma_max(x)
elif p == 'nuc': # the nuclear norm (sum of singular values)
return normNuc(x)
elif p in [np.inf, "inf", "Inf"]: # the matrix infinity-norm
return cvxpy.atoms.max(norm1(x, axis=1))
else:
raise RuntimeError('Unsupported matrix norm.')
else:
if p == 1 or x.is_scalar():
return norm1(x, axis=axis)
elif p in [np.inf, "inf", "Inf"]:
return norm_inf(x, axis)
else:
return pnorm(x, p, axis)
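if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): exercise the
    # wrapper on a small matrix variable and a column slice.
    X = cvxpy.Variable((3, 3))
    print(norm(X, "fro").curvature)    # Frobenius norm -> pnorm(vec(X), 2)
    print(norm(X, "nuc").curvature)    # nuclear norm -> normNuc(X)
    print(norm(X, 2).curvature)        # matrix 2-norm -> sigma_max(X)
    print(norm(X[:, 0], 1).curvature)  # vector 1-norm -> norm1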
|
py | b401e28785c13ec7ebf67499661aec7aa7258265 | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf  # required for tf.contrib.util.constant_value below


def attr_str(value):
""" Convert attribute to string value."""
if isinstance(value, bool):
return "true" if value else "false"
elif isinstance(value, int):
return str(value)
elif isinstance(value, long):
return str(value)
elif isinstance(value, str):
return value
elif isinstance(value, list):
l = []
for v in value: l.append(attr_str(v))
return ",".join(l)
elif value.__class__.__name__ == "TensorShapeProto":
dims = []
for d in value.dim: dims.append(str(d.size))
return "x".join(dims)
elif value.__class__.__name__ == "TensorProto":
return str(value)
elif value.__class__.__name__ == "DType":
return value.name
else:
return str(type(value)) + ":" + str(value).replace('\n', ' ')
class Extractor:
"""Extract myelin flow from tensorflow graph."""
def __init__(self, sess, flow):
"""Initialize empty flow builder."""
self.sess = sess
self.feed = None
self.flow = flow
self.vars = []
self.ops = []
def add(self, func, inputs, outputs):
"""Add ops to flow."""
for var in outputs:
self.expand(func, var, inputs)
def expand(self, func, var, inputs):
"""Traverse graphs and add ops to flow."""
if var not in self.vars:
# Add new variable to flow.
self.vars.append(var)
v = self.flow.var(var.name, var.dtype.base_dtype.name, [])
# Get data for constants and variables.
if var.op.type in ["Const", "ConstV2"]:
v.data = tf.contrib.util.constant_value(var)
elif var.op.type in ["Variable", "VariableV2"]:
if self.feed is None:
v.data = var.eval(session=self.sess)
else:
v.data = self.sess.run(var, feed_dict=self.feed)
# Get shape.
if v.data is None:
shape = var.get_shape()
for d in shape.as_list():
if d != None:
v.shape.append(d)
else:
v.shape.append(-1)
else:
for d in v.data.shape:
v.shape.append(d)
if not var in inputs:
op = var.op
if op not in self.ops:
# Add new operation to flow function.
self.ops.append(op)
o = self.flow.op(op.name)
func.add(o)
o.type = op.type
for input in op.inputs:
o.add_input(self.flow.var(input.name))
for output in op.outputs:
o.add_output(self.flow.var(output.name))
for a in op.node_def.attr:
o.add_attr(a, attr_str(op.get_attr(a)))
# Traverse dependencies.
for dep in op.inputs:
self.expand(func, dep, inputs)
def compute_shapes(self):
"""Compute shapes for variables with missing shape information."""
# Find all variables with missing shape information.
missing = {}
for var in self.vars:
v = self.flow.var(var.name)
if not v.shape_defined():
missing[v] = var
if len(missing) > 0:
# Compute variables from feed.
results = self.sess.run(missing, feed_dict=self.feed)
# Use the shape of the computed variables for the flow.
for v in results:
v.shape = results[v].shape |
py | b401e3ed87e56d88d83c3df6cb75e43a7d4c9114 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/MeasureReport) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class MeasureReport(domainresource.DomainResource):
""" Results of a measure evaluation.
The MeasureReport resource contains the results of the calculation of a
measure; and optionally a reference to the resources involved in that
calculation.
"""
resource_type = "MeasureReport"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.date = None
""" When the report was generated.
Type `FHIRDate` (represented as `str` in JSON). """
self.evaluatedResource = None
""" What data was used to calculate the measure score.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.group = None
""" Measure results for each group.
List of `MeasureReportGroup` items (represented as `dict` in JSON). """
self.identifier = None
""" Additional identifier for the MeasureReport.
List of `Identifier` items (represented as `dict` in JSON). """
self.improvementNotation = None
""" increase | decrease.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.measure = None
""" What measure was calculated.
Type `str`. """
self.period = None
""" What period the report covers.
Type `Period` (represented as `dict` in JSON). """
self.reporter = None
""" Who is reporting the data.
Type `FHIRReference` (represented as `dict` in JSON). """
self.status = None
""" complete | pending | error.
Type `str`. """
self.subject = None
""" What individual(s) the report is for.
Type `FHIRReference` (represented as `dict` in JSON). """
self.type = None
""" individual | subject-list | summary | data-collection.
Type `str`. """
super(MeasureReport, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MeasureReport, self).elementProperties()
js.extend([
("date", "date", fhirdate.FHIRDate, False, None, False),
("evaluatedResource", "evaluatedResource", fhirreference.FHIRReference, True, None, False),
("group", "group", MeasureReportGroup, True, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("improvementNotation", "improvementNotation", codeableconcept.CodeableConcept, False, None, False),
("measure", "measure", str, False, None, True),
("period", "period", period.Period, False, None, True),
("reporter", "reporter", fhirreference.FHIRReference, False, None, False),
("status", "status", str, False, None, True),
("subject", "subject", fhirreference.FHIRReference, False, None, False),
("type", "type", str, False, None, True),
])
return js
from . import backboneelement
class MeasureReportGroup(backboneelement.BackboneElement):
""" Measure results for each group.
The results of the calculation, one for each population group in the
measure.
"""
resource_type = "MeasureReportGroup"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.code = None
""" Meaning of the group.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.measureScore = None
""" What score this group achieved.
Type `Quantity` (represented as `dict` in JSON). """
self.population = None
""" The populations in the group.
List of `MeasureReportGroupPopulation` items (represented as `dict` in JSON). """
self.stratifier = None
""" Stratification results.
List of `MeasureReportGroupStratifier` items (represented as `dict` in JSON). """
super(MeasureReportGroup, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MeasureReportGroup, self).elementProperties()
js.extend([
("code", "code", codeableconcept.CodeableConcept, False, None, False),
("measureScore", "measureScore", quantity.Quantity, False, None, False),
("population", "population", MeasureReportGroupPopulation, True, None, False),
("stratifier", "stratifier", MeasureReportGroupStratifier, True, None, False),
])
return js
class MeasureReportGroupPopulation(backboneelement.BackboneElement):
""" The populations in the group.
The populations that make up the population group, one for each type of
population appropriate for the measure.
"""
resource_type = "MeasureReportGroupPopulation"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.code = None
""" initial-population | numerator | numerator-exclusion | denominator
| denominator-exclusion | denominator-exception | measure-
population | measure-population-exclusion | measure-observation.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.count = None
""" Size of the population.
Type `int`. """
self.subjectResults = None
""" For subject-list reports, the subject results in this population.
Type `FHIRReference` (represented as `dict` in JSON). """
super(MeasureReportGroupPopulation, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MeasureReportGroupPopulation, self).elementProperties()
js.extend([
("code", "code", codeableconcept.CodeableConcept, False, None, False),
("count", "count", int, False, None, False),
("subjectResults", "subjectResults", fhirreference.FHIRReference, False, None, False),
])
return js
class MeasureReportGroupStratifier(backboneelement.BackboneElement):
""" Stratification results.
When a measure includes multiple stratifiers, there will be a stratifier
group for each stratifier defined by the measure.
"""
resource_type = "MeasureReportGroupStratifier"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.code = None
""" What stratifier of the group.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.stratum = None
""" Stratum results, one for each unique value, or set of values, in
the stratifier, or stratifier components.
List of `MeasureReportGroupStratifierStratum` items (represented as `dict` in JSON). """
super(MeasureReportGroupStratifier, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MeasureReportGroupStratifier, self).elementProperties()
js.extend([
("code", "code", codeableconcept.CodeableConcept, True, None, False),
("stratum", "stratum", MeasureReportGroupStratifierStratum, True, None, False),
])
return js
class MeasureReportGroupStratifierStratum(backboneelement.BackboneElement):
""" Stratum results, one for each unique value, or set of values, in the
stratifier, or stratifier components.
This element contains the results for a single stratum within the
stratifier. For example, when stratifying on administrative gender, there
will be four strata, one for each possible gender value.
"""
resource_type = "MeasureReportGroupStratifierStratum"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.component = None
""" Stratifier component values.
List of `MeasureReportGroupStratifierStratumComponent` items (represented as `dict` in JSON). """
self.measureScore = None
""" What score this stratum achieved.
Type `Quantity` (represented as `dict` in JSON). """
self.population = None
""" Population results in this stratum.
List of `MeasureReportGroupStratifierStratumPopulation` items (represented as `dict` in JSON). """
self.value = None
""" The stratum value, e.g. male.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MeasureReportGroupStratifierStratum, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MeasureReportGroupStratifierStratum, self).elementProperties()
js.extend([
("component", "component", MeasureReportGroupStratifierStratumComponent, True, None, False),
("measureScore", "measureScore", quantity.Quantity, False, None, False),
("population", "population", MeasureReportGroupStratifierStratumPopulation, True, None, False),
("value", "value", codeableconcept.CodeableConcept, False, None, False),
])
return js
class MeasureReportGroupStratifierStratumComponent(backboneelement.BackboneElement):
""" Stratifier component values.
A stratifier component value.
"""
resource_type = "MeasureReportGroupStratifierStratumComponent"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.code = None
""" What stratifier component of the group.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.value = None
""" The stratum component value, e.g. male.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(MeasureReportGroupStratifierStratumComponent, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MeasureReportGroupStratifierStratumComponent, self).elementProperties()
js.extend([
("code", "code", codeableconcept.CodeableConcept, False, None, True),
("value", "value", codeableconcept.CodeableConcept, False, None, True),
])
return js
class MeasureReportGroupStratifierStratumPopulation(backboneelement.BackboneElement):
""" Population results in this stratum.
The populations that make up the stratum, one for each type of population
appropriate to the measure.
"""
resource_type = "MeasureReportGroupStratifierStratumPopulation"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.code = None
""" initial-population | numerator | numerator-exclusion | denominator
| denominator-exclusion | denominator-exception | measure-
population | measure-population-exclusion | measure-observation.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.count = None
""" Size of the population.
Type `int`. """
self.subjectResults = None
""" For subject-list reports, the subject results in this population.
Type `FHIRReference` (represented as `dict` in JSON). """
super(MeasureReportGroupStratifierStratumPopulation, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(MeasureReportGroupStratifierStratumPopulation, self).elementProperties()
js.extend([
("code", "code", codeableconcept.CodeableConcept, False, None, False),
("count", "count", int, False, None, False),
("subjectResults", "subjectResults", fhirreference.FHIRReference, False, None, False),
])
return js
import sys
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period']
try:
from . import quantity
except ImportError:
quantity = sys.modules[__package__ + '.quantity']
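# A minimal, hypothetical usage sketch (not part of the generated file); the
# module is meant to be imported as part of the FHIR models package:
#
#     report = MeasureReport({
#         "resourceType": "MeasureReport",
#         "status": "complete",
#         "type": "summary",
#         "measure": "http://example.org/fhir/Measure/example",
#         "period": {"start": "2019-01-01", "end": "2019-12-31"},
#     })
#     print(report.status, report.type)
#     print(report.as_json())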
|
py | b401e3f73ade224a4885feafd7803f9027a912d6 | import numpy as np
import soundfile as sf
from scipy.io.wavfile import read
from scipy import interpolate
from herpetologist import check_type
def resample(data, old_samplerate, new_samplerate):
"""
Resample signal.
Parameters
----------
data: np.array
old_samplerate: int
old sample rate.
new_samplerate: int
new sample rate.
    Returns
    -------
    result : np.array
        Signal resampled to `new_samplerate`.
"""
old_audio = data
duration = data.shape[0] / old_samplerate
time_old = np.linspace(0, duration, old_audio.shape[0])
time_new = np.linspace(
0, duration, int(old_audio.shape[0] * new_samplerate / old_samplerate)
)
interpolator = interpolate.interp1d(time_old, old_audio.T)
data = interpolator(time_new).T
return data
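# Example (illustrative): resampling one second of 16 kHz audio to 8 kHz
# halves the number of samples:
#     resample(np.zeros(16000), 16000, 8000).shape  ->  (8000,)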
def read_audio(data, old_samplerate, sample_rate=22050):
if len(data.shape) == 2:
data = data[:, 0]
if old_samplerate != sample_rate and sample_rate is not None:
data = resample(data, old_samplerate, sample_rate)
else:
sample_rate = old_samplerate
return data, sample_rate
@check_type
def load(file: str, sr=16000, scale: bool = True):
"""
Read sound file, any format supported by soundfile.read
Parameters
----------
file: str
sr: int, (default=16000)
new sample rate. If input sample rate is not same, will resample automatically.
scale: bool, (default=True)
Scale to -1 and 1.
Returns
-------
result: (y, sr)
"""
data, old_samplerate = sf.read(file)
y, sr = read_audio(data, old_samplerate, sr)
if scale:
y = y / (np.max(np.abs(y)) + 1e-9)
return y, sr
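if __name__ == '__main__':
    # Minimal usage sketch (hypothetical file name): read an audio file,
    # resample it to 16 kHz and scale the waveform to [-1, 1].
    y, sr = load('speech.wav', sr=16000, scale=True)
    print(y.shape, sr)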
|
py | b401e4c043e2abccf371a616716fefbd25783b85 | """ Implementation of the most-frequent answer (MFA) model which predicts responses based on the
most-frequently selected choice from the available background (training) data.
"""
import ccobra
import numpy as np
import copy
from bidict import bidict
import random
'''
Mental model for predicting answers to generalized syllogisms. Some definitions:
Task (encoded as [Quantifier1][Quantifier2][Figure])
All: A
Most: T
Most not: D
Some: I
Some not: O
Few: B
No: E
'''
class CustomModel(ccobra.CCobraModel):
# Conversion Functions
@staticmethod
def addall(s, elements):
for e in elements:
if not (e in s):
s.append(e)
def conversion_predict(self, item, **kwargs):
reverse_first_premise = True if random.random() < self.params["reverse_first_premise"] else False
reverse_second_premise = True if random.random() < self.params["reverse_second_premise"] else False
proposition1 = item.task[0]
proposition2 = item.task[1]
premises1 = [proposition1]
premises2 = [proposition2]
if reverse_first_premise and random.random() < self.params[proposition1[0]]:
premises1.append([proposition1[0], proposition1[2], proposition1[1]])
if reverse_second_premise and random.random() < self.params[proposition2[0]]:
premises2.append([proposition2[0], proposition2[2], proposition2[1]])
if item.task[0][1] == item.task[1][1]:
a = item.task[0][2]
b = item.task[0][1]
c = item.task[1][2]
elif item.task[0][1] == item.task[1][2]:
a = item.task[0][2]
b = item.task[0][1]
c = item.task[1][1]
elif item.task[0][2] == item.task[1][1]:
a = item.task[0][1]
b = item.task[0][2]
c = item.task[1][2]
else:
a = item.task[0][1]
b = item.task[0][2]
c = item.task[1][1]
predictions = []
for p1 in premises1:
for p2 in premises2:
if p1 == ["All", a, b]:
if p2 == ["All", b, c]:
self.addall(predictions, [["All", a, c], ["Some", a, c], ["Some", c, a]])
elif p2 in [["No", b, c], ["No", c, b]]:
self.addall(predictions, [["No", a, c], ["No", c, a], ["Some not", a, c], ["Some not", c, a]])
elif p2 in [["Some not", c, b], ["Few", c, b], ["Most", c, b], ["Few not", c, b], ["Most not", c, b]]:
self.addall(predictions, [["Some not", c, a]])
elif p1 == ["All", b, a]:
if p2 == ["All", c, b]:
self.addall(predictions, [["All", a, c], ["Some", a, c], ["Some", c, a]])
elif p2 in [["All", b, c], ["Some", c, b], ["Some", b, c]]:
self.addall(predictions, [["Some", a, c], ["Some", c, a]])
elif p2 in [["No", c, b], ["No", b, c], ["Some not", b, c]]:
self.addall(predictions, [["Some not", a, c]])
elif p2 in [["Few", b, c], ["Most", b, c], ["Few not", b, c], ["Most not", b, c]]:
self.addall(predictions, [["Some", a, c], ["Some", c, a], ["Some not", a, c]])
elif p2 in [["Few", c, b], ["Most not", c, b]]:
self.addall(predictions, [["Few", c, a], ["Some", a, c], ["Some", c, a], ["Most not", c, a],
["Some not", c, a]])
elif p2 in [["Most", c, b], ["Few not", c, b]]:
self.addall(predictions, [["Most", c, a], ["Some", a, c], ["Some", c, a], ["Few not", c, a],
["Some not", c, a]])
elif p1 == ["Some", a, b]:
if p2 == ["All", b, c]:
self.addall(predictions, [["Some", a, c], ["Some", c, a]])
elif p2 in [["No", b, c], ["No", c, b]]:
self.addall(predictions, [["Some not", a, c]])
elif p1 == ["Some", b, a]:
if p2 == ["All", b, c]:
self.addall(predictions, [["Some", a, c], ["Some", c, a]])
elif p2 in [["No", c, b], ["No", b, c]]:
self.addall(predictions, [["Some not", a, c]])
elif p1[0] == "No":
if p2 == ["All", c, b]:
self.addall(predictions, [["No", c, a], ["No", a, c], ["Some not", a, c], ["Some not", c, a]])
elif p2 == ["All", b, c] or p2[0] in ["Some", "Few", "Most", "Most not", "Few not"]:
self.addall(predictions, [["Some not", c, a]])
elif p1 == ["Some not", a, b]:
if p2 == ["All", c, b]:
self.addall(predictions, [["Some not", a, c]])
elif p1 == ["Some not", b, a]:
if p2 == ["All", b, c]:
self.addall(predictions, [["Some not", c, a]])
elif p1 in [["Few", a, b], ["Most Not", a, b]]:
if p2 == ['All', b, c]:
self.addall(predictions, [["Few", a, c], ["Some", a, c], ["Some", c, a], ["Some not", a, c],
["Most not", a, c]])
elif p2 == ['All', c, b] or p2[0] == 'No':
self.addall(predictions, [["Some not", a, c]])
elif p1 in [["Few", b, a], ["Most not", b, a]]:
if p2 == ["All", b, c]:
self.addall(predictions, [["Some", a, c], ["Some", c, a], ["Some not", c, a]])
elif p2 in [["Most", b, c], ["Few not", b, c]]:
self.addall(predictions, [["Some not", c, a]])
elif p1 in [["Most", a, b], ["Few not", a, b]]:
if p2 == ['All', b, c]:
self.addall(predictions, [["Most", a, c], ["Some", a, c], ["Some", c, a], ["Few not", a, c],
["Some not", a, c]])
elif p2 == ['All', c, b] or p2[0] == "No":
self.addall(predictions, [["Some not", a, c]])
                elif p1 in [["Most", b, a], ["Few not", b, a]]:
if p2 == ["All", b, c]:
self.addall(predictions, [["Some", a, c], ["Some", c, a], ["Some not", c, a]])
elif p2 in [["Most", b, c], ["Few not", b, c]]:
self.addall(predictions, [["Some", a, c], ["Some", c, a]])
elif p2 in [["Most not", b, c], ["Few", b, c]]:
self.addall(predictions, [["Some not", a, c]])
for p in predictions:
if item.task[0][0] in p[0] or item.task[1][0] in p[0]:
return p
for p in predictions:
if p[0] == "Some":
return p
# NVC
if [["NVC"]] in item.choices:
return ["NVC"]
else:
            return random.choice(item.choices)
# Matching Functions
def get_conclusion_mood(self, item):
"""computes the most conservative moods of a task."""
most_conservative_rank = max(self.mood_to_rank[item.task[0][0]], self.mood_to_rank[item.task[1][0]])
conclusion_mood = self.rank_to_mood[most_conservative_rank]
return conclusion_mood
def get_conclusion_terms(self, item):
"""extracts the two elements of the premises that are used for the conclusion, aka. removes the "connection"."""
elements = [item.task[0][1], item.task[0][2], item.task[1][1], item.task[1][2]]
connecting_element = None
valid = True
for i in range(1, 3):
for j in range(1, 3):
if item.task[0][i] == item.task[1][j]:
connecting_element = item.task[1][j]
for removals in range(2):
elements.remove(connecting_element)
if not connecting_element:
print("Found no connecting element in task {}".format(item.task))
valid = False
return elements, valid
def build_conclusion(self, conclusion_mood, elements):
"""uses the given mood and elements to build all possible conclusions according to our Matching hypothesis"""
possible_conclusions = []
for mood in conclusion_mood:
possible_conclusions.append([mood, elements[0], elements[1]])
possible_conclusions.append([mood, elements[1], elements[0]])
return possible_conclusions
def matching_predict(self, item, **kwargs):
"""Predict the responses based on the extension of the students of the Matching hypothesis to generalized quantifiers"""
elements, is_valid = self.get_conclusion_terms(item)
conclusion_mood = self.get_conclusion_mood(item)
possible_conclusions = self.build_conclusion(conclusion_mood, elements)
conclusion_list = []
for poss in item.choices:
conclusion_list.append(poss[0])
        # keep only conclusions that are actually offered, without mutating the
        # list while iterating over it
        possible_conclusions = [c for c in possible_conclusions if c in conclusion_list]
if len(possible_conclusions) == 0:
return ['NVC']
return random.choice(possible_conclusions)
# PMM Functions
def sharing_rows(self, model, x, y, invert_row = None):
copy_model = copy.copy(model)
if invert_row:
copy_model[:, invert_row] = np.invert(copy_model[:, invert_row])
count = 0
for row in copy_model:
if row[x] and row[y]:
count += 1
return count
def axiom_all(self, model, x, y):
return (model[:, x].sum() != 0 and
model[:, y].sum() != 0 and
model[:, x].sum() == self.sharing_rows(model, x, y)) #and
#model[:, y].sum() == self.sharing_rows(model, x, y))
def axiom_some(self, model, x, y):
return (self.sharing_rows(model, x, y) != 0 and
self.sharing_rows(model, x, y) == self.sharing_rows(model, x, y, invert_row=y))
def axiom_no(self, model, x, y):
return (self.sharing_rows(model, x, y) == 0 and
                model[:, x].sum() != 0 and
                model[:, y].sum() != 0)
def axiom_some_not(self, model, x, y):
return (0 != self.sharing_rows(model, x, y, invert_row=y) and
self.sharing_rows(model, x, y) == self.sharing_rows(model, x, y, invert_row=y))
def axiom_most(self, model, x, y):
return (self.sharing_rows(model, x, y, invert_row=y) < self.sharing_rows(model, x, y) and
0 != self.sharing_rows(model, x, y, invert_row=y))
def axiom_few(self, model, x, y):
return (self.sharing_rows(model, x, y, invert_row=y) > self.sharing_rows(model, x, y) and
0 != self.sharing_rows(model, x, y))
def fill_first_premise(self, model, quantifier, instance_a, instance_b):
if quantifier == 'All':
model[0, instance_a] = 1
model[0, instance_b] = 1
elif quantifier == 'Most':
model[0:3, instance_a] = 1
model[0:2, instance_b] = 1
elif quantifier == 'Few' or quantifier == 'Most not':
model[0:3, instance_a] = 1
model[0, instance_b] = 1
elif quantifier == 'Some' or quantifier == 'Some not':
model[0:2, instance_a] = 1
model[0, instance_b] = 1
model[2, instance_b] = 1
else:
model[0, instance_a] = 1
model[1, instance_b] = 1
return model
def fill_second_premise(self, model, quantifier, instance_b, instance_c, instance_a):
if quantifier == 'All':
for i in range(model.shape[0]):
if model[i, instance_b] == 1:
model[i, instance_c] = 1
num_a = model[:, instance_a].sum()
if quantifier == 'Most':
for i in range(num_a, model.shape[0]):
if model[:, instance_b].sum() == 3:
break
if model[i, instance_b] == 0:
model[i, instance_b] = 1
for i in range(model.shape[0]):
if model[:, instance_c].sum() == 2:
break
if model[i, instance_b] == 1:
model[i, instance_c] = 1
if quantifier == 'Few' or quantifier == 'Most not':
for i in range(num_a, model.shape[0]):
if model[:, instance_b].sum() == 3:
break
if model[i, instance_b] == 0:
model[i, instance_b] = 1
for i in range(model.shape[0]):
if model[:, instance_c].sum() == 1:
break
if model[i, instance_b] == 1:
model[i, instance_c] = 1
if quantifier == 'Some' or quantifier == 'Some not':
for i in range(num_a, model.shape[0]):
if model[:, instance_b].sum() == 2:
break
if model[i, instance_b] == 0:
model[i, instance_b] = 1
for i in range(model.shape[0]):
if model[i, instance_b] == 1:
model[i, instance_c] = 1
break
if quantifier == 'No':
for i in range(model.shape[0]):
if model[i, instance_b] == 0:
model[i, instance_c] = 1
break
return model
def pmm_predict(self, item, **kwargs):
model = np.zeros((6, 3), dtype=bool)
syl = ccobra.syllogistic_generalized.GeneralizedSyllogism(item)
instances = bidict({syl.A: 0, syl.B: 1, syl.C: 2})
# Fill model with first premise
q1, inst1_1_str, inst1_2_str = syl.p1
inst1_1 = instances[inst1_1_str]
inst1_2 = instances[inst1_2_str]
model = self.fill_first_premise(model, q1, inst1_1, inst1_2)
# Fill model with second premise
q2, inst2_1_str, inst2_2_str, = syl.p2
inst2_1 = instances[inst2_1_str]
inst2_2 = instances[inst2_2_str]
model = self.fill_second_premise(model, q2, inst2_1, inst2_2, 0)
for quantifier in self.axioms:
if self.axioms[quantifier](model, 0, 2):
if [[quantifier, syl.A, syl.C]] in item.choices:
return [[quantifier, syl.A, syl.C]]
elif [[quantifier, syl.C, syl.A]] in item.choices:
return [[quantifier, syl.C, syl.A]]
if [['NVC']] in item.choices:
return [['NVC']]
# Custom functions
def encode_item(self, item):
return ccobra.syllogistic_generalized.GeneralizedSyllogism(item)
def rank_encoded_task(self, q1, q2, fig):
if q1 == q2:
return 0
difficulty = 1
if q1 not in ['A', 'E']:
difficulty += 1
if q2 not in ['A', 'E']:
difficulty += 1
if fig == '2':
difficulty += 1
if fig == '3':
difficulty +=2
return difficulty
def __init__(self, name='Custom'):
super(CustomModel, self).__init__(name, ['syllogistic-generalized'], ['single-choice'])
# Conversion
self.params = {
"reverse_first_premise": 0.2,
"reverse_second_premise": 0.2,
"All": 0.4,
"No": 0,
"Some": 0,
"Some not": 0.4,
"Most": 0.4,
"Few": 0.4,
"Most not": 0.4,
"Few not": 0.4
}
# PartNeg Rules
self.nvc_answered = False
# Matching
self.mood_to_rank = {'No': 6, 'Most not': 5, 'Some not': 4, 'Some': 3, 'Few': 2, 'Most': 1, 'All': 0}
self.rank_to_mood = {6: ['No'], 5: ['Most not'], 4: ['Some not'], 3: ['Some'], 2: ['Few'], 1: ['Most'], 0: ['All']}
# PMM
self.axioms = {
'All': self.axiom_all,
'No': self.axiom_no,
'Some not': self.axiom_some_not,
'Some': self.axiom_some,
'Few': self.axiom_few,
'Most': self.axiom_most,
'Most not': self.axiom_few
}
# List ranking a question's difficulty and most frequent answer
quantifiers = ['A', 'T', 'D', 'I', 'O', 'B', 'E']
combinations = [x+y+z for x in quantifiers for y in quantifiers for z in ['1', '2', '3', '4']]
self.rank = {}
for entry in combinations:
self.rank[entry] = self.rank_encoded_task(entry[0], entry[1], entry[2])
def pre_train(self, dataset, **kwargs):
""" No custom pretrained since we're using other models
"""
def predict(self, item, **kwargs):
""" Generate prediction based on difficulty and using other models
"""
#Using the rules for NVC
syl = ccobra.syllogistic_generalized.GeneralizedSyllogism(item)
if self.nvc_answered and item.task[0][0] != 'All' and item.task[1][0] != 'All':
return syl.decode_response('NVC')
syl = self.encode_item(item)
enc_choices = [syl.encode_response(x) for x in item.choices]
difficulty = self.rank[syl.encoded_task]
if difficulty == 0:
# Use conversion
return self.conversion_predict(item)
if difficulty == 1 or difficulty == 2:
# Use matching (3 since it delivered best results)
return self.matching_predict(item)
elif difficulty >= 3:
# Use pmm
return self.pmm_predict(item)
else:
# Return a random answer
return syl.decode_response(np.random.choice(enc_choices))
def adapt(self, item, truth, **kwargs):
""" Just used to check if the participant has already answered nvc at least once
"""
syl = ccobra.syllogistic_generalized.GeneralizedSyllogism(item)
task_enc = syl.encoded_task
true = syl.encode_response(truth)
if true == "NVC":
self.nvc_answered = True
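# Small illustration (ours, not part of the model): how the difficulty rank
# built in __init__ routes an encoded task to a strategy in predict() --
# 0 -> conversion, 1-2 -> matching, >=3 -> PMM. The encoded tasks below are
# hypothetical examples in the quantifier+figure encoding of rank_encoded_task.
if __name__ == "__main__":
    demo_model = CustomModel()
    for enc in ["AA1", "AI1", "IO3"]:
        print(enc, "-> difficulty", demo_model.rank[enc])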
|
py | b401e5006c135a430bdefd3112c35f13ce20a630 | class ABC():
class_var = 0
def __init__(self,var):
ABC.class_var += 1
self.var = var
print("object value is :",var)
print("The value of the class variable is :",ABC.class_var)
def __del__(self):
ABC.class_var -= 1
print("Object with value %d is going out of scope"%self.var)
obj1 = ABC(10)
obj2 = ABC(20)
obj3 = ABC(30)
del obj1
del obj2
del obj3
|
py | b401e50c2b34a988c70f837658816c23af41290d | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class LoadBalancerBackendAddressPoolsOperations:
"""LoadBalancerBackendAddressPoolsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name: str,
load_balancer_name: str,
**kwargs: Any
) -> AsyncIterable["_models.LoadBalancerBackendAddressPoolListResult"]:
"""Gets all the load balancer backed address pools.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LoadBalancerBackendAddressPoolListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.LoadBalancerBackendAddressPoolListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LoadBalancerBackendAddressPoolListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('LoadBalancerBackendAddressPoolListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools'} # type: ignore
async def get(
self,
resource_group_name: str,
load_balancer_name: str,
backend_address_pool_name: str,
**kwargs: Any
) -> "_models.BackendAddressPool":
"""Gets load balancer backend address pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param backend_address_pool_name: The name of the backend address pool.
:type backend_address_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackendAddressPool, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.BackendAddressPool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackendAddressPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('BackendAddressPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
load_balancer_name: str,
backend_address_pool_name: str,
parameters: "_models.BackendAddressPool",
**kwargs: Any
) -> "_models.BackendAddressPool":
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackendAddressPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'BackendAddressPool')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('BackendAddressPool', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('BackendAddressPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
load_balancer_name: str,
backend_address_pool_name: str,
parameters: "_models.BackendAddressPool",
**kwargs: Any
) -> AsyncLROPoller["_models.BackendAddressPool"]:
"""Creates or updates a load balancer backend address pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param backend_address_pool_name: The name of the backend address pool.
:type backend_address_pool_name: str
:param parameters: Parameters supplied to the create or update load balancer backend address
pool operation.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.BackendAddressPool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BackendAddressPool or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.BackendAddressPool]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackendAddressPool"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
backend_address_pool_name=backend_address_pool_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('BackendAddressPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
load_balancer_name: str,
backend_address_pool_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
load_balancer_name: str,
backend_address_pool_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified load balancer backend address pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param load_balancer_name: The name of the load balancer.
:type load_balancer_name: str
:param backend_address_pool_name: The name of the backend address pool.
:type backend_address_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
load_balancer_name=load_balancer_name,
backend_address_pool_name=backend_address_pool_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'loadBalancerName': self._serialize.url("load_balancer_name", load_balancer_name, 'str'),
'backendAddressPoolName': self._serialize.url("backend_address_pool_name", backend_address_pool_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/backendAddressPools/{backendAddressPoolName}'} # type: ignore
|
py | b401e55e04024d241e086f1a8ae095f226764e42 | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class WemLearningAssignmentRuleRunTopicWemLearningAssignmentsCreated(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
WemLearningAssignmentRuleRunTopicWemLearningAssignmentsCreated - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'module': 'WemLearningAssignmentRuleRunTopicLearningModuleReference'
}
self.attribute_map = {
'module': 'module'
}
self._module = None
@property
def module(self):
"""
Gets the module of this WemLearningAssignmentRuleRunTopicWemLearningAssignmentsCreated.
:return: The module of this WemLearningAssignmentRuleRunTopicWemLearningAssignmentsCreated.
:rtype: WemLearningAssignmentRuleRunTopicLearningModuleReference
"""
return self._module
@module.setter
def module(self, module):
"""
Sets the module of this WemLearningAssignmentRuleRunTopicWemLearningAssignmentsCreated.
:param module: The module of this WemLearningAssignmentRuleRunTopicWemLearningAssignmentsCreated.
:type: WemLearningAssignmentRuleRunTopicLearningModuleReference
"""
self._module = module
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
py | b401e5d0924cf9b89dc050045a9a5b3bf9d02422 | # coding: utf-8
"""
Statuspage API
# Code of Conduct Please don't abuse the API, and please report all feature requests and issues to https://help.statuspage.io/help/contact-us-30 # Rate Limiting Each API token is limited to 1 request / second as measured on a 60 second rolling window. To get this limit increased or lifted, please contact us at https://help.statuspage.io/help/contact-us-30 # Basics ## HTTPS It's required ## URL Prefix In order to maintain version integrity into the future, the API is versioned. All calls currently begin with the following prefix: https://api.statuspage.io/v1/ ## RESTful Interface Wherever possible, the API seeks to implement repeatable patterns with logical, representative URLs and descriptive HTTP verbs. Below are some examples and conventions you will see throughout the documentation. * Collections are buckets: https://api.statuspage.io/v1/pages/asdf123/incidents.json * Elements have unique IDs: https://api.statuspage.io/v1/pages/asdf123/incidents/jklm456.json * GET will retrieve information about a collection/element * POST will create an element in a collection * PATCH will update a single element * PUT will replace a single element in a collection (rarely used) * DELETE will destroy a single element ## Sending Data Information can be sent in the body as form urlencoded or JSON, but make sure the Content-Type header matches the body structure or the server gremlins will be angry. All examples are provided in JSON format, however they can easily be converted to form encoding if required. Some examples of how to convert things are below: // JSON { \"incident\": { \"name\": \"test incident\", \"components\": [\"8kbf7d35c070\", \"vtnh60py4yd7\"] } } // Form Encoded (using curl as an example): curl -X POST https://api.statuspage.io/v1/example \\ -d \"incident[name]=test incident\" \\ -d \"incident[components][]=8kbf7d35c070\" \\ -d \"incident[components][]=vtnh60py4yd7\" # Authentication <!-- ReDoc-Inject: <security-definitions> --> # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import spio
from spio.models.post_pages_page_id_incidents_incident_components import PostPagesPageIdIncidentsIncidentComponents # noqa: E501
from spio.rest import ApiException
class TestPostPagesPageIdIncidentsIncidentComponents(unittest.TestCase):
"""PostPagesPageIdIncidentsIncidentComponents unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPostPagesPageIdIncidentsIncidentComponents(self):
"""Test PostPagesPageIdIncidentsIncidentComponents"""
# FIXME: construct object with mandatory attributes with example values
# model = spio.models.post_pages_page_id_incidents_incident_components.PostPagesPageIdIncidentsIncidentComponents() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | b401e6433d4bf0201fb08b047dd220f97d387282 | def isUnique(test_str):
test_str=list(test_str)
test_str.sort()
for i in range(0,len(test_str)-1):
if test_str[i]==test_str[i+1]:
return False
return True
# time complexity is O(n log n) because of the sort
def isUniqueBitVector(test_str):
    # with only 26 possible lowercase letters, any longer string must repeat one
    if len(test_str) > 26:
        return False
    '''here we assume only the 26 lowercase characters a-z are allowed in the string'''
    # a dictionary-based hash would also work, but a bit vector is cheaper,
    # so the dictionary variant is dropped in favour of the bit vector below
    test_str_list = list(test_str)
    elem_occurance = 0
for elem in test_str_list:
index=(ord(elem)-ord('a'))
if (elem_occurance&(1<<index))>0:
return False
else:
            elem_occurance |= (1 << index)
return True
# time complexity is O(n); the bit vector needs only constant extra space
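# A minimal alternative sketch (not part of the original exercise): the same
# uniqueness check with a set, which runs in O(n) on average and is not
# restricted to lowercase a-z. The function name is ours, for illustration only.
def isUniqueSet(test_str):
    seen = set()
    for ch in test_str:
        if ch in seen:
            return False
        seen.add(ch)
    return True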
if __name__=="__main__":
test_string1="kuldeepparashar"
test_string2="ajioklm"
print("test_case1",isUnique(test_string1))
print("testcase2",isUnique(test_string2))
print("test_case1",isUniqueBitVector(test_string1))
print("testcase2",isUniqueBitVector(test_string2))
|
py | b401e73637e391ede88ff90d97a24cf5408a5896 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-20 01:18
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('checkout', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='BaseVoucher',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('code', models.CharField(blank=True, help_text='Leave blank to auto-generate', max_length=32, unique=True)),
('created', models.DateTimeField(auto_now_add=True)),
('limit', models.PositiveSmallIntegerField(blank=True, null=True)),
('minimum_spend', models.PositiveIntegerField(default=0)),
],
),
migrations.CreateModel(
name='Discount',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('amount', models.DecimalField(decimal_places=2, max_digits=6)),
],
),
migrations.CreateModel(
name='FixedVoucher',
fields=[
('basevoucher_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='vouchers.BaseVoucher')),
('amount', models.DecimalField(decimal_places=2, max_digits=6)),
('order_line', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, to='checkout.OrderLine')),
],
bases=('vouchers.basevoucher',),
),
migrations.CreateModel(
name='FreeShippingVoucher',
fields=[
('basevoucher_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='vouchers.BaseVoucher')),
],
bases=('vouchers.basevoucher',),
),
migrations.CreateModel(
name='PercentageVoucher',
fields=[
('basevoucher_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='vouchers.BaseVoucher')),
('amount', models.PositiveSmallIntegerField(validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)])),
],
bases=('vouchers.basevoucher',),
),
migrations.AddField(
model_name='discount',
name='base_voucher',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='vouchers.BaseVoucher'),
),
migrations.AddField(
model_name='discount',
name='order',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='checkout.Order'),
),
]
|
py | b401e8fa343be9404c1ead427e265485c30646af | from __future__ import print_function
import json
import os
from os.path import join, dirname
from watson_developer_cloud import SpeechToTextV1
from secret import myusername, mypassword
speech_to_text = SpeechToTextV1(
username = myusername,
password = mypassword,
x_watson_learning_opt_out=False
)
#print(json.dumps(speech_to_text.models(), indent=2))
#print(json.dumps(speech_to_text.get_model('en-US_BroadbandModel'), indent=2))
fileArray = [];
for files in os.listdir("./mp3"):
fileArray.append(files)
print("================================")
print("Total files: " + str(len(fileArray)))
print("================================")
print("Started")
print("================================")
for i in range(0, len(fileArray)):
with open(join(dirname(__file__), './mp3/' + fileArray[i]),'rb') as audio_file:
print("Currently on file no: " + str(i))
with open(str(i) + '.txt','w') as newJson:
try:
newJson.write(json.dumps(speech_to_text.recognize(audio_file, content_type='audio/mp3', timestamps=True, word_confidence=True), indent=2))
except:
continue
print("Done with file " + str(i));
print("Remaining files: " + str(fileArray - i))
print("================================")
|
py | b401eb3df7a97f74683e89f9459717bdc8d7089c | from sqlalchemy import Column, Integer, ForeignKey, Boolean, UniqueConstraint
from sqlalchemy.orm import relationship
from app.models.model_base import BareBaseModel
class ClinicHistory(BareBaseModel):
__tablename__ = 'clinic_history'
clinic_id = Column(Integer, ForeignKey("clinic.id"))
history_id = Column(Integer, ForeignKey("history.id"))
clinic = relationship('Clinic', remote_side='Clinic.id')
history_medical = relationship('History', remote_side='History.id')
|
py | b401ebf01b1a84761b9cebf00cb01dc2de4ea265 | # Given some integer, find the maximal number you can
# obtain by deleting exactly one digit of the given number.
def deleteDigit(n):
    digits = str(n)
    last_strang = 0
    for i in range(len(digits)):
        # drop the i-th digit and keep the largest number seen so far
        candidate = int(digits[:i] + digits[i + 1:])
        if candidate > last_strang:
            last_strang = candidate
return last_strang |
py | b401ed12b6221b7c00274768403f688d371e33c0 | import tvm
from ...hw_abstraction import (
HardwareAbstraction,
register_abstraction,
MemoryAbstraction,
ComputeAbstraction,
ElementwiseComputeAbstraction,
ElementwiseMemoryAbstraction,
)
from ..hw_abs_dag_base import (
HardwareAbstractionDAG,
register_hw_abs_dag
)
from ..hw_abs_dag_base import InstructionScope
class WMMABaseHwAbsDAG(HardwareAbstractionDAG):
scope = InstructionScope.warp
def get_name(self):
raise NotImplementedError()
def get_all_compute_keys(self):
raise NotImplementedError()
def get_all_shape_keys(self):
raise NotImplementedError()
def get_main_compute_expression(self, compute_key, shape_key):
"""
---
Returns:
inputs, outputs: list of tvm.te.tensor.Tensor
the compute expression can be tracked
through [output.op.body for output in outputs]
"""
tA, tB, tC = [x == "t" for x in compute_key]
hw_abs_class = self.hw_abs_dict[self.main_hw_abs_name]
hw_abs = hw_abs_class(self.get_name())
problem_size = self.get_problem_size(shape_key)
return hw_abs.get_compute_expression(
self.input_dtypes[self.main_hw_abs_name],
self.output_dtypes[self.main_hw_abs_name],
problem_size,
trans_A=tA,
trans_B=tB,
trans_C=tC,
)
def get_hw_abs_compute_expression(self, compute_key, shape_key, hw_abs_key):
"""
---
Returns:
inputs, outputs: list of tvm.te.tensor.Tensor
the compute expression can be tracked
through [output.op.body for output in outputs]
"""
tA, tB, tC = [x == "t" for x in compute_key]
hw_abs_class = self.hw_abs_dict[hw_abs_key]
hw_abs = hw_abs_class(self.get_name())
problem_size = self.get_problem_size(shape_key)
m, n, k = problem_size
A_shape = (m, k) if not tA else (k, m)
B_shape = (k, n) if not tB else (n, k)
C_shape = (m, n) if not tC else (m, n)
if hw_abs_key == "mma":
return hw_abs.get_compute_expression(
self.input_dtypes[hw_abs_key],
self.output_dtypes[hw_abs_key],
problem_size,
trans_A=tA,
trans_B=tB,
trans_C=tC,
)
elif hw_abs_key == "load_a":
return hw_abs.get_compute_expression(
[A_shape],
[A_shape],
self.input_dtypes[hw_abs_key],
self.output_dtypes[hw_abs_key],
problem_size,
)
elif hw_abs_key == "load_b":
return hw_abs.get_compute_expression(
[B_shape],
[B_shape],
self.input_dtypes[hw_abs_key],
self.output_dtypes[hw_abs_key],
problem_size,
)
elif hw_abs_key == "store":
return hw_abs.get_compute_expression(
[C_shape],
[C_shape],
self.input_dtypes[hw_abs_key],
self.output_dtypes[hw_abs_key],
problem_size,
)
else:
raise RuntimeError("Unknown HW abstraction key: %s" % hw_abs_key)
def get_standalone_hw_abs_compute_expression(self, compute_key, shape_key, hw_abs_key):
return self.get_hw_abs_compute_expression(compute_key, shape_key, hw_abs_key)
def get_dag_compute_expression_with_inputs(
self, compute_key, shape_key, hw_abs_keys, read_graph
):
"""
---
Returns:
inputs, outputs: list of tvm.te.tensor.Tensor
the compute expression can be tracked
through [output.op.body for output in outputs]
"""
assert len(hw_abs_keys) > 0
tA, tB, tC = [x == "t" for x in compute_key]
problem_size = self.get_problem_size(shape_key)
m, n, k = problem_size
A_shape = (m, k) if not tA else (k, m)
B_shape = (k, n) if not tB else (n, k)
C_shape = (m, n) if not tC else (m, n)
cache = {
"a": tvm.te.placeholder(A_shape, name="A", dtype=self.input_dtypes["a"][0]),
"b": tvm.te.placeholder(B_shape, name="B", dtype=self.input_dtypes["b"][0]),
}
dag_inputs = []
dag_outputs = []
def helper(hw_abs_key):
if hw_abs_key in cache:
return
hw_abs_class = self.hw_abs_dict[hw_abs_key]
hw_abs = hw_abs_class(self.get_name())
if hw_abs_key in read_graph:
inputs = []
for parent in read_graph[hw_abs_key]:
helper(parent)
assert parent in cache
inputs.extend(cache[parent])
if hw_abs_key == "mma":
_, ret = hw_abs.get_compute_expression_with_inputs(
inputs,
self.input_dtypes[hw_abs_key],
self.output_dtypes[hw_abs_key],
problem_size,
trans_A=tA,
trans_B=tB,
trans_C=tC,
)
elif hw_abs_key == "load_a":
_, ret = hw_abs.get_compute_expression_with_inputs(
inputs,
[A_shape],
[A_shape],
self.input_dtypes[hw_abs_key],
self.output_dtypes[hw_abs_key],
problem_size,
)
elif hw_abs_key == "load_b":
_, ret = hw_abs.get_compute_expression_with_inputs(
inputs,
[B_shape],
[B_shape],
self.input_dtypes[hw_abs_key],
self.output_dtypes[hw_abs_key],
problem_size,
)
elif hw_abs_key == "store":
_, ret = hw_abs.get_compute_expression_with_inputs(
inputs,
[C_shape],
[C_shape],
self.input_dtypes[hw_abs_key],
self.output_dtypes[hw_abs_key],
problem_size,
)
else:
raise RuntimeError("Unknown HW abstraction key: %s" % hw_abs_key)
else:
tmp, ret = self.get_standalone_hw_abs_compute_expression(compute_key, shape_key, hw_abs_key)
dag_inputs.extend(tmp)
cache[hw_abs_key] = ret
for hw_abs_key in hw_abs_keys:
helper(hw_abs_key)
assert hw_abs_key in cache
dag_outputs.extend(cache[hw_abs_key])
return dag_inputs, dag_outputs, cache
def get_hw_abs_compute_expression_with_shape(
self, compute_key, shape_key, hw_abs_key, input_shapes, output_shapes
):
"""
---
Returns:
inputs, outputs: list of tvm.te.tensor.Tensor
the compute expression can be tracked
through [output.op.body for output in outputs]
"""
hw_abs_class = self.hw_abs_dict[hw_abs_key]
hw_abs = hw_abs_class(self.get_name())
problem_size = self.get_problem_size(shape_key)
if hw_abs_key == "mma":
raise RuntimeError("Can't get expression with customized shape for main HW abstraction.")
elif hw_abs_key == "load_a":
assert len(input_shapes) == 1
assert len(output_shapes) == 1
for ii, io in zip(input_shapes, output_shapes):
assert ii == io
return hw_abs.get_compute_expression(
input_shapes,
output_shapes,
self.input_dtypes[hw_abs_key],
self.output_dtypes[hw_abs_key],
problem_size,
)
elif hw_abs_key == "load_b":
assert len(input_shapes) == 1
assert len(output_shapes) == 1
for ii, io in zip(input_shapes, output_shapes):
assert ii == io
return hw_abs.get_compute_expression(
input_shapes,
output_shapes,
self.input_dtypes[hw_abs_key],
self.output_dtypes[hw_abs_key],
problem_size,
)
elif hw_abs_key == "store":
assert len(input_shapes) == 1
assert len(output_shapes) == 1
for ii, io in zip(input_shapes, output_shapes):
assert ii == io
return hw_abs.get_compute_expression(
input_shapes,
output_shapes,
self.input_dtypes[hw_abs_key],
self.output_dtypes[hw_abs_key],
problem_size,
)
else:
raise RuntimeError("Unknown HW abstraction key: %s" % hw_abs_key)
def get_problem_size(self, shape_key):
"""
---
Returns:
input_shapes, output_shapes: list of list/tuple of int
"""
m, n, k = [int(x) for x in shape_key.split("x")]
return [m, n, k]
def get_intrinsic(self, compute_key, shape_key, hw_abs_key, **kwargs):
"""
---
Returns:
tvm.te.TensorIntrin
"""
tA, tB, tC = [x == "t" for x in compute_key]
hw_abs_class = self.hw_abs_dict[hw_abs_key]
hw_abs = hw_abs_class(self.get_name())
problem_size = self.get_problem_size(shape_key)
m, n, k = problem_size
A_shape = (m, k) if not tA else (k, m)
B_shape = (k, n) if not tB else (n, k)
C_shape = (m, n) if not tC else (m, n)
if hw_abs_key == "load_a":
ldm = m if tA else k
layout = "nvcuda::wmma::col_major" if tA else "nvcuda::wmma::row_major"
return hw_abs.get_intrinsic(
[A_shape],
[A_shape],
self.input_dtypes[hw_abs_key],
self.output_dtypes[hw_abs_key],
problem_size,
ldm,
layout,
**kwargs
)
elif hw_abs_key == "load_b":
ldm = k if tB else n
layout = "nvcuda::wmma::col_major" if tB else "nvcuda::wmma::row_major"
return hw_abs.get_intrinsic(
[B_shape],
[B_shape],
self.input_dtypes[hw_abs_key],
self.output_dtypes[hw_abs_key],
problem_size,
ldm,
layout,
**kwargs
)
elif hw_abs_key == "mma":
return hw_abs.get_intrinsic(
self.input_dtypes[hw_abs_key],
self.output_dtypes[hw_abs_key],
problem_size,
trans_A=tA,
trans_B=tB,
trans_C=tC,
**kwargs
)
elif hw_abs_key == "store":
ldm = m if tC else n
layout = "nvcuda::wmma::mem_col_major" if tB else "nvcuda::wmma::mem_row_major"
return hw_abs.get_intrinsic(
[C_shape],
[C_shape],
self.input_dtypes[hw_abs_key],
self.output_dtypes[hw_abs_key],
problem_size,
ldm,
layout,
**kwargs
)
else:
raise RuntimeError("Unknown HW abstraction key: %s" % hw_abs_key)
def get_memory_scope_realize(self, dtype, scope, constant_size, attributes):
"""
dtype: str
e.g. float16
scope: str
e.g. wmma::matrix_a
constant_size: int
size of elements in the buffer
attributes: dict of {tvm.runtime.String, tvm.tir.StringImm}
other useful information, e.g., layout/leading dimension length
---
Returns:
memory scope realization: [str, int]
as for str, e.g. nvcuda::wmma::fragment<
nvcuda::wmma::matrix_a, 16, 16, 16,
nvcuda::wmma::row_major, 16>
"""
assert "storage_shape" in attributes
storage_shape = attributes["storage_shape"]
m, n, k = [int(x) for x in storage_shape.split(", ")]
if scope == "nvcuda::wmma::matrix_a":
assert "storage_layout" in attributes
storage_layout = attributes["storage_layout"]
storage = (
"nvcuda::wmma::fragment<"
+ scope
+ ", "
+ storage_shape
+ ", "
+ dtype
+ ", "
+ storage_layout
+ ">"
)
assert constant_size % (m * k) == 0
storage_size = constant_size // (m * k)
return [storage, storage_size]
elif scope == "nvcuda::wmma::matrix_b":
assert "storage_layout" in attributes
storage_layout = attributes["storage_layout"]
storage = (
"nvcuda::wmma::fragment<"
+ scope
+ ", "
+ storage_shape
+ ", "
+ dtype
+ ", "
+ storage_layout
+ ">"
)
assert constant_size % (n * k) == 0
storage_size = constant_size // (n * k)
return [storage, storage_size]
elif scope == "nvcuda::wmma::accumulator":
storage = "nvcuda::wmma::fragment<" + scope + ", " + storage_shape + ", " + dtype + ">"
assert constant_size % (m * n) == 0
storage_size = constant_size // (m * n)
return [storage, storage_size]
else:
raise RuntimeError("Unknown scope: %s" % scope)
def get_header(self):
return "#include <mma.h>\n"
|
py | b401eda038557a1c7c3a0a66368d82373f405c41 | import numpy as np
import sklearn.discriminant_analysis as sklda
import scipy as sp
def train(train_data_1, train_data_2, numFilt):
numTrials_1 = np.size(train_data_1,0)
    numTrials_2 = np.size(train_data_2, 0)
# train the CCACSP filters
ccacsp_filts = calc_CCACSP(train_data_1, train_data_2, numFilt)
# extract the features
train_filt_1 = apply_CCACSP(train_data_1, ccacsp_filts, numFilt)
train_logP_1 = np.squeeze(np.log(np.var(train_filt_1, axis=2)))
train_filt_2 = apply_CCACSP(train_data_2, ccacsp_filts, numFilt)
train_logP_2 = np.squeeze(np.log(np.var(train_filt_2, axis=2)))
# define the classifier
clf = sklda.LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto')
X = np.concatenate((train_logP_1, train_logP_2), axis=0)
y1 = np.zeros(numTrials_1)
y2 = np.ones(numTrials_2)
y = np.concatenate((y1, y2))
# train the classifier
clf.fit(X, y)
return ccacsp_filts, clf
def test(test_data, ccacsp_filts, clf):
total_filts = np.size(ccacsp_filts,1)
# test the classifier on the test data
test_filt = np.matmul(ccacsp_filts.transpose(), test_data)
test_logP = np.squeeze(np.log(np.var(test_filt, axis=1)))
test_logP = np.reshape(test_logP,(1,total_filts))
return clf.predict(test_logP)
def calc_CCACSP(x1,x2, numFilt):
num_trials_1 = np.size(x1,0)
num_trials_2 = np.size(x2,0)
# number of channels and time samples should be the same between x1 and x2
n_samps = np.size(x1,2)
n_chans = np.size(x1,1)
c1_shifted = np.zeros([n_chans,n_chans])
c2_shifted = np.zeros([n_chans,n_chans])
c1 = np.zeros([n_chans,n_chans])
c2 = np.zeros([n_chans,n_chans])
range0 = range(0,n_samps-2)
range1 = range(1,n_samps-1)
range2 = range(2,n_samps)
# estimate the covariances
for ik in range(num_trials_1):
Samp = x1[ik]
temp1 = 0.5*(Samp[:,range0]+Samp[:,range2])
temp2 = Samp[:,range1]
c1_shifted = c1_shifted+my_cov(temp2, temp1)/np.trace(my_cov(temp2, temp1))
c1 = c1+np.cov(x1[ik])/np.trace(np.cov(x1[ik]))
c1_shifted = np.divide(c1_shifted,num_trials_1)
c1 = np.divide(c1,num_trials_1)
for ik in range(num_trials_2):
Samp = x2[ik]
temp1 = 0.5*(Samp[:,range0]+Samp[:,range2])
temp2 = Samp[:,range1]
c2_shifted = c2_shifted+my_cov(temp2, temp1)/np.trace(my_cov(temp2, temp1))
c2 = c2+np.cov(x2[ik])/np.trace(np.cov(x2[ik]))
c2_shifted = np.divide(c2_shifted,num_trials_2)
c2 = np.divide(c2,num_trials_2)
# taking care of rank deficiency for a more robust result
D, V = sp.linalg.eigh(c1+c2)
indx = np.argsort(D)
indx = indx[::-1]
d = D[indx[0:np.linalg.matrix_rank(c1+c2)]]
W = V[:,indx[0:np.linalg.matrix_rank(c1+c2)]]
W_T = np.matmul(np.sqrt(sp.linalg.pinv(np.diag(d))),W.transpose())
S1 = np.matmul(np.matmul(W_T,c1),W_T.transpose())
S2 = np.matmul(np.matmul(W_T,c2),W_T.transpose())
S1_shifted = np.matmul(np.matmul(W_T,c1_shifted),W_T.transpose())
S2_shifted = np.matmul(np.matmul(W_T,c2_shifted),W_T.transpose())
# find filters for class 1
d,v = sp.linalg.eigh(S1_shifted,S1+S2)
indx = np.argsort(d)
indx = indx[::-1]
filts_1 = v.take(indx, axis=1)
filts_1 = np.matmul(filts_1.transpose(),W_T)
filts_1 = filts_1.transpose()
filts_1 = select_filts(filts_1, numFilt)
# find filters for class 2
d,v = sp.linalg.eigh(S2_shifted,S1+S2)
indx = np.argsort(d)
indx = indx[::-1]
filts_2 = v.take(indx, axis=1)
filts_2 = np.matmul(filts_2.transpose(),W_T)
filts_2 = filts_2.transpose()
filts_2 = select_filts(filts_2, numFilt)
# concatenate filters for classes 1 and 2 and return
return np.concatenate((filts_1, filts_2), axis=1)
def select_filts(filt, col_num):
temp = np.shape(filt)
columns = np.arange(0,col_num)
#print(columns)
f = filt[:, columns]
for ij in range(col_num):
f[:, ij] = f[:, ij]/np.linalg.norm(f[:, ij])
return f
def apply_CCACSP(X, f, col_num):
f = np.transpose(f)
temp = np.shape(X)
num_trials = temp[0]
#dat = np.zeros(np.shape(X), dtype = object)
dat = np.zeros((num_trials, 2*col_num, temp[2]))
for ik in range(num_trials):
dat[ik,:,:] = np.matmul(f,X[ik,:,:])
return dat
def my_cov(X, Y):
avg_X = np.mean(X, axis=1)
    avg_Y = np.mean(Y, axis=1)
X = X - avg_X[:,None]
Y = Y - avg_Y[:,None]
return np.matmul(X, Y.transpose())
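# Minimal usage sketch (ours, not part of the original pipeline): train the
# CCACSP + LDA model on synthetic two-class data shaped (trials, channels,
# samples) and classify one held-out trial. The shapes, the filter count and
# the random data are illustrative assumptions, not values from the original work.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    class_1 = rng.randn(20, 8, 256)
    class_2 = 1.5 * rng.randn(20, 8, 256)
    filts, clf = train(class_1, class_2, numFilt=2)
    held_out = 1.5 * rng.randn(8, 256)
    print("predicted class:", test(held_out, filts, clf))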
|
py | b401ee3ee424cbf13a949da2563d01dc32fd06fa | import pytest
from django.utils.text import slugify
@pytest.mark.django_db
def test_generate_unique_slug_automatically(court):
old_slug = court.slug
assert old_slug == slugify(court.name_abbreviation)
court.pk = None
court.slug = None
court.save()
assert court.slug == old_slug + '-1'
|
py | b401ee6a581e905b9321460a2ab5f23fed2b73ba | """
Get API information encoded in C files.
See ``find_function`` for how functions should be formatted, and
``read_order`` for how the order of the functions should be
specified.
"""
from numpy.distutils.conv_template import process_file as process_c_file
import hashlib
import io
import os
import re
import sys
import textwrap
from os.path import join
__docformat__ = 'restructuredtext'
# The files under src/ that are scanned for API functions
API_FILES = [join('multiarray', 'alloc.c'),
join('multiarray', 'abstractdtypes.c'),
join('multiarray', 'arrayfunction_override.c'),
join('multiarray', 'array_assign_array.c'),
join('multiarray', 'array_assign_scalar.c'),
join('multiarray', 'array_coercion.c'),
join('multiarray', 'array_method.c'),
join('multiarray', 'arrayobject.c'),
join('multiarray', 'arraytypes.c.src'),
join('multiarray', 'buffer.c'),
join('multiarray', 'calculation.c'),
join('multiarray', 'common_dtype.c'),
join('multiarray', 'conversion_utils.c'),
join('multiarray', 'convert.c'),
join('multiarray', 'convert_datatype.c'),
join('multiarray', 'ctors.c'),
join('multiarray', 'datetime.c'),
join('multiarray', 'datetime_busday.c'),
join('multiarray', 'datetime_busdaycal.c'),
join('multiarray', 'datetime_strings.c'),
join('multiarray', 'descriptor.c'),
join('multiarray', 'dlpack.c'),
join('multiarray', 'dtypemeta.c'),
join('multiarray', 'einsum.c.src'),
join('multiarray', 'flagsobject.c'),
join('multiarray', 'getset.c'),
join('multiarray', 'item_selection.c'),
join('multiarray', 'iterators.c'),
join('multiarray', 'mapping.c'),
join('multiarray', 'methods.c'),
join('multiarray', 'multiarraymodule.c'),
join('multiarray', 'nditer_api.c'),
join('multiarray', 'nditer_constr.c'),
join('multiarray', 'nditer_pywrap.c'),
join('multiarray', 'nditer_templ.c.src'),
join('multiarray', 'number.c'),
join('multiarray', 'refcount.c'),
join('multiarray', 'scalartypes.c.src'),
join('multiarray', 'scalarapi.c'),
join('multiarray', 'sequence.c'),
join('multiarray', 'shape.c'),
join('multiarray', 'strfuncs.c'),
join('multiarray', 'usertypes.c'),
join('umath', 'loops.c.src'),
join('umath', 'ufunc_object.c'),
join('umath', 'ufunc_type_resolution.c'),
join('umath', 'reduction.c'),
]
THIS_DIR = os.path.dirname(__file__)
API_FILES = [os.path.join(THIS_DIR, '..', 'src', a) for a in API_FILES]
def file_in_this_dir(filename):
return os.path.join(THIS_DIR, filename)
def remove_whitespace(s):
return ''.join(s.split())
def _repl(str):
return str.replace('Bool', 'npy_bool')
class StealRef:
def __init__(self, arg):
self.arg = arg # counting from 1
def __str__(self):
try:
return ' '.join('NPY_STEALS_REF_TO_ARG(%d)' % x for x in self.arg)
except TypeError:
return 'NPY_STEALS_REF_TO_ARG(%d)' % self.arg
class NonNull:
def __init__(self, arg):
self.arg = arg # counting from 1
def __str__(self):
try:
return ' '.join('NPY_GCC_NONNULL(%d)' % x for x in self.arg)
except TypeError:
return 'NPY_GCC_NONNULL(%d)' % self.arg
class Function:
def __init__(self, name, return_type, args, doc=''):
self.name = name
self.return_type = _repl(return_type)
self.args = args
self.doc = doc
def _format_arg(self, typename, name):
if typename.endswith('*'):
return typename + name
else:
return typename + ' ' + name
def __str__(self):
argstr = ', '.join([self._format_arg(*a) for a in self.args])
if self.doc:
doccomment = '/* %s */\n' % self.doc
else:
doccomment = ''
return '%s%s %s(%s)' % (doccomment, self.return_type, self.name, argstr)
def to_ReST(self):
lines = ['::', '', ' ' + self.return_type]
argstr = ',\000'.join([self._format_arg(*a) for a in self.args])
name = ' %s' % (self.name,)
s = textwrap.wrap('(%s)' % (argstr,), width=72,
initial_indent=name,
subsequent_indent=' ' * (len(name)+1),
break_long_words=False)
for l in s:
lines.append(l.replace('\000', ' ').rstrip())
lines.append('')
if self.doc:
lines.append(textwrap.dedent(self.doc))
return '\n'.join(lines)
def api_hash(self):
m = hashlib.md5()
        m.update(remove_whitespace(self.return_type).encode('ascii'))
        m.update(b'\000')
        m.update(self.name.encode('ascii'))
        m.update(b'\000')
        for typename, name in self.args:
            m.update(remove_whitespace(typename).encode('ascii'))
            m.update(b'\000')
return m.hexdigest()[:8]
class ParseError(Exception):
def __init__(self, filename, lineno, msg):
self.filename = filename
self.lineno = lineno
self.msg = msg
def __str__(self):
return '%s:%s:%s' % (self.filename, self.lineno, self.msg)
def skip_brackets(s, lbrac, rbrac):
count = 0
for i, c in enumerate(s):
if c == lbrac:
count += 1
elif c == rbrac:
count -= 1
if count == 0:
return i
raise ValueError("no match '%s' for '%s' (%r)" % (lbrac, rbrac, s))
def split_arguments(argstr):
arguments = []
current_argument = []
i = 0
def finish_arg():
if current_argument:
argstr = ''.join(current_argument).strip()
m = re.match(r'(.*(\s+|\*))(\w+)$', argstr)
if m:
typename = m.group(1).strip()
name = m.group(3)
else:
typename = argstr
name = ''
arguments.append((typename, name))
del current_argument[:]
while i < len(argstr):
c = argstr[i]
if c == ',':
finish_arg()
elif c == '(':
p = skip_brackets(argstr[i:], '(', ')')
current_argument += argstr[i:i+p]
i += p-1
else:
current_argument += c
i += 1
finish_arg()
return arguments
def find_functions(filename, tag='API'):
"""
Scan the file, looking for tagged functions.
Assuming ``tag=='API'``, a tagged function looks like::
/*API*/
static returntype*
function_name(argtype1 arg1, argtype2 arg2)
{
}
where the return type must be on a separate line, the function
name must start the line, and the opening ``{`` must start the line.
An optional documentation comment in ReST format may follow the tag,
as in::
/*API
This function does foo...
*/
"""
if filename.endswith(('.c.src', '.h.src')):
fo = io.StringIO(process_c_file(filename))
else:
fo = open(filename, 'r')
functions = []
return_type = None
function_name = None
function_args = []
doclist = []
SCANNING, STATE_DOC, STATE_RETTYPE, STATE_NAME, STATE_ARGS = list(range(5))
state = SCANNING
tagcomment = '/*' + tag
for lineno, line in enumerate(fo):
try:
line = line.strip()
if state == SCANNING:
if line.startswith(tagcomment):
if line.endswith('*/'):
state = STATE_RETTYPE
else:
state = STATE_DOC
elif state == STATE_DOC:
if line.startswith('*/'):
state = STATE_RETTYPE
else:
line = line.lstrip(' *')
doclist.append(line)
elif state == STATE_RETTYPE:
# first line of declaration with return type
m = re.match(r'NPY_NO_EXPORT\s+(.*)$', line)
if m:
line = m.group(1)
return_type = line
state = STATE_NAME
elif state == STATE_NAME:
# second line, with function name
m = re.match(r'(\w+)\s*\(', line)
if m:
function_name = m.group(1)
else:
raise ParseError(filename, lineno+1,
'could not find function name')
function_args.append(line[m.end():])
state = STATE_ARGS
elif state == STATE_ARGS:
if line.startswith('{'):
# finished
# remove any white space and the closing bracket:
fargs_str = ' '.join(function_args).rstrip()[:-1].rstrip()
fargs = split_arguments(fargs_str)
f = Function(function_name, return_type, fargs,
'\n'.join(doclist))
functions.append(f)
return_type = None
function_name = None
function_args = []
doclist = []
state = SCANNING
else:
function_args.append(line)
except ParseError:
raise
except Exception as e:
msg = "see chained exception for details"
raise ParseError(filename, lineno + 1, msg) from e
fo.close()
return functions
def should_rebuild(targets, source_files):
from distutils.dep_util import newer_group
for t in targets:
if not os.path.exists(t):
return True
sources = API_FILES + list(source_files) + [__file__]
if newer_group(sources, targets[0], missing='newer'):
return True
return False
def write_file(filename, data):
"""
Write data to filename
Only write changed data to avoid updating timestamps unnecessarily
"""
if os.path.exists(filename):
with open(filename) as f:
if data == f.read():
return
with open(filename, 'w') as fid:
fid.write(data)
# These *Api class instances know how to output strings for the generated code
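# For illustration, TypeApi.define_from_array_api_string() below produces macro
# definitions of the form
#   #define PyArray_Type (*(PyTypeObject *)PyArray_API[2])
# where the name, pointer cast and index come from the API dictionaries (the index
# shown here is only an example).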
class TypeApi:
def __init__(self, name, index, ptr_cast, api_name, internal_type=None):
self.index = index
self.name = name
self.ptr_cast = ptr_cast
self.api_name = api_name
# The type used internally, if None, same as exported (ptr_cast)
self.internal_type = internal_type
def define_from_array_api_string(self):
return "#define %s (*(%s *)%s[%d])" % (self.name,
self.ptr_cast,
self.api_name,
self.index)
def array_api_define(self):
return " (void *) &%s" % self.name
def internal_define(self):
if self.internal_type is None:
return f"extern NPY_NO_EXPORT {self.ptr_cast} {self.name};\n"
        # If we are here, we need to define a larger struct internally, to which
        # the type can safely be cast. But we want to normally use the original
        # type, so name mangle:
mangled_name = f"{self.name}Full"
astr = (
# Create the mangled name:
f"extern NPY_NO_EXPORT {self.internal_type} {mangled_name};\n"
# And define the name as: (*(type *)(&mangled_name))
f"#define {self.name} (*({self.ptr_cast} *)(&{mangled_name}))\n"
)
return astr
class GlobalVarApi:
def __init__(self, name, index, type, api_name):
self.name = name
self.index = index
self.type = type
self.api_name = api_name
def define_from_array_api_string(self):
return "#define %s (*(%s *)%s[%d])" % (self.name,
self.type,
self.api_name,
self.index)
def array_api_define(self):
return " (%s *) &%s" % (self.type, self.name)
def internal_define(self):
astr = """\
extern NPY_NO_EXPORT %(type)s %(name)s;
""" % {'type': self.type, 'name': self.name}
return astr
# Dummy to be able to consistently use *Api instances for all items in the
# array api
class BoolValuesApi:
def __init__(self, name, index, api_name):
self.name = name
self.index = index
self.type = 'PyBoolScalarObject'
self.api_name = api_name
def define_from_array_api_string(self):
return "#define %s ((%s *)%s[%d])" % (self.name,
self.type,
self.api_name,
self.index)
def array_api_define(self):
return " (void *) &%s" % self.name
def internal_define(self):
astr = """\
extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
"""
return astr
class FunctionApi:
def __init__(self, name, index, annotations, return_type, args, api_name):
self.name = name
self.index = index
self.annotations = annotations
self.return_type = return_type
self.args = args
self.api_name = api_name
def _argtypes_string(self):
if not self.args:
return 'void'
argstr = ', '.join([_repl(a[0]) for a in self.args])
return argstr
def define_from_array_api_string(self):
define = """\
#define %s \\\n (*(%s (*)(%s)) \\
%s[%d])""" % (self.name,
self.return_type,
self._argtypes_string(),
self.api_name,
self.index)
return define
def array_api_define(self):
return " (void *) %s" % self.name
def internal_define(self):
annstr = [str(a) for a in self.annotations]
annstr = ' '.join(annstr)
astr = """\
NPY_NO_EXPORT %s %s %s \\\n (%s);""" % (annstr, self.return_type,
self.name,
self._argtypes_string())
return astr
def order_dict(d):
"""Order dict by its values."""
o = list(d.items())
def _key(x):
return x[1] + (x[0],)
return sorted(o, key=_key)
def merge_api_dicts(dicts):
ret = {}
for d in dicts:
for k, v in d.items():
ret[k] = v
return ret
def check_api_dict(d):
"""Check that an api dict is valid (does not use the same index twice)."""
# remove the extra value fields that aren't the index
index_d = {k: v[0] for k, v in d.items()}
    # Detect whether the same index is used twice: we 'invert' the dict so that
    # indexes become keys. If the length differs, it means at least one index has
    # been used twice.
revert_dict = {v: k for k, v in index_d.items()}
if not len(revert_dict) == len(index_d):
# We compute a dict index -> list of associated items
doubled = {}
for name, index in index_d.items():
try:
doubled[index].append(name)
except KeyError:
doubled[index] = [name]
fmt = "Same index has been used twice in api definition: {}"
val = ''.join(
'\n\tindex {} -> {}'.format(index, names)
for index, names in doubled.items() if len(names) != 1
)
raise ValueError(fmt.format(val))
    # No 'hole' in the indexes is allowed, and indexing must start at 0
indexes = set(index_d.values())
expected = set(range(len(indexes)))
if indexes != expected:
diff = expected.symmetric_difference(indexes)
msg = "There are some holes in the API indexing: " \
"(symmetric diff is %s)" % diff
raise ValueError(msg)
def get_api_functions(tagname, api_dict):
"""Parse source files to get functions tagged by the given tag."""
functions = []
for f in API_FILES:
functions.extend(find_functions(f, tagname))
dfunctions = [(api_dict[func.name][0], func) for func in functions]
dfunctions.sort()
return [a[1] for a in dfunctions]
def fullapi_hash(api_dicts):
"""Given a list of api dicts defining the numpy C API, compute a checksum
of the list of items in the API (as a string)."""
a = []
for d in api_dicts:
for name, data in order_dict(d):
a.extend(name)
a.extend(','.join(map(str, data)))
return hashlib.md5(''.join(a).encode('ascii')).hexdigest()
# To parse strings like 'hex = checksum' where hex is e.g. 0x1234567F and
# checksum a 128 bits md5 checksum (hex format as well)
VERRE = re.compile(r'(^0x[\da-f]{8})\s*=\s*([\da-f]{32})')
def get_versions_hash():
d = []
file = os.path.join(os.path.dirname(__file__), 'cversions.txt')
with open(file, 'r') as fid:
for line in fid:
m = VERRE.match(line)
if m:
d.append((int(m.group(1), 16), m.group(2)))
return dict(d)
def main():
tagname = sys.argv[1]
order_file = sys.argv[2]
functions = get_api_functions(tagname, order_file)
    m = hashlib.md5(tagname.encode('ascii'))
for func in functions:
print(func)
ah = func.api_hash()
        m.update(ah.encode('ascii'))
print(hex(int(ah, 16)))
print(hex(int(m.hexdigest()[:8], 16)))
if __name__ == '__main__':
main()
|
py | b401ef1dd4cf198fb831045103723040ac396127 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
from kivymt.calendar.calendar_ui import DatePicker, CalendarWidget
|
py | b401ef7158b8f928ae67e27da247cd0fa8cdd41e | from parsl.addresses import address_by_interface
from parsl.launchers import SrunLauncher
from parsl.providers import SlurmProvider
from funcx_endpoint.endpoint.utils.config import Config
from funcx_endpoint.executors import HighThroughputExecutor
# fmt: off
# PLEASE UPDATE user_opts BEFORE USE
user_opts = {
'cori': {
'worker_init': 'source ~/setup_funcx_test_env.sh',
'scheduler_options': '#SBATCH --constraint=knl,quad,cache',
}
}
config = Config(
executors=[
HighThroughputExecutor(
max_workers_per_node=2,
worker_debug=False,
address=address_by_interface('bond0.144'),
provider=SlurmProvider(
partition='debug', # Partition / QOS
# We request all hyperthreads on a node.
launcher=SrunLauncher(overrides='-c 272'),
# string to prepend to #SBATCH blocks in the submit
# script to the scheduler eg: '#SBATCH --constraint=knl,quad,cache'
scheduler_options=user_opts['cori']['scheduler_options'],
# Command to be run before starting a worker, such as:
# 'module load Anaconda; source activate parsl_env'.
worker_init=user_opts['cori']['worker_init'],
# Increase timeout as Cori's scheduler may be slow
# to respond
cmd_timeout=120,
# Scale between 0-1 blocks with 2 nodes per block
nodes_per_block=2,
init_blocks=0,
min_blocks=0,
max_blocks=1,
# Hold blocks for 10 minutes
walltime='00:10:00',
),
),
],
)
# fmt: on
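# Usage sketch (assumption, not part of this file): a funcX endpoint typically picks
# this Config up from its config.py (e.g. ~/.funcx/<endpoint-name>/config.py) when the
# endpoint is started with `funcx-endpoint start <endpoint-name>`.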
|
py | b401efa5d3151e93d28bd3e075b6aa79d77c6fed | import populate_graph as pg
import networkx as nx
import utils as ut
FOLDER = 'data/Centrality/'
# populate the graph from the snapshot
G = nx.DiGraph()
G,m = pg.populate_nodes(G)
G,m1=pg.populate_channels(G,m,ut.getBlockHeight())
G = pg.populate_policies(G,m1)
# curate nodes and channels removing channels that are closed and those that do not have public policies
G1 = nx.DiGraph()
for [u,v] in G.edges():
if(G.edges[u,v]["marked"]==1 and G.edges[v,u]["marked"]==1):
if (u not in G1.nodes()):
G1.add_node(u)
G1.nodes[u]["name"] = G.nodes[u]["name"]
G1.nodes[u]["pubadd"] = G.nodes[u]["pubadd"]
G1.nodes[u]["Tech"] = G.nodes[u]["Tech"]
#print(G1.nodes[u]["Tech"])
if (v not in G1.nodes()):
G1.add_node(v)
G1.nodes[v]["name"] = G.nodes[v]["name"]
G1.nodes[v]["pubadd"] = G.nodes[v]["pubadd"]
G1.nodes[v]["Tech"] = G.nodes[v]["Tech"]
#print(G1.nodes[v]["Tech"])
G1.add_edge(u,v)
G1.edges[u,v]["Balance"] = G.edges[u,v]["Balance"]
G1.edges[u, v]["Age"] = G.edges[u, v]["Age"]
G1.edges[u, v]["BaseFee"] = G.edges[u, v]["BaseFee"]
G1.edges[u, v]["FeeRate"] = G.edges[u, v]["FeeRate"]
G1.edges[u, v]["Delay"] = G.edges[u, v]["Delay"]
G1.edges[u, v]["id"] = G.edges[u, v]["id"]
# The networkx library is used to calculate all the centrality metrics.
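# (Informally: betweenness counts shortest paths passing through a node, closeness is
#  the inverse of its mean distance to all other nodes, degree is its normalised edge
#  count, and eigenvector centrality weights a node by its neighbours' scores.)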
B = nx.betweenness_centrality(G1)
C = nx.closeness_centrality(G1)
D = nx.degree_centrality(G1)
E = nx.eigenvector_centrality(G1)
betweenness = ''
for b in B:
betweenness += f'{b},{B[b]}\n'
with open(FOLDER + 'betweenness.csv', 'w') as between:
between.write(betweenness.strip())
closeness = ''
for c in C:
closeness += f'{c},{C[c]}\n'
with open(FOLDER + 'closeness.csv', 'w') as close:
close.write(closeness.strip())
degree = ''
for d in D:
degree += f'{d},{D[d]}\n'
with open(FOLDER + 'degree.csv', 'w') as deg:
deg.write(degree.strip())
eigen = ''
for e in E:
eigen += f'{e},{E[e]}\n'
with open(FOLDER + 'eigen.csv', 'w') as eig:
eig.write(eigen.strip())
|
py | b401f094f46346558296c8055e85435a136b8352 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=16)),
('order', models.IntegerField(default=1)),
],
),
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=32)),
('order', models.IntegerField(default=1)),
('recipe', models.TextField()),
('image', models.ImageField(upload_to=b'recipes/', blank=True)),
('category', models.ForeignKey(to='recipes.Category', on_delete=models.CASCADE)),
],
),
]
|
py | b401f1cb6fe79d6067cc24c6c67352ee18b8cd23 | import os
stream = os.popen('ls /root/Github/Project_Mendel/Data/Genera_Search_Data/MSA_Scans_From_nhmmerscan/')
output = stream.readlines()
msa_path = "/root/Github/Project_Mendel/Data/Genera_Search_Data/MSA_Scans_From_nhmmerscan/"
top_hit_output = "/root/Github/Project_Mendel/Data/Genera_Search_Data/Top_Hits_Scans/"
with open(top_hit_output + "Scanned_Results.csv", "a") as file:
file.write("Query Name, Top Hit, E-Value, Bit Score \n")
file.close()
for fam in output:
path = msa_path + fam.strip()
with open(path, 'r') as handler:
top_hit = handler.readlines()[2:3]
top_hit_line = str(top_hit).split()
if (len(top_hit_line) > 1):
target_name_family = top_hit_line.__getitem__(0).strip("[").strip("'")
query_name = top_hit_line.__getitem__(2)
e_value = top_hit_line.__getitem__(12)
bit_score = top_hit_line.__getitem__(13)
print(target_name_family, query_name, e_value, bit_score)
with open(top_hit_output + "Scanned_Results.csv", "a") as file:
file.write(query_name + ',' + target_name_family + ',' + e_value + ',' + bit_score + '\n')
file.close()
# print(target_name_family.strip("[").strip("'"))
# print(len(top_hit_line))
if (len(top_hit_line) == 1):
print("Scan did not show no hits")
|
py | b401f247bfed73b5343251bfb211ba2fa5f02f05 | # authors: anonymized
import numpy as np
from modelTransitions import *
from scipy import stats
from math import log,sqrt
import spibb_utils
def policy_evaluation_modified(gamma, pi, r, p, theta = 0.000001):
"""
Evaluate pi using an initial v estimate and iterate
Args:
pi: policy, array of shape |S| x |A|
v: state values, array of shape |S|
r: rewards, array of shape |S| x |A|
p: state transition probabilities, array of shape |S| x |A| x |S|
Return:
v: 1D array with updated state values
"""
v=np.zeros((pi.shape[0],))
max_iteration = 10000 # avoid core meltdown
for i in range(max_iteration):
# Rewards according to policy: Hadamard product and row-wise sum
r_pi = np.einsum('ij,ij->i', pi, r)
# Policy-weighted transitions:
# multiply p by pi by broadcasting pi, then sum second axis
# result is an array of shape |S| x |S|
p_pi = np.einsum('ijk, ij->ik', p, pi)
# New values
v_new = r_pi + gamma * np.dot(p_pi, v)
# Stop condition
if np.max(np.absolute(v - v_new)) < theta:
v = v_new
break;
v = v_new
q = r + gamma * np.einsum('ijk, k->ij', p, v) # get q values
return v,q
class EstimatorReturn:
"""
Estimate the return of a target policy pi_t
given a set of trajectories generated by a baseline policy pi_b
    The available estimators are:
- importance sampling;
- weighted importance sampling;
- doubly robust;
"""
def __init__(self,gamma, pi_b, pi_t, estimator_type, R, list_format = True,trajectories_reg=None):
self.estimator_dict = {"importance_sampling": self.importance_sampling,
"weighted_importance_sampling": self.weighted_importance_sampling,
"weighted_per_decision_IS": self.weighted_per_decision_importance_sampling,
"per_decision_IS": self.per_decision_importance_sampling,
"doubly_robust": self.doubly_robust
}
self.R = R
self.gamma=gamma
self.estimator_type = estimator_type
self.pi_b = pi_b
self.pi_t = pi_t
self.list_format = list_format
self.trajectories_reg = trajectories_reg
def __call__(self, trajectories):
return self.estimator_dict[self.estimator_type](trajectories)
def importance_sampling(self, trajectories):
if self.list_format:
estimate=[]
else:
estimate=0
for trajectory in trajectories:
t = 0
cum_rew = 0
weight = 1
for [action,state,_,reward] in trajectory:
cum_rew += self.gamma**t*reward
weight = weight*(self.pi_t[state,action]/self.pi_b[state,action])
t += 1
if self.list_format:
estimate.append(weight*cum_rew)
else:
estimate += weight*cum_rew
if self.list_format:
return estimate
else:
return estimate/len(trajectories)
def weighted_importance_sampling(self, trajectories):
if self.list_format:
estimate=[]
else:
estimate=0
sum_weight = 0
for trajectory in trajectories:
t = 0
cum_rew = 0
weight = 1
for [action, state, _, reward] in trajectory:
cum_rew += self.gamma ** t * reward
weight = weight * (self.pi_t[state, action] / self.pi_b[state, action])
t += 1
if self.list_format:
estimate.append(cum_rew)
else:
estimate += weight * cum_rew
sum_weight +=weight
if self.list_format:
return estimate
else:
return estimate / sum_weight
def per_decision_importance_sampling(self, trajectories,max_steps = 200):
if self.list_format:
estimate=[]
else:
estimate=0
rho = np.zeros((max_steps,len(trajectories)))
sum_per_decision = np.zeros((max_steps,1))
n = 0
for trajactory in trajectories:
weight = 1
t=0
for [action, state, _, _] in trajactory:
current_weight = self.pi_t[state, action] / self.pi_b[state, action]
weight = weight*current_weight
rho[t,n] = weight
t += 1
n += 1
n = 0
for trajectory in trajectories:
t = 0
cum_rew = 0
for [_, _, _, reward] in trajectory:
cum_rew += self.gamma ** t * reward * float(rho[t,n])
t += 1
if self.list_format:
estimate.append(cum_rew)
else:
estimate += cum_rew
n += 1
if self.list_format:
return estimate
else:
return float(estimate) /len(trajectories)
def weighted_per_decision_importance_sampling(self, trajectories,max_steps = 200):
if self.list_format:
estimate=[]
else:
estimate=0
rho = np.zeros((max_steps,len(trajectories)))
sum_per_decision = np.zeros((max_steps,1))
n = 0
for trajactory in trajectories:
weight = 1
t=0
for [action, state, _, _] in trajactory:
current_weight = self.pi_t[state, action] / self.pi_b[state, action]
weight = weight*current_weight
rho[t,n] = weight
sum_per_decision[t] += weight*self.gamma ** t
t += 1
n += 1
n = 0
for trajectory in trajectories:
t = 0
cum_rew = 0
for [_, _, _, reward] in trajectory:
cum_rew += self.gamma ** t * reward * float(rho[t,n])/float(sum_per_decision[t])
t += 1
if self.list_format:
estimate.append(cum_rew)
else:
estimate += cum_rew
n += 1
if self.list_format:
return estimate
else:
return float(estimate)/float(np.sum(sum_per_decision))
def doubly_robust(self, trajectories):
"""
As implemented in Jiang and Li, 2015;
        Makes use of a control variate built from an approximate
model of the MDP
:param trajectories: a set of trajectories
:return: an estimate of the return
"""
if self.list_format:
estimate = self.compute_estimare_dr(self.trajectories_reg, trajectories)
return estimate
else:
# We used the 2-fold DR as model fitting
index_mid = int(len(trajectories) / 2)
trajectories_reg = trajectories[:index_mid]
trajectories_eval = trajectories[index_mid:]
estimate1 = self.compute_estimare_dr(trajectories_reg,trajectories_eval,False)
estimate2 = self.compute_estimare_dr(trajectories_eval, trajectories_reg,False)
return (estimate1+estimate2)/2.
def compute_estimare_dr(self,trajectories_reg,trajectories_eval,is_list=True):
if is_list:
estimate=[]
else:
estimate = 0
[V_hat, Q_hat] = self.estimate_q(trajectories_reg)
for trajectory in trajectories_eval:
estimate_trajectory = 0
for [action, state, _, reward] in trajectory[::-1]:
estimate_trajectory = int(V_hat[state]) + \
self.pi_t[state, action] / self.pi_b[state, action]* (reward +
self.gamma*estimate_trajectory-int(Q_hat[state,action]))
if is_list:
estimate.append(estimate_trajectory)
else:
estimate += estimate_trajectory
if not(is_list):
estimate = estimate / len(trajectories_eval)
return estimate
def estimate_q(self, trajectories):
batch = []
for trajectory in trajectories:
for [action, state, next_state, reward] in trajectory:
batch.append([action, state, next_state, reward])
model = ModelTransitions(batch,self.pi_b.shape[0],self.pi_b.shape[1])
reward_model = spibb_utils.get_reward_model(model.transitions, self.R)
return policy_evaluation_modified(self.gamma, self.pi_t, reward_model, model.transitions)
class LowerBoundEstimator:
"""
Estimate the lower bound on the return;
    The available strategies are (from HCPI, Philip Thomas):
- Concentration inequality;
- Student t-test;
- BCa (bootstrapping based method);
"""
def __init__(self, gamma, pi_b, pi_t, lower_bound_strategy,confidence,estimator_type,rho_min,rho_max,R,trajectories_reg=None):
self.strategy_dict = {"CI": self.confidence_interval_based,
"student_t_test": self.student_t_test,
"BCa": self.bootstrap_method
}
self.R = R
self.lower_bound_strategy = lower_bound_strategy
self.trajectories_reg = trajectories_reg
self.gamma = gamma
self.pi_b = pi_b
self.pi_t = pi_t
self.confidence = confidence
self.estimator_type = estimator_type
self.rho_min = rho_min
self.rho_max = rho_max
def __call__(self, trajectories):
return self.strategy_dict[self.lower_bound_strategy](trajectories)
def confidence_interval_based(self, trajectories):
"""
:param trajectories: a batch of trajectories
        :return: a lower bound on the estimated return
"""
c = 0.5
estimator = EstimatorReturn(self.gamma, self.pi_b, self.pi_t, self.estimator_type, self.R, True, self.trajectories_reg)
list_estimates = estimator(trajectories)
list_estimates = self.normalize_return(list_estimates)
list_estimates_cut = [min(x,c) for x in list_estimates]
cross_substract = np.subtract.outer(np.square(list_estimates_cut), np.square(list_estimates_cut))
cross_substract_squared = np.square(cross_substract)
n = len(list_estimates)
lower_bound= (1./n)*np.sum(list_estimates_cut)- \
(7*c*log(2./self.confidence,2))/(3.*(n-1))-n/c*sqrt((log(2/self.confidence))*np.sum(np.sum(cross_substract_squared)))
return lower_bound*(self.rho_max-self.rho_min)+self.rho_min
def student_t_test(self, trajectories):
"""
        Warning!! This method relies on the assumption that the return is normally distributed
        :param trajectories: a batch of trajectories
        :return: a lower bound on the estimated return
"""
estimator = EstimatorReturn(self.gamma, self.pi_b, self.pi_t, self.estimator_type, self.R, True, self.trajectories_reg)
list_estimates = estimator(trajectories)
list_estimates = self.normalize_return(list_estimates)
estimated_return = np.mean(list_estimates)
n = len(list_estimates)
sigma = np.sqrt(1./(n-1)*np.sum(np.square(np.array(list_estimates)-estimated_return)))
lower_bound = estimated_return-sigma/sqrt((n-1))*stats.t.ppf(1-self.confidence, n-1)
return lower_bound*(self.rho_max-self.rho_min)+self.rho_min
def bootstrap_method(self, trajectories):
"""
        Warning! This method is approximate and can be time consuming in practice
        :param trajectories: a batch of trajectories
        :return: an approximate lower bound on the estimated return
"""
n_bootstrap = 2000
list_estimated_return = []
n = len(trajectories)
list_indexes = np.arange(n)
# Repeat the process 2000 times as recommended in P.Thomas 2015
for time_repeat in range(n_bootstrap):
indexes_chosen = np.random.choice(list_indexes,n,True)
estimator = EstimatorReturn(self.gamma, self.pi_b, self.pi_t, self.estimator_type, self.R, False, self.trajectories_reg)
current_trajectories = [trajectories[i] for i in indexes_chosen]
list_estimated_return.append(estimator(current_trajectories))
index_to_consider = int(self.confidence*n_bootstrap)
list_estimated_return.sort()
return list_estimated_return[index_to_consider]
def normalize_return(self,list_estimate):
return [(x-self.rho_min)/(self.rho_max-self.rho_min) for x in list_estimate]
|
py | b401f3af7c8b142255e94721309f0e97ed32b58f | # coding: utf-8
from __future__ import unicode_literals, division
__author__ = "Chen Zheng"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Chen Zheng"
__email__ = "[email protected]"
__date__ = "Oct 18, 2017"
import unittest
import os
import glob
import shutil
from custodian.feff.handlers import UnconvergedErrorHandler
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
"test_files")
def clean_dir():
for f in glob.glob("error.*.tar.gz"):
os.remove(f)
class UnconvergedErrorHandlerTest(unittest.TestCase):
def setUp(cls):
os.chdir(test_dir)
subdir = os.path.join(test_dir, "feff_unconverge")
os.chdir(subdir)
shutil.copy("ATOMS", "ATOMS.orig")
shutil.copy("PARAMETERS", "PARAMETERS.orig")
shutil.copy("HEADER", "HEADER.orig")
shutil.copy("POTENTIALS", "POTENTIALS.orig")
shutil.copy("feff.inp", "feff.inp.orig")
shutil.copy("log1.dat", "log1.dat.orig")
def test_check_unconverged(self):
h = UnconvergedErrorHandler()
self.assertTrue(h.check())
d = h.correct()
self.assertEqual(d["errors"], ["Non-converging job"])
self.assertEqual(d['actions'],
[{"dict": "PARAMETERS",
"action": {"_set": {"RESTART": []}}},
{'action': {'_set': {'SCF': [7, 0, 100, 0.2, 3]}},
'dict': 'PARAMETERS'}])
shutil.move("ATOMS.orig", "ATOMS")
shutil.move("PARAMETERS.orig", "PARAMETERS")
shutil.move("HEADER.orig", "HEADER")
shutil.move("POTENTIALS.orig", "POTENTIALS")
shutil.move("feff.inp.orig", "feff.inp")
shutil.move("log1.dat.orig", "log1.dat")
clean_dir()
|
py | b401f507464f37d0ab1e4fe3eab8feffdcef4e46 | # Copyright 2020, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Python version specifics.
This abstracts the Python version decisions. This makes decisions based on
the numbers, and attempts to give them meaningful names. Where possible it
should attempt to make run time detections.
"""
import os
import re
import sys
def getSupportedPythonVersions():
"""Officially supported Python versions for Nuitka."""
return ("2.6", "2.7", "3.3", "3.4", "3.5", "3.6", "3.7", "3.8", "3.9")
def getPartiallySupportedPythonVersions():
"""Partially supported Python versions for Nuitka."""
return ()
def getSupportedPythonVersionStr():
supported_python_versions = getSupportedPythonVersions()
supported_python_versions_str = repr(supported_python_versions)[1:-1]
supported_python_versions_str = re.sub(
r"(.*),(.*)$", r"\1, or\2", supported_python_versions_str
)
return supported_python_versions_str
def _getPythonVersion():
big, major, minor = sys.version_info[0:3]
# TODO: Give up on decimal versions already.
return big * 256 + major * 16 + min(15, minor)
python_version = _getPythonVersion()
python_version_full_str = ".".join(str(s) for s in sys.version_info[0:3])
python_version_str = ".".join(str(s) for s in sys.version_info[0:2])
def getErrorMessageExecWithNestedFunction():
"""Error message of the concrete Python in case an exec occurs in a
function that takes a closure variable.
"""
assert python_version < 0x300
# Need to use "exec" to detect the syntax error, pylint: disable=W0122
try:
exec(
"""
def f():
exec ""
def nested():
return closure"""
)
except SyntaxError as e:
return e.message.replace("'f'", "'%s'")
def getComplexCallSequenceErrorTemplate():
if not hasattr(getComplexCallSequenceErrorTemplate, "result"):
try:
# We are doing this on purpose, to get the exception.
# pylint: disable=not-an-iterable,not-callable
f = None
f(*None)
except TypeError as e:
result = (
e.args[0]
.replace("NoneType object", "%s")
.replace("NoneType", "%s")
.replace("None ", "%s ")
)
getComplexCallSequenceErrorTemplate.result = result
else:
sys.exit("Error, cannot detect expected error message.")
return getComplexCallSequenceErrorTemplate.result
_needs_set_literal_reverse_insertion = None
def needsSetLiteralReverseInsertion():
"""For Python3, until Python3.5 ca. the order of set literals was reversed."""
# Cached result, pylint: disable=global-statement
global _needs_set_literal_reverse_insertion
if _needs_set_literal_reverse_insertion is None:
try:
value = eval("{1,1.0}.pop()") # pylint: disable=eval-used
except SyntaxError:
_needs_set_literal_reverse_insertion = False
else:
_needs_set_literal_reverse_insertion = type(value) is float
return _needs_set_literal_reverse_insertion
def needsDuplicateArgumentColOffset():
if python_version < 0x353:
return False
else:
return True
def isUninstalledPython():
if os.name == "nt":
import ctypes.wintypes
GetSystemDirectory = ctypes.windll.kernel32.GetSystemDirectoryW
GetSystemDirectory.argtypes = (ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD)
GetSystemDirectory.restype = ctypes.wintypes.DWORD
MAX_PATH = 4096
buf = ctypes.create_unicode_buffer(MAX_PATH)
res = GetSystemDirectory(buf, MAX_PATH)
assert res != 0
system_path = os.path.normcase(buf.value)
return not getRunningPythonDLLPath().startswith(system_path)
return (
os.path.exists(os.path.join(sys.prefix, "conda-meta"))
or "WinPython" in sys.version
)
def getRunningPythonDLLPath():
import ctypes.wintypes
MAX_PATH = 4096
buf = ctypes.create_unicode_buffer(MAX_PATH)
GetModuleFileName = ctypes.windll.kernel32.GetModuleFileNameW
GetModuleFileName.argtypes = (
ctypes.wintypes.HANDLE,
ctypes.wintypes.LPWSTR,
ctypes.wintypes.DWORD,
)
GetModuleFileName.restype = ctypes.wintypes.DWORD
# We trust ctypes internals here, pylint: disable=protected-access
res = GetModuleFileName(ctypes.pythonapi._handle, buf, MAX_PATH)
if res == 0:
# Windows only code, pylint: disable=I0021,undefined-variable
raise WindowsError(
ctypes.GetLastError(), ctypes.FormatError(ctypes.GetLastError())
)
dll_path = os.path.normcase(buf.value)
assert os.path.exists(dll_path), dll_path
return dll_path
def getTargetPythonDLLPath():
dll_path = getRunningPythonDLLPath()
from nuitka.Options import isPythonDebug
if dll_path.endswith("_d.dll"):
if not isPythonDebug():
dll_path = dll_path[:-6] + ".dll"
if not os.path.exists(dll_path):
sys.exit("Error, cannot switch to non-debug Python, not installed.")
else:
if isPythonDebug():
dll_path = dll_path[:-4] + "_d.dll"
if not os.path.exists(dll_path):
sys.exit("Error, cannot switch to debug Python, not installed.")
return dll_path
def isStaticallyLinkedPython():
try:
import sysconfig
except ImportError:
# Cannot detect this properly for Python 2.6, but we don't care much
# about that anyway.
return False
result = sysconfig.get_config_var("Py_ENABLE_SHARED") == 0
if result:
from nuitka.utils.Execution import check_output
with open(os.devnull, "w") as devnull:
output = check_output(
(os.path.realpath(sys.executable) + "-config", "--ldflags"),
stderr=devnull,
)
if str is not bytes:
output = output.decode("utf8")
import shlex
output = shlex.split(output)
python_abi_version = python_version_str + getPythonABI()
result = ("-lpython" + python_abi_version) not in output
return result
def getPythonABI():
if hasattr(sys, "abiflags"):
abiflags = sys.abiflags
# Cyclic dependency here.
from nuitka.Options import isPythonDebug
if isPythonDebug() or hasattr(sys, "getobjects"):
if not abiflags.startswith("d"):
abiflags = "d" + abiflags
else:
abiflags = ""
return abiflags
def getSystemPrefixPath():
"""Return real sys.prefix as an absolute path.
Note: This breaks out of virtualenvs as necessary.
Returns: path to system prefix
"""
sys_prefix = getattr(sys, "real_prefix", getattr(sys, "base_prefix", sys.prefix))
sys_prefix = os.path.abspath(sys_prefix)
return sys_prefix
|
py | b401f638411a0c5e3b84eb843a4e84ff50c33b10 | """
problog.cnf_formula - CNF
-------------------------
Provides access to CNF and weighted CNF.
..
Part of the ProbLog distribution.
Copyright 2015 KU Leuven, DTAI Research Group
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from problog.logic import Clause
from .cycles import break_cycles
from .formula import BaseFormula, LogicFormula, LogicGraph
from .core import transform
from .util import Timer
from .evaluator import SemiringLogProbability
class CNF(BaseFormula):
"""A logic formula in Conjunctive Normal Form."""
# noinspection PyUnusedLocal
def __init__(self, **kwdargs):
BaseFormula.__init__(self)
self._clauses = [] # All clauses in the CNF (incl. comment)
self._clausecount = 0 # Number of actual clauses (not incl. comment)
# noinspection PyUnusedLocal
def add_atom(self, atom, force=False):
"""Add an atom to the CNF.
:param atom: name of the atom
        :param force: add a clause for each atom to force its existence in the final CNF
"""
self._atomcount += 1
if force:
self._clauses.append([atom, -atom])
self._clausecount += 1
def add_comment(self, comment):
"""Add a comment clause.
:param comment: text of the comment
"""
self._clauses.append(["c", comment])
def add_clause(self, head, body):
"""Add a clause to the CNF.
:param head: head of the clause (i.e. atom it defines)
:param body: body of the clause
"""
self._clauses.append([head] + list(body))
self._clausecount += 1
def add_constraint(self, constraint, force=False):
"""Add a constraint.
:param constraint: constraint to add
:param force: force constraint to be true even though none of its values are set
:type constraint: problog.constraint.Constraint
"""
BaseFormula.add_constraint(self, constraint)
for c in constraint.as_clauses():
self.add_clause(force, c)
def _clause2str(self, clause, weighted=False):
if weighted:
raise NotImplementedError()
else:
if clause[1] is None:
return " ".join(map(str, clause[2:])) + " 0"
else:
return " ".join(map(str, clause[1:])) + " 0"
def to_dimacs(
self,
partial=False,
weighted=False,
semiring=None,
smart_constraints=False,
names=False,
invert_weights=False,
):
"""Transform to a string in DIMACS format.
        :param partial: split variables into possibly true / certainly true
        :param weighted: create a weighted CNF; pass False, :class:`int`, or :class:`float` to select the weight type
:param semiring: semiring for weight transformation (if weighted)
:param names: Print names in comments
:return: string in DIMACS format
"""
if weighted:
t = "wcnf"
else:
t = "cnf"
header, content = self._contents(
partial=partial,
weighted=weighted,
semiring=semiring,
smart_constraints=smart_constraints,
invert_weights=invert_weights,
)
result = "p %s %s\n" % (t, " ".join(map(str, header)))
if names:
tpl = "c {{:<{}}} {{}}\n".format(len(str(self._atomcount)) + 1)
for n, i, l in self.get_names_with_label():
result += tpl.format(i, n)
result += "\n".join(map(lambda cl: " ".join(map(str, cl)) + " 0", content))
return result
def to_lp(self, partial=False, semiring=None, smart_constraints=False):
"""Transfrom to CPLEX lp format (MIP program).
This is always weighted.
:param partial: split variables in possibly true / certainly true
:param semiring: semiring for weight transformation (if weighted)
:param smart_constraints: only enforce constraints when variables are set
:return: string in LP format
"""
header, content = self._contents(
partial=partial,
weighted=False,
semiring=semiring,
smart_constraints=smart_constraints,
)
if semiring is None:
semiring = SemiringLogProbability()
var2str = lambda var: "x%s" % var if var > 0 else "-x%s" % -var
if partial:
ct = lambda it: 2 * it
pt = lambda it: ct(it) - 1
weights = self.extract_weights(semiring)
objective = []
for v in range(0, self.atomcount + 1):
w_pos, w_neg = weights.get(v, (semiring.one(), semiring.one()))
if not semiring.is_one(w_pos):
w_ct = w_pos
objective.append("%s x%s" % (w_ct, ct(v)))
if not semiring.is_one(w_neg):
w_pt = -w_neg
objective.append("%s x%s" % (w_pt, pt(v)))
objective = " + ".join(objective)
else:
weights = {}
for i, w in self.extract_weights(semiring).items():
w = w[0] - w[1]
if w != 0:
weights[i] = str(w)
objective = " + ".join(
[
"%s x%s" % (weights[i], i)
for i in range(0, self.atomcount + 1)
if i in weights
]
)
result = "maximize\n"
result += " obj:" + objective + "\n"
result += "subject to\n"
for clause in content:
n_neg = len([c for c in clause if c < 0])
result += (
" "
+ " + ".join(map(var2str, clause))
+ " >= "
+ str(1 - n_neg)
+ "\n"
)
result += "bounds\n"
for i in range(1, self.atomcount + 1):
result += " 0 <= x%s <= 1\n" % i
result += "binary\n"
result += " " + " ".join(map(var2str, range(1, self.atomcount + 1))) + "\n"
result += "end\n"
return result
def _contents(
self,
partial=False,
weighted=False,
semiring=None,
smart_constraints=False,
invert_weights=False,
):
# Helper function to determine the certainly true / possibly true names (for partial)
ct = lambda i: 2 * i
pt = lambda i: ct(i) - 1
cpt = lambda i: -pt(-i) if i < 0 else ct(i)
w_mult = 1
w_max = []
weights = None
if weighted == int:
w_mult = 10000
w_min = -10000
wt1 = lambda w: int(max(w_min, w) * w_mult)
elif weighted == float:
w_mult = 1
w_min = -10000
wt1 = lambda w: w
elif weighted:
w_min = -10000
w_mult = 10000
wt1 = lambda w: int(max(w_min, w) * w_mult)
if weighted:
if invert_weights:
wt = lambda w: wt1(-w)
else:
wt = wt1
if semiring is None:
semiring = SemiringLogProbability()
weights = self.extract_weights(semiring)
w_sum = 0.0
for w_pos, w_neg in weights.values():
w_sum += max(w_pos, w_min) + max(w_neg, w_min)
w_max = [int(-w_sum * w_mult) + 1]
atomcount = self.atomcount
if partial:
atomcount *= 2
clausecount = self.clausecount
if partial:
clausecount += atomcount
clauses = []
if partial:
# For each atom: add constraint
for a in range(1, self.atomcount + 1):
clauses.append(w_max + [pt(a), -ct(a)])
if weighted:
w_pos, w_neg = weights.get(a, (semiring.one(), semiring.one()))
if not semiring.is_one(w_pos):
clauses.append([-wt(w_pos), -ct(a)])
if not semiring.is_one(w_neg):
clauses.append([-wt(w_neg), pt(a)])
# For each clause:
for c in self.clauses:
head, body = c[0], c[1:]
if type(head) != bool:
# Clause does not represent a constraint.
head_neg = head < 0
head = abs(head)
head1, head2 = ct(head), pt(head)
if head_neg:
head1, head2 = -head1, -head2
clauses.append(w_max + [head1, head2] + list(map(cpt, body)))
elif smart_constraints and not head:
# It's a constraint => add an indicator variable.
# a \/ -b ===> -pt(a) \/ I => for all
atomcount += 1
ind = atomcount
v = []
for b in body:
clauses.append(w_max + [-ct(abs(b)), ind])
clauses.append(w_max + [pt(abs(b)), ind])
v += [ct(abs(b)), -pt(abs(b))]
clauses.append(w_max + v + [-ind])
clauses.append(w_max + list(map(cpt, body)) + [-ind])
else:
clauses.append(w_max + list(map(cpt, body)))
else:
if weighted:
for a in range(1, self.atomcount + 1):
w_pos, w_neg = weights.get(a, (semiring.one(), semiring.one()))
if not semiring.is_one(w_pos):
clauses.append([-wt(w_pos), -a])
if not semiring.is_one(w_neg):
clauses.append([-wt(w_neg), a])
for c in self.clauses:
head, body = c[0], c[1:]
if head is None or type(head) == bool and not head:
clauses.append(w_max + list(body))
else:
clauses.append(w_max + [head] + list(body))
return [atomcount, len(clauses)] + w_max, clauses
def from_partial(self, atoms):
"""Translates a (complete) conjunction in the partial formula back to the complete formula.
For example: given an original formula with one atom '1',
this atom is translated to two atoms '1' (pt) and '2' (ct).
The possible conjunctions are:
* [1, 2] => [1] certainly true (and possibly true) => true
* [-1, -2] => [-1] not possibly true (and certainly true) => false
* [1, -2] => [] possibly true but not certainly true => unknown
* [-1, 2] => INVALID certainly true but not possible => invalid (not checked)
:param atoms: complete list of atoms in partial CNF
:return: partial list of atoms in full CNF
"""
result = []
for s in atoms:
if s % 2 == 1 and s < 0:
r = (abs(s) + 1) // 2
if r in self.get_weights():
result.append(-r)
elif s % 2 == 0 and s > 0:
r = (abs(s) + 1) // 2
if r in self.get_weights():
result.append(r)
return result
def is_trivial(self):
"""Checks whether the CNF is trivial (i.e. contains no clauses)"""
return self.clausecount == 0
@property
def clauses(self):
"""Return the list of clauses"""
return self._clauses
@property
def clausecount(self):
"""Return the number of clauses"""
return self._clausecount
# noinspection PyUnusedLocal
@transform(LogicFormula, CNF)
def clarks_completion(source, destination, force_atoms=False, **kwdargs):
"""Transform an acyclic propositional program to a CNF using Clark's completion.
:param source: acyclic program to transform
:param destination: target CNF
:param kwdargs: additional options (ignored)
:return: destination
"""
with Timer("Clark's completion"):
# Each rule in the source formula will correspond to an atom.
num_atoms = len(source)
# Copy weight information.
destination.set_weights(source.get_weights())
# Add atoms.
for i in range(0, num_atoms):
destination.add_atom(i + 1, force=force_atoms)
# Complete other nodes
# Note: assumes negation is encoded as negative number.
for index, node, nodetype in source:
if nodetype == "conj":
destination.add_clause(index, list(map(lambda x: -x, node.children)))
for c in node.children:
destination.add_clause(-index, [c])
elif nodetype == "disj":
destination.add_clause(-index, node.children)
for c in node.children:
destination.add_clause(index, [-c])
elif nodetype == "atom":
pass
else:
raise ValueError("Unexpected node type: '%s'" % nodetype)
# Copy constraints.
for c in source.constraints():
destination.add_constraint(c)
# Copy cycle constraints.
for c in source.cycle_constraints():
destination.add_constraint(c)
# Copy node names.
for n, i, l in source.get_names_with_label():
destination.add_name(n, i, l)
return destination
class CNF_ASP(CNF):
def __init__(self, file=None, **kwdargs):
CNF.__init__(self, **kwdargs)
self.neg_cycles = True
self.file = file
def add_cnf_clause(self, cl):
"""Add a clause to the CNF.
:param head: head of the clause (i.e. atom it defines)
:param body: body of the clause
"""
self._clauses.append(cl)
self._clausecount += 1
def set_atomcount(self, n):
self._atomcount = n
def add_weight(self, atom, weight):
self._weights[atom] = weight
# noinspection PyUnusedLocal
@transform(LogicGraph, CNF_ASP)
def cnf_dsharp_asp(source, destination, force_atoms=False, **kwdargs):
source.compute_sccs()
destination.neg_cycles = source.neg_cycles
max_id = 0
# Add scc info
scc_keys = set(source.scc.values())
scc_stmt = f"s {len(scc_keys)}"
destination.add_comment(scc_stmt)
# Add founded vars
for f_var in source.founded_vars:
f_stmt = f"f {f_var} {source.scc[f_var]}"
destination.add_comment(f_stmt)
# Check if there is some extra body id in the commented rules
# that does not end up in the usual cnf clauses (needed for fake vars)
for index, node, nodetype in source:
if nodetype == 'disj':
for max_lit, r in source.get_rules_cnf(index):
destination.add_comment(r)
if max_lit>max_id:
max_id = max_lit
# Add evidence: must be propagated
# for e in source.evidence_all():
# destination.add_comment(f"e {e[1]} {e[2]}")
with Timer('Clark\'s completion'):
# Each rule in the source formula will correspond to an atom.
num_atoms = len(source)
# print(source)
# Copy weight information.
destination.set_weights(source.get_weights())
# Add atoms.
for i in range(0, num_atoms):
destination.add_atom(i+1, force=force_atoms)
# Complete other nodes
# Note: assumes negation is encoded as negative number.
for index, node, nodetype in source:
if nodetype == 'conj':
destination.add_clause(index, list(map(lambda x: -x, node.children)))
for c in node.children:
destination.add_clause(-index, [c])
elif nodetype == 'disj':
destination.add_clause(-index, node.children)
for c in node.children:
destination.add_clause(index, [-c])
elif nodetype == 'atom':
pass
else:
raise ValueError("Unexpected node type: '%s'" % nodetype)
# Copy constraints.
for c in source.constraints():
destination.add_constraint(c)
# Copy node names.
for n, i, l in source.get_names_with_label():
destination.add_name(n, i, l)
# Fake vars (required by dsharp with unfounded)
# (only if there are sccs/founded vars/rules)
if len(source.scc) > 0:
n = max(destination._atomcount, max_id) +1
destination.add_atom(n, force=force_atoms)
destination.add_atom(n+1, force=force_atoms)
destination.add_clause(-n,[n+1])
destination.add_clause(-n,[-n-1])
destination.add_clause(n,[n+1])
# print(destination.to_dimacs())
return destination
# @transform(CNF_ASP, LogicGraph)
# def cnf2lg(source, destination, **kwdargs):
# lg = LogicGraph(**kwdargs)
# lg._atomcount = source._atomcount
# lg._constraints = source._constraints
# lg._names = source._names
# lg.set_weights(source._weights)
# clauses_ids = [lg.add_or(clause) for clause in source.clauses ]
# lg.add_and(clauses_ids)
# lg.cnf_file = source.file
# return lg |
py | b401f67836451c027eeb9608fb4317ab10b799a8 | # Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from glob import glob
from setuptools import setup
INSTALL_REQUIREMENTS = [
'jsonrpclib',
'cjson'
]
setup(
name='simApi',
version=open('VERSION').read().split()[0],
description='vEOS extension to serve custom eAPI responses',
long_description=open('README.md').read(),
author='Andrei Dvornic, Arista EOS+',
author_email='[email protected]',
license='BSD-3',
url='http://eos.arista.com',
py_modules=['SimApi'],
install_requires=INSTALL_REQUIREMENTS,
data_files=[
('/etc/nginx/external_conf', ['conf/simApi.conf']),
('/etc/uwsgi', ['conf/simApi.ini']),
('/persist/sys/simAPI', ['conf/simApi.json']),
('/persist/sys/simAPI/plugins', glob('plugins/*')),
]
)
|
py | b401f6df462d37c367b763210bf967ec3d6deb11 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class StorageAccountsOperations:
"""StorageAccountsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.storage.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def check_name_availability(
self,
account_name: "_models.StorageAccountCheckNameAvailabilityParameters",
**kwargs: Any
) -> "_models.CheckNameAvailabilityResult":
"""Checks that the storage account name is valid and is not already in use.
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: ~azure.mgmt.storage.v2019_04_01.models.StorageAccountCheckNameAvailabilityParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CheckNameAvailabilityResult, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_04_01.models.CheckNameAvailabilityResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CheckNameAvailabilityResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.check_name_availability.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(account_name, 'StorageAccountCheckNameAvailabilityParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('CheckNameAvailabilityResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/checkNameAvailability'} # type: ignore
async def _create_initial(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.StorageAccountCreateParameters",
**kwargs: Any
) -> Optional["_models.StorageAccount"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.StorageAccount"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'StorageAccountCreateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('StorageAccount', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
async def begin_create(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.StorageAccountCreateParameters",
**kwargs: Any
) -> AsyncLROPoller["_models.StorageAccount"]:
"""Asynchronously creates a new storage account with the specified parameters. If an account is
already created and a subsequent create request is issued with different properties, the
account properties will be updated. If an account is already created and a subsequent create or
update request is issued with the exact same set of properties, the request will succeed.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide for the created account.
:type parameters: ~azure.mgmt.storage.v2019_04_01.models.StorageAccountCreateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either StorageAccount or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.storage.v2019_04_01.models.StorageAccount]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccount"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_initial(
resource_group_name=resource_group_name,
account_name=account_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('StorageAccount', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
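    # Illustrative usage sketch (not part of the generated client). The credential,
    # subscription id, resource group and account names below are placeholder assumptions.
    #
    #     from azure.identity.aio import DefaultAzureCredential
    #     from azure.mgmt.storage.v2019_04_01.aio import StorageManagementClient
    #     from azure.mgmt.storage.v2019_04_01.models import StorageAccountCreateParameters, Sku
    #
    #     async with StorageManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
    #         poller = await client.storage_accounts.begin_create(
    #             "my-rg", "mystorageacct",
    #             StorageAccountCreateParameters(
    #                 sku=Sku(name="Standard_LRS"), kind="StorageV2", location="eastus"),
    #         )
    #         account = await poller.result()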
async def delete(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> None:
"""Deletes a storage account in Microsoft Azure.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
async def get_properties(
self,
resource_group_name: str,
account_name: str,
expand: Optional[str] = "geoReplicationStats",
**kwargs: Any
) -> "_models.StorageAccount":
"""Returns the properties for the specified storage account including but not limited to name, SKU
name, location, and account status. The ListKeys operation should be used to retrieve storage
keys.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
        :param expand: May be used to expand the properties within the account's properties. By
         default, this data is not included when fetching properties. Currently only
         geoReplicationStats is supported.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccount, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_04_01.models.StorageAccount
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccount"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = self.get_properties.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccount', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_properties.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
async def update(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.StorageAccountUpdateParameters",
**kwargs: Any
) -> "_models.StorageAccount":
"""The update operation can be used to update the SKU, encryption, access tier, or tags for a
storage account. It can also be used to map the account to a custom domain. Only one custom
domain is supported per storage account; the replacement/change of custom domain is not
supported. In order to replace an old custom domain, the old value must be cleared/unregistered
before a new value can be set. The update of multiple properties is supported. This call does
not change the storage keys for the account. If you want to change the storage account keys,
use the regenerate keys operation. The location and name of the storage account cannot be
changed after creation.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide for the updated account.
:type parameters: ~azure.mgmt.storage.v2019_04_01.models.StorageAccountUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccount, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_04_01.models.StorageAccount
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccount"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'StorageAccountUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccount', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}'} # type: ignore
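    # Illustrative usage sketch (not part of the generated client); resource names and
    # tag values are placeholders. A tags-only update leaves other properties untouched.
    #
    #     from azure.mgmt.storage.v2019_04_01.models import StorageAccountUpdateParameters
    #
    #     updated = await client.storage_accounts.update(
    #         "my-rg", "mystorageacct", StorageAccountUpdateParameters(tags={"env": "dev"}))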
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.StorageAccountListResult"]:
"""Lists all the storage accounts available under the subscription. Note that storage keys are not
returned; use the ListKeys operation for this.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either StorageAccountListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2019_04_01.models.StorageAccountListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('StorageAccountListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Storage/storageAccounts'} # type: ignore
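    # Illustrative usage sketch (not part of the generated client): list() returns an
    # AsyncItemPaged, so results are consumed with "async for" and paging is handled
    # transparently via the next_link extracted above.
    #
    #     async for account in client.storage_accounts.list():
    #         print(account.name, account.location)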
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.StorageAccountListResult"]:
"""Lists all the storage accounts available under the given resource group. Note that storage keys
are not returned; use the ListKeys operation for this.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either StorageAccountListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.storage.v2019_04_01.models.StorageAccountListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('StorageAccountListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts'} # type: ignore
async def list_keys(
self,
resource_group_name: str,
account_name: str,
expand: Optional[str] = "kerb",
**kwargs: Any
) -> "_models.StorageAccountListKeysResult":
"""Lists the access keys or Kerberos keys (if active directory enabled) for the specified storage
account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param expand: Specifies type of the key to be listed. Possible value is kerb.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccountListKeysResult, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_04_01.models.StorageAccountListKeysResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListKeysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = self.list_keys.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccountListKeysResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/listKeys'} # type: ignore
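    # Illustrative usage sketch (not part of the generated client); names are placeholders.
    # The result's .keys attribute is a list of StorageAccountKey objects.
    #
    #     keys_result = await client.storage_accounts.list_keys("my-rg", "mystorageacct")
    #     primary_key = keys_result.keys[0].value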
async def regenerate_key(
self,
resource_group_name: str,
account_name: str,
regenerate_key: "_models.StorageAccountRegenerateKeyParameters",
**kwargs: Any
) -> "_models.StorageAccountListKeysResult":
"""Regenerates one of the access keys or Kerberos keys for the specified storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
        :param regenerate_key: Specifies the name of the key which should be regenerated -- key1,
         key2, kerb1, or kerb2.
:type regenerate_key: ~azure.mgmt.storage.v2019_04_01.models.StorageAccountRegenerateKeyParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageAccountListKeysResult, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_04_01.models.StorageAccountListKeysResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageAccountListKeysResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.regenerate_key.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(regenerate_key, 'StorageAccountRegenerateKeyParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageAccountListKeysResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
regenerate_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/regenerateKey'} # type: ignore
async def list_account_sas(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.AccountSasParameters",
**kwargs: Any
) -> "_models.ListAccountSasResponse":
"""List SAS credentials of a storage account.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide to list SAS credentials for the storage account.
:type parameters: ~azure.mgmt.storage.v2019_04_01.models.AccountSasParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ListAccountSasResponse, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_04_01.models.ListAccountSasResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListAccountSasResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.list_account_sas.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'AccountSasParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ListAccountSasResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_account_sas.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListAccountSas'} # type: ignore
async def list_service_sas(
self,
resource_group_name: str,
account_name: str,
parameters: "_models.ServiceSasParameters",
**kwargs: Any
) -> "_models.ListServiceSasResponse":
"""List service SAS credentials of a specific resource.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:param parameters: The parameters to provide to list service SAS credentials.
:type parameters: ~azure.mgmt.storage.v2019_04_01.models.ServiceSasParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ListServiceSasResponse, or the result of cls(response)
:rtype: ~azure.mgmt.storage.v2019_04_01.models.ListServiceSasResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListServiceSasResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.list_service_sas.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ServiceSasParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ListServiceSasResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_service_sas.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/ListServiceSas'} # type: ignore
async def _failover_initial(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
# Construct URL
url = self._failover_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_failover_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/failover'} # type: ignore
async def begin_failover(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Failover request can be triggered for a storage account in case of availability issues. The
failover occurs from the storage account's primary cluster to secondary cluster for RA-GRS
accounts. The secondary cluster will become primary after failover.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._failover_initial(
resource_group_name=resource_group_name,
account_name=account_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_failover.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/failover'} # type: ignore
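    # Illustrative usage sketch (not part of the generated client): the failover LRO
    # returns no body, so callers simply await the poller for completion.
    #
    #     poller = await client.storage_accounts.begin_failover("my-rg", "mystorageacct")
    #     await poller.result()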
async def revoke_user_delegation_keys(
self,
resource_group_name: str,
account_name: str,
**kwargs: Any
) -> None:
"""Revoke user delegation keys.
:param resource_group_name: The name of the resource group within the user's subscription. The
name is case insensitive.
:type resource_group_name: str
:param account_name: The name of the storage account within the specified resource group.
Storage account names must be between 3 and 24 characters in length and use numbers and
lower-case letters only.
:type account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
# Construct URL
url = self.revoke_user_delegation_keys.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str', max_length=24, min_length=3),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
revoke_user_delegation_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Storage/storageAccounts/{accountName}/revokeUserDelegationKeys'} # type: ignore
|
py | b401f7c41f85266a2d5376278fd964f5bedfef31 | # coding: utf-8
"""
Main entry point
See:
- https://docs.python.org/3/library/__main__.html
"""
from legipy.cli import cli
if __name__ == "__main__":
cli()
|
py | b401fa2d256553e1049c89b37843460c66abbf65 | # Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
Example showing how to tune multiple objectives at once of an artificial function.
"""
import logging
from pathlib import Path
import numpy as np
from syne_tune.backend import LocalBackend
from syne_tune.optimizer.schedulers.multiobjective.moasha import MOASHA
from syne_tune import Tuner
from syne_tune.config_space import uniform
from syne_tune import StoppingCriterion
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
np.random.seed(0)
max_steps = 27
n_workers = 4
config_space = {
"steps": max_steps,
"theta": uniform(0, np.pi / 2),
"sleep_time": 0.01,
}
entry_point = Path(__file__).parent / "training_scripts" / "mo_artificial" / "mo_artificial.py"
mode = "min"
np.random.seed(0)
scheduler = MOASHA(
max_t=max_steps,
time_attr="step",
mode=mode,
metrics=["y1", "y2"],
config_space=config_space,
)
trial_backend = LocalBackend(entry_point=str(entry_point))
stop_criterion = StoppingCriterion(max_wallclock_time=30)
tuner = Tuner(
trial_backend=trial_backend,
scheduler=scheduler,
stop_criterion=stop_criterion,
n_workers=n_workers,
sleep_time=0.5,
)
tuner.run()
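    # Optional follow-up (an assumption about this syne_tune version, not part of the
    # original example): results written by the tuner can be inspected offline, e.g.
    #
    #     from syne_tune.experiments import load_experiment
    #     load_experiment(tuner.name).plot()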
|
py | b401fa3685989724aae2c3145cb1abdc7537cab4 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 25 20:42:44 2017
@author: Yefee
"""
from .share_constant import *
# sec in siderial day ~ sec
sday = SHR_CONST_SDAY
# earth rot ~ rad/sec
omega = SHR_CONST_OMEGA
# radius of earth ~ m
rearth = SHR_CONST_REARTH
# acceleration of gravity ~ m/s^2
g = SHR_CONST_G
# Stefan-Boltzmann constant ~ W/m^2/K^4
stebol = SHR_CONST_STEBOL
# Boltzmann's constant ~ J/K/molecule
boltz = SHR_CONST_BOLTZ
# Avogadro's number ~ molecules/kmole
avogad = SHR_CONST_AVOGAD
# Universal gas constant ~ J/K/kmole
rgas = SHR_CONST_RGAS
# molecular weight dry air ~ kg/kmole
mwdair = SHR_CONST_MWDAIR
# molecular weight water vapor
mwwv = SHR_CONST_MWWV
# Dry air gas constant ~ J/K/kg
rdair = SHR_CONST_RDAIR
# Water vapor gas constant ~ J/K/kg
rwv = SHR_CONST_RWV
# RWV/RDAIR - 1.0
zvir = SHR_CONST_ZVIR
# Von Karman constant
karman = SHR_CONST_KARMAN
# freezing T of fresh water ~ K (intentionally made == to TKTRIP)
tkfrz = SHR_CONST_TKFRZ
# triple point of fresh water ~ K
tktrip = SHR_CONST_TKTRIP
# density of dry air at STP ~ kg/m^3
rhoair = SHR_CONST_RHODAIR
# density of fresh water ~ kg/m^3
rhofw = SHR_CONST_RHOFW
# density of sea water ~ kg/m^3
rhosw = SHR_CONST_RHOSW
# density of ice ~ kg/m^3
rhoice = SHR_CONST_RHOICE
# specific heat of dry air ~ J/kg/K
cpdair = SHR_CONST_CPDAIR
# specific heat of fresh h2o ~ J/kg/K
cpfw = SHR_CONST_CPFW
# specific heat of sea h2o ~ J/kg/K
cpsw = SHR_CONST_CPSW
# specific heat of water vap ~ J/kg/K
cpwv = SHR_CONST_CPWV
# specific heat of fresh ice ~ J/kg/K
cpice = SHR_CONST_CPICE
# latent heat of fusion ~ J/kg
latice = SHR_CONST_LATICE
# latent heat of evaporation ~ J/kg
latvap = SHR_CONST_LATVAP
# latent heat of sublimation ~ J/kg
latsub = SHR_CONST_LATSUB
# ocn ref salinity (psu)
ocn_ref_sal = SHR_CONST_OCN_REF_SAL
# ice ref salinity (psu)
ice_ref_sal = SHR_CONST_ICE_REF_SAL
# cappa in atmos
cappa = (SHR_CONST_RGAS/SHR_CONST_MWDAIR)/SHR_CONST_CPDAIR  # R/Cp
 |
py | b401fa5b36d6827fe91373c617de3a73d07a675f | import json
import pytest
import pdb
# Make pytest find the code, someone better at Python can help me make this better :-)
import sys
sys.path.append('/code/app/src')
from src import lambda_function
def event(country_code, slug="/"):
return {
"Records": [
{
"cf": {
"request": {
"uri": slug,
"headers": {
"cloudfront-viewer-country": [
{
"key": "CloudFront-Viewer-Country",
"value": country_code
}
],
"cookie": [
{
"key": "cookie",
"value": "somename=put_a_cookie_value_here"
}
],
}
}
}
}
]
}
def test_diagnostic(mocker):
response = lambda_function.lambda_handler(event("US", "/diagnostic"), "")
assert response['status'] == '200'
|
py | b401fb84c604caccfac352aace7e4d3bca3a06af | from epimargin.etl.csse import *
from epimargin.utils import setup
data, _ = setup()
csse_dir = data/"csse"
csse_dir.mkdir(exist_ok = True)
china = data/"china"
china.mkdir(exist_ok = True)
fetch_range(csse_dir, "April 13, 2020", "June 30, 2020")
df = load(csse_dir, "April 13, 2020", "June 30, 2020", "Country_Region == 'China'")\
.drop(columns = ["FIPS", "Admin2", "Last_Update", "Lat", "Long_", "Combined_Key", "Incidence_Rate", "Case-Fatality_Ratio", "Country_Region"])\
.assign(Active = lambda _:_["Active"].astype(int))
# .drop(columns = ["FIPS", "Admin2", "Last_Update", "Lat", "Long_", "Combined_Key", "Incident_Rate", "Case_Fatality_Ratio", "Country_Region"])
# df["Active"] = df["Active"].astype(int)
def assemble(province: str):
totals = df[df.Province_State == province]\
[["date", "Deaths", "Recovered", "Confirmed"]]\
.set_index("date")\
.rename(columns = {"Confirmed": "T", "Deaths": "D", "Recovered": "R"})
diffs = totals.diff().dropna().astype(int).rename(lambda x: "d" + x, axis = 1)
return pd.concat([totals, diffs], axis = 1).dropna().astype(int)
assemble("Beijing")["April 15, 2020":"May 18, 2020"].to_csv(china/"China_1_Beijing.csv")
|
py | b401fc0aac1d7630a2904a52a0c5cf7d4aee84f2 | from jschon.json import JSON
from jschon.jsonschema import Scope
from jschon.vocabulary import Keyword
__all__ = [
'TitleKeyword',
'DescriptionKeyword',
'DefaultKeyword',
'DeprecatedKeyword',
'ReadOnlyKeyword',
'WriteOnlyKeyword',
'ExamplesKeyword',
'ContentMediaTypeKeyword',
'ContentEncodingKeyword',
'ContentSchemaKeyword',
]
class AnnotationKeyword(Keyword):
def evaluate(self, instance: JSON, scope: Scope) -> None:
scope.annotate(self.json.value)
scope.noassert()
class TitleKeyword(AnnotationKeyword):
key = "title"
class DescriptionKeyword(AnnotationKeyword):
key = "description"
class DefaultKeyword(AnnotationKeyword):
key = "default"
class DeprecatedKeyword(AnnotationKeyword):
key = "deprecated"
class ReadOnlyKeyword(AnnotationKeyword):
key = "readOnly"
class WriteOnlyKeyword(AnnotationKeyword):
key = "writeOnly"
class ExamplesKeyword(AnnotationKeyword):
key = "examples"
class ContentMediaTypeKeyword(AnnotationKeyword):
key = "contentMediaType"
types = "string"
class ContentEncodingKeyword(AnnotationKeyword):
key = "contentEncoding"
types = "string"
class ContentSchemaKeyword(AnnotationKeyword):
key = "contentSchema"
types = "string"
depends = "contentMediaType"
def evaluate(self, instance: JSON, scope: Scope) -> None:
if scope.sibling(instance, "contentMediaType"):
super().evaluate(instance, scope)
else:
scope.discard()
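# Illustrative sketch (not part of jschon): a project-specific annotation keyword can
# reuse AnnotationKeyword unchanged, since it only records its value on the scope via
# scope.annotate() and never asserts. The "x-owner" keyword name is a made-up example.
#
#     class OwnerKeyword(AnnotationKeyword):
#         key = "x-owner"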
|
py | b401fc5da3fd46f5b34d32973850c77b0dfb4c2d | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetPublicIPAddressResult',
'AwaitableGetPublicIPAddressResult',
'get_public_ip_address',
]
@pulumi.output_type
class GetPublicIPAddressResult:
"""
Public IP address resource.
"""
def __init__(__self__, ddos_settings=None, dns_settings=None, etag=None, id=None, idle_timeout_in_minutes=None, ip_address=None, ip_configuration=None, ip_tags=None, location=None, name=None, provisioning_state=None, public_ip_address_version=None, public_ip_allocation_method=None, public_ip_prefix=None, resource_guid=None, sku=None, tags=None, type=None, zones=None):
if ddos_settings and not isinstance(ddos_settings, dict):
raise TypeError("Expected argument 'ddos_settings' to be a dict")
pulumi.set(__self__, "ddos_settings", ddos_settings)
if dns_settings and not isinstance(dns_settings, dict):
raise TypeError("Expected argument 'dns_settings' to be a dict")
pulumi.set(__self__, "dns_settings", dns_settings)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if idle_timeout_in_minutes and not isinstance(idle_timeout_in_minutes, int):
raise TypeError("Expected argument 'idle_timeout_in_minutes' to be a int")
pulumi.set(__self__, "idle_timeout_in_minutes", idle_timeout_in_minutes)
if ip_address and not isinstance(ip_address, str):
raise TypeError("Expected argument 'ip_address' to be a str")
pulumi.set(__self__, "ip_address", ip_address)
if ip_configuration and not isinstance(ip_configuration, dict):
raise TypeError("Expected argument 'ip_configuration' to be a dict")
pulumi.set(__self__, "ip_configuration", ip_configuration)
if ip_tags and not isinstance(ip_tags, list):
raise TypeError("Expected argument 'ip_tags' to be a list")
pulumi.set(__self__, "ip_tags", ip_tags)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if public_ip_address_version and not isinstance(public_ip_address_version, str):
raise TypeError("Expected argument 'public_ip_address_version' to be a str")
pulumi.set(__self__, "public_ip_address_version", public_ip_address_version)
if public_ip_allocation_method and not isinstance(public_ip_allocation_method, str):
raise TypeError("Expected argument 'public_ip_allocation_method' to be a str")
pulumi.set(__self__, "public_ip_allocation_method", public_ip_allocation_method)
if public_ip_prefix and not isinstance(public_ip_prefix, dict):
raise TypeError("Expected argument 'public_ip_prefix' to be a dict")
pulumi.set(__self__, "public_ip_prefix", public_ip_prefix)
if resource_guid and not isinstance(resource_guid, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", resource_guid)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if zones and not isinstance(zones, list):
raise TypeError("Expected argument 'zones' to be a list")
pulumi.set(__self__, "zones", zones)
@property
@pulumi.getter(name="ddosSettings")
def ddos_settings(self) -> Optional['outputs.DdosSettingsResponse']:
"""
The DDoS protection custom policy associated with the public IP address.
"""
return pulumi.get(self, "ddos_settings")
@property
@pulumi.getter(name="dnsSettings")
def dns_settings(self) -> Optional['outputs.PublicIPAddressDnsSettingsResponse']:
"""
The FQDN of the DNS record associated with the public IP address.
"""
return pulumi.get(self, "dns_settings")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="idleTimeoutInMinutes")
def idle_timeout_in_minutes(self) -> Optional[int]:
"""
The idle timeout of the public IP address.
"""
return pulumi.get(self, "idle_timeout_in_minutes")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[str]:
"""
The IP address associated with the public IP address resource.
"""
return pulumi.get(self, "ip_address")
@property
@pulumi.getter(name="ipConfiguration")
def ip_configuration(self) -> 'outputs.IPConfigurationResponse':
"""
The IP configuration associated with the public IP address.
"""
return pulumi.get(self, "ip_configuration")
@property
@pulumi.getter(name="ipTags")
def ip_tags(self) -> Optional[Sequence['outputs.IpTagResponse']]:
"""
The list of tags associated with the public IP address.
"""
return pulumi.get(self, "ip_tags")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the public IP address resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="publicIPAddressVersion")
def public_ip_address_version(self) -> Optional[str]:
"""
The public IP address version.
"""
return pulumi.get(self, "public_ip_address_version")
@property
@pulumi.getter(name="publicIPAllocationMethod")
def public_ip_allocation_method(self) -> Optional[str]:
"""
The public IP address allocation method.
"""
return pulumi.get(self, "public_ip_allocation_method")
@property
@pulumi.getter(name="publicIPPrefix")
def public_ip_prefix(self) -> Optional['outputs.SubResourceResponse']:
"""
The Public IP Prefix this Public IP Address should be allocated from.
"""
return pulumi.get(self, "public_ip_prefix")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> str:
"""
The resource GUID property of the public IP address resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.PublicIPAddressSkuResponse']:
"""
The public IP address SKU.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def zones(self) -> Optional[Sequence[str]]:
"""
A list of availability zones denoting the IP allocated for the resource needs to come from.
"""
return pulumi.get(self, "zones")
class AwaitableGetPublicIPAddressResult(GetPublicIPAddressResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPublicIPAddressResult(
ddos_settings=self.ddos_settings,
dns_settings=self.dns_settings,
etag=self.etag,
id=self.id,
idle_timeout_in_minutes=self.idle_timeout_in_minutes,
ip_address=self.ip_address,
ip_configuration=self.ip_configuration,
ip_tags=self.ip_tags,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
public_ip_address_version=self.public_ip_address_version,
public_ip_allocation_method=self.public_ip_allocation_method,
public_ip_prefix=self.public_ip_prefix,
resource_guid=self.resource_guid,
sku=self.sku,
tags=self.tags,
type=self.type,
zones=self.zones)
def get_public_ip_address(expand: Optional[str] = None,
public_ip_address_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPublicIPAddressResult:
"""
Public IP address resource.
:param str expand: Expands referenced resources.
:param str public_ip_address_name: The name of the public IP address.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['publicIpAddressName'] = public_ip_address_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20200501:getPublicIPAddress', __args__, opts=opts, typ=GetPublicIPAddressResult).value
return AwaitableGetPublicIPAddressResult(
ddos_settings=__ret__.ddos_settings,
dns_settings=__ret__.dns_settings,
etag=__ret__.etag,
id=__ret__.id,
idle_timeout_in_minutes=__ret__.idle_timeout_in_minutes,
ip_address=__ret__.ip_address,
ip_configuration=__ret__.ip_configuration,
ip_tags=__ret__.ip_tags,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
public_ip_address_version=__ret__.public_ip_address_version,
public_ip_allocation_method=__ret__.public_ip_allocation_method,
public_ip_prefix=__ret__.public_ip_prefix,
resource_guid=__ret__.resource_guid,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type,
zones=__ret__.zones)
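# Illustrative usage sketch (not generated code); resource names are placeholders.
#
#     import pulumi
#
#     ip = get_public_ip_address(resource_group_name="my-rg", public_ip_address_name="my-ip")
#     pulumi.export("ipAddress", ip.ip_address)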
|
py | b401fd9938620e80257763baa1c4b5816a52de27 | # -*- coding: utf-8 -*-
from iwlearn.base import *
|
py | b401fdf4f77e7b9b1c840b5a6428da5d9642f291 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test class for Management Interface used by iLO modules."""
import mock
from oslo_config import cfg
from oslo_utils import importutils
from ironic.common import boot_devices
from ironic.common import exception
from ironic.conductor import task_manager
from ironic.drivers.modules.ilo import common as ilo_common
from ironic.drivers.modules.ilo import management as ilo_management
from ironic.drivers.modules import ipmitool
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.objects import utils as obj_utils
ilo_error = importutils.try_import('proliantutils.exception')
INFO_DICT = db_utils.get_test_ilo_info()
CONF = cfg.CONF
class IloManagementTestCase(db_base.DbTestCase):
def setUp(self):
super(IloManagementTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="fake_ilo")
self.node = obj_utils.create_test_node(
self.context, driver='fake_ilo', driver_info=INFO_DICT)
def test_get_properties(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
expected = ilo_management.MANAGEMENT_PROPERTIES
self.assertEqual(expected,
task.driver.management.get_properties())
@mock.patch.object(ilo_common, 'parse_driver_info', spec_set=True,
autospec=True)
def test_validate(self, driver_info_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.validate(task)
driver_info_mock.assert_called_once_with(task.node)
def test_get_supported_boot_devices(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
expected = [boot_devices.PXE, boot_devices.DISK,
boot_devices.CDROM]
self.assertEqual(
sorted(expected),
sorted(task.driver.management.get_supported_boot_devices()))
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
autospec=True)
def test_get_boot_device_next_boot(self, get_ilo_object_mock):
ilo_object_mock = get_ilo_object_mock.return_value
ilo_object_mock.get_one_time_boot.return_value = 'CDROM'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
expected_device = boot_devices.CDROM
expected_response = {'boot_device': expected_device,
'persistent': False}
self.assertEqual(expected_response,
task.driver.management.get_boot_device(task))
ilo_object_mock.get_one_time_boot.assert_called_once_with()
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
autospec=True)
def test_get_boot_device_persistent(self, get_ilo_object_mock):
ilo_mock = get_ilo_object_mock.return_value
ilo_mock.get_one_time_boot.return_value = 'Normal'
ilo_mock.get_persistent_boot_device.return_value = 'NETWORK'
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
expected_device = boot_devices.PXE
expected_response = {'boot_device': expected_device,
'persistent': True}
self.assertEqual(expected_response,
task.driver.management.get_boot_device(task))
ilo_mock.get_one_time_boot.assert_called_once_with()
ilo_mock.get_persistent_boot_device.assert_called_once_with()
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
autospec=True)
def test_get_boot_device_fail(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
exc = ilo_error.IloError('error')
ilo_mock_object.get_one_time_boot.side_effect = exc
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.IloOperationError,
task.driver.management.get_boot_device,
task)
ilo_mock_object.get_one_time_boot.assert_called_once_with()
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
autospec=True)
def test_get_boot_device_persistent_fail(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
ilo_mock_object.get_one_time_boot.return_value = 'Normal'
exc = ilo_error.IloError('error')
ilo_mock_object.get_persistent_boot_device.side_effect = exc
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.IloOperationError,
task.driver.management.get_boot_device,
task)
ilo_mock_object.get_one_time_boot.assert_called_once_with()
ilo_mock_object.get_persistent_boot_device.assert_called_once_with()
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
autospec=True)
def test_set_boot_device_ok(self, get_ilo_object_mock):
ilo_object_mock = get_ilo_object_mock.return_value
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.set_boot_device(task, boot_devices.CDROM,
False)
get_ilo_object_mock.assert_called_once_with(task.node)
ilo_object_mock.set_one_time_boot.assert_called_once_with('CDROM')
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
autospec=True)
def test_set_boot_device_persistent_true(self, get_ilo_object_mock):
ilo_mock = get_ilo_object_mock.return_value
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.set_boot_device(task, boot_devices.PXE,
True)
get_ilo_object_mock.assert_called_once_with(task.node)
ilo_mock.update_persistent_boot.assert_called_once_with(
['NETWORK'])
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
autospec=True)
def test_set_boot_device_fail(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
exc = ilo_error.IloError('error')
ilo_mock_object.set_one_time_boot.side_effect = exc
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.IloOperationError,
task.driver.management.set_boot_device,
task, boot_devices.PXE)
ilo_mock_object.set_one_time_boot.assert_called_once_with('NETWORK')
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
autospec=True)
def test_set_boot_device_persistent_fail(self, get_ilo_object_mock):
ilo_mock_object = get_ilo_object_mock.return_value
exc = ilo_error.IloError('error')
ilo_mock_object.update_persistent_boot.side_effect = exc
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.IloOperationError,
task.driver.management.set_boot_device,
task, boot_devices.PXE, True)
ilo_mock_object.update_persistent_boot.assert_called_once_with(
['NETWORK'])
def test_set_boot_device_invalid_device(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
self.assertRaises(exception.InvalidParameterValue,
task.driver.management.set_boot_device,
task, 'fake-device')
@mock.patch.object(ilo_common, 'update_ipmi_properties', spec_set=True,
autospec=True)
@mock.patch.object(ipmitool.IPMIManagement, 'get_sensors_data',
spec_set=True, autospec=True)
def test_get_sensor_data(self, get_sensors_data_mock, update_ipmi_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.get_sensors_data(task)
update_ipmi_mock.assert_called_once_with(task)
get_sensors_data_mock.assert_called_once_with(mock.ANY, task)
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
autospec=True)
def test__execute_ilo_clean_step_ok(self, get_ilo_object_mock):
ilo_mock = get_ilo_object_mock.return_value
clean_step_mock = getattr(ilo_mock, 'fake-step')
ilo_management._execute_ilo_clean_step(
self.node, 'fake-step', 'args', kwarg='kwarg')
clean_step_mock.assert_called_once_with('args', kwarg='kwarg')
@mock.patch.object(ilo_management, 'LOG', spec_set=True, autospec=True)
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
autospec=True)
def test__execute_ilo_clean_step_not_supported(self, get_ilo_object_mock,
log_mock):
ilo_mock = get_ilo_object_mock.return_value
exc = ilo_error.IloCommandNotSupportedError("error")
clean_step_mock = getattr(ilo_mock, 'fake-step')
clean_step_mock.side_effect = exc
ilo_management._execute_ilo_clean_step(
self.node, 'fake-step', 'args', kwarg='kwarg')
clean_step_mock.assert_called_once_with('args', kwarg='kwarg')
self.assertTrue(log_mock.warn.called)
@mock.patch.object(ilo_common, 'get_ilo_object', spec_set=True,
autospec=True)
def test__execute_ilo_clean_step_fail(self, get_ilo_object_mock):
ilo_mock = get_ilo_object_mock.return_value
exc = ilo_error.IloError("error")
clean_step_mock = getattr(ilo_mock, 'fake-step')
clean_step_mock.side_effect = exc
self.assertRaises(exception.NodeCleaningFailure,
ilo_management._execute_ilo_clean_step,
self.node, 'fake-step', 'args', kwarg='kwarg')
clean_step_mock.assert_called_once_with('args', kwarg='kwarg')
@mock.patch.object(ilo_management, '_execute_ilo_clean_step',
spec_set=True, autospec=True)
def test_reset_ilo(self, clean_step_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.reset_ilo(task)
clean_step_mock.assert_called_once_with(task.node, 'reset_ilo')
@mock.patch.object(ilo_management, '_execute_ilo_clean_step',
spec_set=True, autospec=True)
def test_reset_ilo_credential_ok(self, clean_step_mock):
info = self.node.driver_info
info['ilo_change_password'] = "fake-password"
self.node.driver_info = info
self.node.save()
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.reset_ilo_credential(task)
clean_step_mock.assert_called_once_with(
task.node, 'reset_ilo_credential', 'fake-password')
self.assertIsNone(
task.node.driver_info.get('ilo_change_password'))
self.assertEqual(task.node.driver_info['ilo_password'],
'fake-password')
@mock.patch.object(ilo_management, 'LOG', spec_set=True, autospec=True)
@mock.patch.object(ilo_management, '_execute_ilo_clean_step',
spec_set=True, autospec=True)
def test_reset_ilo_credential_no_password(self, clean_step_mock,
log_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.reset_ilo_credential(task)
self.assertFalse(clean_step_mock.called)
self.assertTrue(log_mock.info.called)
@mock.patch.object(ilo_management, '_execute_ilo_clean_step',
spec_set=True, autospec=True)
def test_reset_bios_to_default(self, clean_step_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.reset_bios_to_default(task)
clean_step_mock.assert_called_once_with(task.node,
'reset_bios_to_default')
@mock.patch.object(ilo_management, '_execute_ilo_clean_step',
spec_set=True, autospec=True)
def test_reset_secure_boot_keys_to_default(self, clean_step_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.reset_secure_boot_keys_to_default(task)
clean_step_mock.assert_called_once_with(task.node,
'reset_secure_boot_keys')
@mock.patch.object(ilo_management, '_execute_ilo_clean_step',
spec_set=True, autospec=True)
def test_clear_secure_boot_keys(self, clean_step_mock):
with task_manager.acquire(self.context, self.node.uuid,
shared=False) as task:
task.driver.management.clear_secure_boot_keys(task)
clean_step_mock.assert_called_once_with(task.node,
'clear_secure_boot_keys')
|
py | b401fe531466a03acc4346cc0a5fa44ef83924d0 | from jenkinsapi.jenkins import Jenkins
from jenkinsapi.credential import UsernamePasswordCredential, SSHKeyCredential
api = Jenkins('http://jenkins:8080')
# Get a list of all global credentials
creds = api.credentials
credentialsId = list(creds.credentials.keys())[0]
import jenkins
j = jenkins.Jenkins('http://jenkins:8080')
# jenkins slave
params = {
'port': '22',
'username': 'jenkins',
'credentialsId': credentialsId,
'host': 'jenkinsslave1'
}
create = True
for node in j.get_nodes():
if node['name'] == 'jenkinsslave1':
create = False
if create:
j.create_node(
'jenkinsslave1',
nodeDescription='my test slave',
remoteFS='/tmp',
labels='jenkinsslave',
launcher=jenkins.LAUNCHER_SSH,
launcher_params=params
)
# jenkins docker slave
params = {
'port': '22',
'username': 'jenkins',
'credentialsId': credentialsId,
'host': 'jenkinsdockerslave1'
}
create = True
for node in j.get_nodes():
if node['name'] == 'jenkinsdockerslave1':
create = False
if create:
j.create_node(
'jenkinsdockerslave1',
nodeDescription='my docker test slave',
remoteFS='/tmp',
labels='jenkinsdockerslave',
launcher=jenkins.LAUNCHER_SSH,
launcher_params=params
)
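# Optional sanity check (a sketch using the same python-jenkins client as
# above): list the nodes Jenkins now knows about, so the two slaves created
# by this script can be confirmed.
for node in j.get_nodes():
    print(node['name'], 'offline' if node.get('offline') else 'online')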
|
py | b401ff3072da6fa9578d8a6033b8e228a78e9c0b | from conans import ConanFile, CMake, tools
import os
required_conan_version = ">=1.33.0"
class ExpatConan(ConanFile):
name = "expat"
description = "Fast streaming XML parser written in C."
topics = ("conan", "expat", "xml", "parsing")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/libexpat/libexpat"
license = "MIT"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False], "fPIC": [True, False], "char_type": ["char", "wchar_t"]}
default_options = {"shared": False, "fPIC": True, "char_type": "char"}
generators = "cmake"
exports_sources = ["CMakeLists.txt", "patches/*"]
_source_subfolder = "source_subfolder"
_build_subfolder = "build_subfolder"
_cmake = None
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
if tools.Version(self.version) < "2.2.8":
del self.options.char_type
def configure(self):
if self.options.shared:
del self.options.fPIC
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
if tools.Version(self.version) < "2.2.8":
self._cmake.definitions["BUILD_doc"] = "Off"
self._cmake.definitions["BUILD_examples"] = "Off"
self._cmake.definitions["BUILD_shared"] = self.options.shared
self._cmake.definitions["BUILD_tests"] = "Off"
self._cmake.definitions["BUILD_tools"] = "Off"
else:
# These options were renamed in 2.2.8 to be more consistent
self._cmake.definitions["EXPAT_BUILD_DOCS"] = "Off"
self._cmake.definitions["EXPAT_BUILD_EXAMPLES"] = "Off"
self._cmake.definitions["EXPAT_SHARED_LIBS"] = self.options.shared
self._cmake.definitions["EXPAT_BUILD_TESTS"] = "Off"
self._cmake.definitions["EXPAT_BUILD_TOOLS"] = "Off"
# EXPAT_CHAR_TYPE was added in 2.2.8
self._cmake.definitions["EXPAT_CHAR_TYPE"] = self.options.char_type
self._cmake.configure(build_folder=self._build_subfolder)
return self._cmake
def build(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
cmake = self._configure_cmake()
cmake.build()
def package(self):
self.copy(pattern="COPYING", dst="licenses", src=self._source_subfolder)
cmake = self._configure_cmake()
cmake.install()
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "lib", "cmake"))
tools.rmdir(os.path.join(self.package_folder, "share"))
def package_info(self):
self.cpp_info.names["cmake_find_package"] = "EXPAT"
self.cpp_info.names["cmake_find_package_multi"] = "expat"
self.cpp_info.libs = tools.collect_libs(self)
if not self.options.shared:
self.cpp_info.defines = ["XML_STATIC"]
if self.options.get_safe("char_type") == "wchar_t":
self.cpp_info.defines.append("XML_UNICODE_WCHAR_T")
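    # Note for consumers: the defines set in package_info() above are
    # propagated by Conan, so a project linking the static library picks up
    # XML_STATIC (and XML_UNICODE_WCHAR_T when char_type=wchar_t on 2.2.8+)
    # without declaring them manually.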
|
py | b401ff42dd8ae44f656bddedeb74021da37e07ff | from .rsencoder import RSEncoder
|
py | b401ff628da1d9c81de66d06150b232f917a4495 | # Copyright 2016 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from fake_switches.command_processing.base_command_processor import \
BaseCommandProcessor
class DellConfigureVlanCommandProcessor(BaseCommandProcessor):
def get_prompt(self):
return "\n" + self.switch_configuration.name + "(config-vlan)#"
def do_vlan(self, *args):
vlan_id = int(args[0])
self.print_vlan_significant_delays_warning()
self.write_line("")
self.switch_configuration.add_vlan(self.switch_configuration.new("Vlan", vlan_id))
def do_no_vlan(self, *args):
vlan_id = int(args[0])
self.print_vlan_significant_delays_warning()
self.write_line("")
vlan = self.switch_configuration.get_vlan(vlan_id)
if vlan is not None:
self.write_line("If any of the VLANs being deleted are for access ports, the ports will be")
self.write_line("unusable until it is assigned a VLAN that exists.")
self.switch_configuration.remove_vlan(vlan)
else:
self.write_line("")
self.write_line("These VLANs do not exist: {}.".format(vlan_id))
def print_vlan_significant_delays_warning(self):
self.write_line("Warning: The use of large numbers of VLANs or interfaces may cause significant")
self.write_line("delays in applying the configuration.")
def do_exit(self):
self.is_done = True
|
py | b401ff70168b26894507f0047e2285063ff6c5d1 | #!/usr/bin/env python
import sys
sys.path.append('../wrappers/python')
import unittest
import ctypes
from abn_wrapper import ABN
from utilities import *
class Rotate_left_tests(OperationTestCase):
    # Test frame
def rotate_left_testframe(self, a, shift):
a = self.abn.create_copy(a)
b = self.abn.create_copy(a)
self.abn.rotate_left(a, shift)
pa = self.utilities.abn_to_long(a)
pb = self.utilities.abn_to_long(b)
pb = self.utilities.normalize(pb<<shift, self.utilities.get_default_bit_number()) + self.utilities.normalize(pb>>(self.utilities.get_default_bit_number() - shift), self.utilities.get_default_bit_number())
self.assertEqual(pa, pb)
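    # Worked example of the identity checked above, for an 8-bit word:
    # rotating 0b10010110 left by 3 gives
    #   ((0b10010110 << 3) & 0xFF) + (0b10010110 >> (8 - 3))
    #   = 0b10110000 + 0b00000100 = 0b10110100,
    # i.e. the three high bits wrap around to the low end.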
def test_bitwise_rotate_left(self):
for i in range(0, len(self.numbers)):
for j in range(0, self.utilities.get_default_bit_number()):
self.rotate_left_testframe(self.numbers[i], j)
|
py | b401ffa9ad403c0a5deaa2bfe2137093875b5c77 | #!/usr/bin/env python3
"""Insert observation data CLI
This script allows the user to insert observation data into the Dataex server.
It takes as input either a csv or an excel file containing the observations, along with the country id number.
Usage:
$ dataex_insert_obs_data.py --country_id <int> --obs_data <str>
Options:
country_id : int
id number of country
obs_data : str
input csv or excel file
"""
import json
import requests
import click
from yaspin import yaspin
import pandas as pd
from dataexclient import auth_helper
from dataexclient.config import INSERT_OBS_DATA_URL
@click.command()
@click.option('--obs_data', '-obs', required=True, help='filename or path to file with filename')
@click.option('--country_id','-cid', required=True ,type=int, help='id of country')
def main(obs_data, country_id):
try:
file = pd.read_csv(obs_data)
except:
file = pd.read_excel(obs_data, engine='openpyxl')
payload = create_json(file, country_id)
headers = {
'Content-Type': 'application/json',
'Authorization': auth_helper.get_token()
}
with yaspin(text="Inserting...", color="yellow") as spinner:
response = requests.post(INSERT_OBS_DATA_URL, headers=headers, data=json.dumps(payload))
if response.status_code == 200:
data = response.json()
if data['error'] is None:
print(data['message'])
spinner.text = "Done"
spinner.ok("✅")
else:
print(data['error'], data['message'])
spinner.fail("💥 ")
else:
print(response.status_code)
data = response.json()
print(data['error'], data['message'])
spinner.fail("💥 ")
def create_json(file, country_id):
"""Creates a json file
Parameters
----------
file : object
dataframe
country_id: str
Id of country entered by the user
Returns
--------
json
a json file containing rows of observation data
"""
obs_data_json = {}
data = []
row = {}
for i in range(len(file)):
row['station_id'] = int(file.iloc[i]['station_id'])
row['parameter_id'] = int(file.iloc[i]['parameter_id'])
row['level_id'] = int(file.iloc[i]['level_id'])
row['start_time'] = file.iloc[i]['start_time'] + 'Z'
row['end_time'] = file.iloc[i]['end_time'] + 'Z'
row['value'] = int(file.iloc[i]['value'])
data.append(row)
row = {}
obs_data_json['country_id'] = country_id
obs_data_json['data'] = data
return obs_data_json
if __name__=="__main__":
main()
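
# Expected input layout (derived from create_json above; the values shown are
# illustrative only): the csv/excel file needs one row per observation with at
# least these columns, times given as ISO strings without the trailing 'Z'
# (the 'Z' is appended by create_json):
#
#   station_id,parameter_id,level_id,start_time,end_time,value
#   101,3,1,2021-05-01T00:00:00,2021-05-01T01:00:00,25
#
# Example invocation:
#   $ dataex_insert_obs_data.py --country_id 7 --obs_data observations.csv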
|
py | b401ffd3cd2016b340a8c179c6c8e26947ee413c | """
sphinx.domains.python
~~~~~~~~~~~~~~~~~~~~~
The Python domain.
:copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import warnings
from typing import Any, Dict, Iterable, Iterator, List, Tuple, Type
from typing import cast
from docutils import nodes
from docutils.nodes import Element, Node
from docutils.parsers.rst import directives
from sphinx import addnodes
from sphinx.addnodes import pending_xref, desc_signature
from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.deprecation import RemovedInSphinx40Warning
from sphinx.directives import ObjectDescription
from sphinx.domains import Domain, ObjType, Index, IndexEntry
from sphinx.environment import BuildEnvironment
from sphinx.locale import _, __
from sphinx.roles import XRefRole
from sphinx.util import logging
from sphinx.util.docfields import Field, GroupedField, TypedField
from sphinx.util.docutils import SphinxDirective
from sphinx.util.nodes import make_refnode
from sphinx.util.typing import TextlikeNode
logger = logging.getLogger(__name__)
# REs for Python signatures
py_sig_re = re.compile(
r'''^ ([\w.]*\.)? # class name(s)
(\w+) \s* # thing name
(?: \(\s*(.*)\s*\) # optional: arguments
(?:\s* -> \s* (.*))? # return annotation
)? $ # and nothing more
''', re.VERBOSE)
pairindextypes = {
'module': _('module'),
'keyword': _('keyword'),
'operator': _('operator'),
'object': _('object'),
'exception': _('exception'),
'statement': _('statement'),
'builtin': _('built-in function'),
}
def _pseudo_parse_arglist(signode: desc_signature, arglist: str) -> None:
""""Parse" a list of arguments separated by commas.
Arguments can have "optional" annotations given by enclosing them in
brackets. Currently, this will split at any comma, even if it's inside a
string literal (e.g. default argument value).
"""
paramlist = addnodes.desc_parameterlist()
stack = [paramlist] # type: List[Element]
try:
for argument in arglist.split(','):
argument = argument.strip()
ends_open = ends_close = 0
while argument.startswith('['):
stack.append(addnodes.desc_optional())
stack[-2] += stack[-1]
argument = argument[1:].strip()
while argument.startswith(']'):
stack.pop()
argument = argument[1:].strip()
while argument.endswith(']') and not argument.endswith('[]'):
ends_close += 1
argument = argument[:-1].strip()
while argument.endswith('['):
ends_open += 1
argument = argument[:-1].strip()
if argument:
stack[-1] += addnodes.desc_parameter(argument, argument)
while ends_open:
stack.append(addnodes.desc_optional())
stack[-2] += stack[-1]
ends_open -= 1
while ends_close:
stack.pop()
ends_close -= 1
if len(stack) != 1:
raise IndexError
except IndexError:
# if there are too few or too many elements on the stack, just give up
# and treat the whole argument list as one argument, discarding the
# already partially populated paramlist node
paramlist = addnodes.desc_parameterlist()
paramlist += addnodes.desc_parameter(arglist, arglist)
signode += paramlist
else:
signode += paramlist
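# For instance, an arglist written as "a[, b[, c]]" produces (roughly) the
# node structure
#   desc_parameterlist(desc_parameter('a'),
#                      desc_optional(desc_parameter('b'),
#                                    desc_optional(desc_parameter('c'))))
# while an unbalanced arglist falls back to a single desc_parameter holding
# the raw string.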
# This override allows our inline type specifiers to behave like :class: link
# when it comes to handling "." and "~" prefixes.
class PyXrefMixin:
def make_xref(self, rolename: str, domain: str, target: str,
innernode: Type[TextlikeNode] = nodes.emphasis,
contnode: Node = None, env: BuildEnvironment = None) -> Node:
result = super().make_xref(rolename, domain, target, # type: ignore
innernode, contnode, env)
result['refspecific'] = True
if target.startswith(('.', '~')):
prefix, result['reftarget'] = target[0], target[1:]
if prefix == '.':
text = target[1:]
elif prefix == '~':
text = target.split('.')[-1]
for node in result.traverse(nodes.Text):
node.parent[node.parent.index(node)] = nodes.Text(text)
break
return result
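    # e.g. a field type written as "~sphinx.application.Sphinx" is displayed
    # as just "Sphinx", while ".Sphinx" is displayed as "Sphinx" with the
    # leading dot stripped from the reference target as well.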
def make_xrefs(self, rolename: str, domain: str, target: str,
innernode: Type[TextlikeNode] = nodes.emphasis,
contnode: Node = None, env: BuildEnvironment = None) -> List[Node]:
delims = r'(\s*[\[\]\(\),](?:\s*or\s)?\s*|\s+or\s+)'
delims_re = re.compile(delims)
sub_targets = re.split(delims, target)
split_contnode = bool(contnode and contnode.astext() == target)
results = []
for sub_target in filter(None, sub_targets):
if split_contnode:
contnode = nodes.Text(sub_target)
if delims_re.match(sub_target):
results.append(contnode or innernode(sub_target, sub_target))
else:
results.append(self.make_xref(rolename, domain, sub_target,
innernode, contnode, env))
return results
class PyField(PyXrefMixin, Field):
def make_xref(self, rolename: str, domain: str, target: str,
innernode: Type[TextlikeNode] = nodes.emphasis,
contnode: Node = None, env: BuildEnvironment = None) -> Node:
if rolename == 'class' and target == 'None':
# None is not a type, so use obj role instead.
rolename = 'obj'
return super().make_xref(rolename, domain, target, innernode, contnode, env)
class PyGroupedField(PyXrefMixin, GroupedField):
pass
class PyTypedField(PyXrefMixin, TypedField):
def make_xref(self, rolename: str, domain: str, target: str,
innernode: Type[TextlikeNode] = nodes.emphasis,
contnode: Node = None, env: BuildEnvironment = None) -> Node:
if rolename == 'class' and target == 'None':
# None is not a type, so use obj role instead.
rolename = 'obj'
return super().make_xref(rolename, domain, target, innernode, contnode, env)
class PyObject(ObjectDescription):
"""
Description of a general Python object.
:cvar allow_nesting: Class is an object that allows for nested namespaces
:vartype allow_nesting: bool
"""
option_spec = {
'noindex': directives.flag,
'module': directives.unchanged,
'annotation': directives.unchanged,
}
doc_field_types = [
PyTypedField('parameter', label=_('Parameters'),
names=('param', 'parameter', 'arg', 'argument',
'keyword', 'kwarg', 'kwparam'),
typerolename='class', typenames=('paramtype', 'type'),
can_collapse=True),
PyTypedField('variable', label=_('Variables'), rolename='obj',
names=('var', 'ivar', 'cvar'),
typerolename='class', typenames=('vartype',),
can_collapse=True),
PyGroupedField('exceptions', label=_('Raises'), rolename='exc',
names=('raises', 'raise', 'exception', 'except'),
can_collapse=True),
Field('returnvalue', label=_('Returns'), has_arg=False,
names=('returns', 'return')),
PyField('returntype', label=_('Return type'), has_arg=False,
names=('rtype',), bodyrolename='class'),
]
allow_nesting = False
def get_signature_prefix(self, sig: str) -> str:
"""May return a prefix to put before the object name in the
signature.
"""
return ''
def needs_arglist(self) -> bool:
"""May return true if an empty argument list is to be generated even if
the document contains none.
"""
return False
def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]:
"""Transform a Python signature into RST nodes.
Return (fully qualified name of the thing, classname if any).
If inside a class, the current class name is handled intelligently:
* it is stripped from the displayed name if present
* it is added to the full name (return value) if not present
"""
m = py_sig_re.match(sig)
if m is None:
raise ValueError
prefix, name, arglist, retann = m.groups()
# determine module and class name (if applicable), as well as full name
modname = self.options.get('module', self.env.ref_context.get('py:module'))
classname = self.env.ref_context.get('py:class')
if classname:
add_module = False
if prefix and (prefix == classname or
prefix.startswith(classname + ".")):
fullname = prefix + name
# class name is given again in the signature
prefix = prefix[len(classname):].lstrip('.')
elif prefix:
# class name is given in the signature, but different
# (shouldn't happen)
fullname = classname + '.' + prefix + name
else:
# class name is not given in the signature
fullname = classname + '.' + name
else:
add_module = True
if prefix:
classname = prefix.rstrip('.')
fullname = prefix + name
else:
classname = ''
fullname = name
signode['module'] = modname
signode['class'] = classname
signode['fullname'] = fullname
sig_prefix = self.get_signature_prefix(sig)
if sig_prefix:
signode += addnodes.desc_annotation(sig_prefix, sig_prefix)
if prefix:
signode += addnodes.desc_addname(prefix, prefix)
elif add_module and self.env.config.add_module_names:
if modname and modname != 'exceptions':
# exceptions are a special case, since they are documented in the
# 'exceptions' module.
nodetext = modname + '.'
signode += addnodes.desc_addname(nodetext, nodetext)
signode += addnodes.desc_name(name, name)
if arglist:
_pseudo_parse_arglist(signode, arglist)
else:
if self.needs_arglist():
# for callables, add an empty parameter list
signode += addnodes.desc_parameterlist()
if retann:
signode += addnodes.desc_returns(retann, retann)
anno = self.options.get('annotation')
if anno:
signode += addnodes.desc_annotation(' ' + anno, ' ' + anno)
return fullname, prefix
def get_index_text(self, modname: str, name: Tuple[str, str]) -> str:
"""Return the text for the index entry of the object."""
raise NotImplementedError('must be implemented in subclasses')
def add_target_and_index(self, name_cls: Tuple[str, str], sig: str,
signode: desc_signature) -> None:
modname = self.options.get('module', self.env.ref_context.get('py:module'))
fullname = (modname and modname + '.' or '') + name_cls[0]
# note target
if fullname not in self.state.document.ids:
signode['names'].append(fullname)
signode['ids'].append(fullname)
signode['first'] = (not self.names)
self.state.document.note_explicit_target(signode)
domain = cast(PythonDomain, self.env.get_domain('py'))
domain.note_object(fullname, self.objtype,
location=(self.env.docname, self.lineno))
indextext = self.get_index_text(modname, name_cls)
if indextext:
self.indexnode['entries'].append(('single', indextext,
fullname, '', None))
def before_content(self) -> None:
"""Handle object nesting before content
:py:class:`PyObject` represents Python language constructs. For
        constructs that are nestable, such as Python classes, this method will
        build up a stack of the nesting hierarchy so that it can be later
de-nested correctly, in :py:meth:`after_content`.
For constructs that aren't nestable, the stack is bypassed, and instead
only the most recent object is tracked. This object prefix name will be
removed with :py:meth:`after_content`.
"""
prefix = None
if self.names:
# fullname and name_prefix come from the `handle_signature` method.
# fullname represents the full object name that is constructed using
# object nesting and explicit prefixes. `name_prefix` is the
# explicit prefix given in a signature
(fullname, name_prefix) = self.names[-1]
if self.allow_nesting:
prefix = fullname
elif name_prefix:
prefix = name_prefix.strip('.')
if prefix:
self.env.ref_context['py:class'] = prefix
if self.allow_nesting:
classes = self.env.ref_context.setdefault('py:classes', [])
classes.append(prefix)
if 'module' in self.options:
modules = self.env.ref_context.setdefault('py:modules', [])
modules.append(self.env.ref_context.get('py:module'))
self.env.ref_context['py:module'] = self.options['module']
def after_content(self) -> None:
"""Handle object de-nesting after content
If this class is a nestable object, removing the last nested class prefix
ends further nesting in the object.
If this class is not a nestable object, the list of classes should not
be altered as we didn't affect the nesting levels in
:py:meth:`before_content`.
"""
classes = self.env.ref_context.setdefault('py:classes', [])
if self.allow_nesting:
try:
classes.pop()
except IndexError:
pass
self.env.ref_context['py:class'] = (classes[-1] if len(classes) > 0
else None)
if 'module' in self.options:
modules = self.env.ref_context.setdefault('py:modules', [])
if modules:
self.env.ref_context['py:module'] = modules.pop()
else:
self.env.ref_context.pop('py:module')
class PyModulelevel(PyObject):
"""
Description of an object on module level (functions, data).
"""
def run(self) -> List[Node]:
warnings.warn('PyClassmember is deprecated.',
RemovedInSphinx40Warning)
return super().run()
def needs_arglist(self) -> bool:
return self.objtype == 'function'
def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str:
if self.objtype == 'function':
if not modname:
return _('%s() (built-in function)') % name_cls[0]
return _('%s() (in module %s)') % (name_cls[0], modname)
elif self.objtype == 'data':
if not modname:
return _('%s (built-in variable)') % name_cls[0]
return _('%s (in module %s)') % (name_cls[0], modname)
else:
return ''
class PyFunction(PyObject):
"""Description of a function."""
option_spec = PyObject.option_spec.copy()
option_spec.update({
'async': directives.flag,
})
def get_signature_prefix(self, sig: str) -> str:
if 'async' in self.options:
return 'async '
else:
return ''
def needs_arglist(self) -> bool:
return True
def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str:
name, cls = name_cls
if modname:
return _('%s() (in module %s)') % (name, modname)
else:
return _('%s() (built-in function)') % name
class PyVariable(PyObject):
"""Description of a variable."""
def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str:
name, cls = name_cls
if modname:
return _('%s (in module %s)') % (name, modname)
else:
return _('%s (built-in variable)') % name
class PyClasslike(PyObject):
"""
Description of a class-like object (classes, interfaces, exceptions).
"""
allow_nesting = True
def get_signature_prefix(self, sig: str) -> str:
return self.objtype + ' '
def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str:
if self.objtype == 'class':
if not modname:
return _('%s (built-in class)') % name_cls[0]
return _('%s (class in %s)') % (name_cls[0], modname)
elif self.objtype == 'exception':
return name_cls[0]
else:
return ''
class PyClassmember(PyObject):
"""
Description of a class member (methods, attributes).
"""
def run(self) -> List[Node]:
warnings.warn('PyClassmember is deprecated.',
RemovedInSphinx40Warning)
return super().run()
def needs_arglist(self) -> bool:
return self.objtype.endswith('method')
def get_signature_prefix(self, sig: str) -> str:
if self.objtype == 'staticmethod':
return 'static '
elif self.objtype == 'classmethod':
return 'classmethod '
return ''
def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str:
name, cls = name_cls
add_modules = self.env.config.add_module_names
if self.objtype == 'method':
try:
clsname, methname = name.rsplit('.', 1)
except ValueError:
if modname:
return _('%s() (in module %s)') % (name, modname)
else:
return '%s()' % name
if modname and add_modules:
return _('%s() (%s.%s method)') % (methname, modname, clsname)
else:
return _('%s() (%s method)') % (methname, clsname)
elif self.objtype == 'staticmethod':
try:
clsname, methname = name.rsplit('.', 1)
except ValueError:
if modname:
return _('%s() (in module %s)') % (name, modname)
else:
return '%s()' % name
if modname and add_modules:
return _('%s() (%s.%s static method)') % (methname, modname,
clsname)
else:
return _('%s() (%s static method)') % (methname, clsname)
elif self.objtype == 'classmethod':
try:
clsname, methname = name.rsplit('.', 1)
except ValueError:
if modname:
return _('%s() (in module %s)') % (name, modname)
else:
return '%s()' % name
if modname:
return _('%s() (%s.%s class method)') % (methname, modname,
clsname)
else:
return _('%s() (%s class method)') % (methname, clsname)
elif self.objtype == 'attribute':
try:
clsname, attrname = name.rsplit('.', 1)
except ValueError:
if modname:
return _('%s (in module %s)') % (name, modname)
else:
return name
if modname and add_modules:
return _('%s (%s.%s attribute)') % (attrname, modname, clsname)
else:
return _('%s (%s attribute)') % (attrname, clsname)
else:
return ''
class PyMethod(PyObject):
"""Description of a method."""
option_spec = PyObject.option_spec.copy()
option_spec.update({
'abstractmethod': directives.flag,
'async': directives.flag,
'classmethod': directives.flag,
'property': directives.flag,
'staticmethod': directives.flag,
})
def needs_arglist(self) -> bool:
if 'property' in self.options:
return False
else:
return True
def get_signature_prefix(self, sig: str) -> str:
prefix = []
if 'abstractmethod' in self.options:
prefix.append('abstract')
if 'async' in self.options:
prefix.append('async')
if 'classmethod' in self.options:
prefix.append('classmethod')
if 'property' in self.options:
prefix.append('property')
if 'staticmethod' in self.options:
prefix.append('static')
if prefix:
return ' '.join(prefix) + ' '
else:
return ''
def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str:
name, cls = name_cls
try:
clsname, methname = name.rsplit('.', 1)
if modname and self.env.config.add_module_names:
clsname = '.'.join([modname, clsname])
except ValueError:
if modname:
return _('%s() (in module %s)') % (name, modname)
else:
return '%s()' % name
if 'classmethod' in self.options:
return _('%s() (%s class method)') % (methname, clsname)
elif 'property' in self.options:
return _('%s() (%s property)') % (methname, clsname)
elif 'staticmethod' in self.options:
return _('%s() (%s static method)') % (methname, clsname)
else:
return _('%s() (%s method)') % (methname, clsname)
class PyClassMethod(PyMethod):
"""Description of a classmethod."""
option_spec = PyObject.option_spec.copy()
def run(self) -> List[Node]:
self.name = 'py:method'
self.options['classmethod'] = True
return super().run()
class PyStaticMethod(PyMethod):
"""Description of a staticmethod."""
option_spec = PyObject.option_spec.copy()
def run(self) -> List[Node]:
self.name = 'py:method'
self.options['staticmethod'] = True
return super().run()
class PyAttribute(PyObject):
"""Description of an attribute."""
def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str:
name, cls = name_cls
try:
clsname, attrname = name.rsplit('.', 1)
if modname and self.env.config.add_module_names:
clsname = '.'.join([modname, clsname])
except ValueError:
if modname:
return _('%s (in module %s)') % (name, modname)
else:
return name
return _('%s (%s attribute)') % (attrname, clsname)
class PyDecoratorMixin:
"""
Mixin for decorator directives.
"""
def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]:
ret = super().handle_signature(sig, signode) # type: ignore
signode.insert(0, addnodes.desc_addname('@', '@'))
return ret
def needs_arglist(self) -> bool:
return False
class PyDecoratorFunction(PyDecoratorMixin, PyModulelevel):
"""
Directive to mark functions meant to be used as decorators.
"""
def run(self) -> List[Node]:
# a decorator function is a function after all
self.name = 'py:function'
return super().run()
class PyDecoratorMethod(PyDecoratorMixin, PyClassmember):
"""
Directive to mark methods meant to be used as decorators.
"""
def run(self) -> List[Node]:
self.name = 'py:method'
return super().run()
class PyModule(SphinxDirective):
"""
Directive to mark description of a new module.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'platform': lambda x: x,
'synopsis': lambda x: x,
'noindex': directives.flag,
'deprecated': directives.flag,
}
def run(self) -> List[Node]:
domain = cast(PythonDomain, self.env.get_domain('py'))
modname = self.arguments[0].strip()
noindex = 'noindex' in self.options
self.env.ref_context['py:module'] = modname
ret = [] # type: List[Node]
if not noindex:
# note module to the domain
domain.note_module(modname,
self.options.get('synopsis', ''),
self.options.get('platform', ''),
'deprecated' in self.options)
domain.note_object(modname, 'module', location=(self.env.docname, self.lineno))
targetnode = nodes.target('', '', ids=['module-' + modname],
ismod=True)
self.state.document.note_explicit_target(targetnode)
# the platform and synopsis aren't printed; in fact, they are only
# used in the modindex currently
ret.append(targetnode)
indextext = _('%s (module)') % modname
inode = addnodes.index(entries=[('single', indextext,
'module-' + modname, '', None)])
ret.append(inode)
return ret
class PyCurrentModule(SphinxDirective):
"""
This directive is just to tell Sphinx that we're documenting
stuff in module foo, but links to module foo won't lead here.
"""
has_content = False
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = False
option_spec = {} # type: Dict
def run(self) -> List[Node]:
modname = self.arguments[0].strip()
if modname == 'None':
self.env.ref_context.pop('py:module', None)
else:
self.env.ref_context['py:module'] = modname
return []
class PyXRefRole(XRefRole):
def process_link(self, env: BuildEnvironment, refnode: Element,
has_explicit_title: bool, title: str, target: str) -> Tuple[str, str]:
refnode['py:module'] = env.ref_context.get('py:module')
refnode['py:class'] = env.ref_context.get('py:class')
if not has_explicit_title:
title = title.lstrip('.') # only has a meaning for the target
target = target.lstrip('~') # only has a meaning for the title
# if the first character is a tilde, don't display the module/class
# parts of the contents
if title[0:1] == '~':
title = title[1:]
dot = title.rfind('.')
if dot != -1:
title = title[dot + 1:]
# if the first character is a dot, search more specific namespaces first
# else search builtins first
if target[0:1] == '.':
target = target[1:]
refnode['refspecific'] = True
return title, target
class PythonModuleIndex(Index):
"""
Index subclass to provide the Python module index.
"""
name = 'modindex'
localname = _('Python Module Index')
shortname = _('modules')
def generate(self, docnames: Iterable[str] = None
) -> Tuple[List[Tuple[str, List[IndexEntry]]], bool]:
content = {} # type: Dict[str, List[IndexEntry]]
# list of prefixes to ignore
ignores = None # type: List[str]
ignores = self.domain.env.config['modindex_common_prefix'] # type: ignore
ignores = sorted(ignores, key=len, reverse=True)
# list of all modules, sorted by module name
modules = sorted(self.domain.data['modules'].items(),
key=lambda x: x[0].lower())
        # sort out collapsible modules
prev_modname = ''
num_toplevels = 0
for modname, (docname, synopsis, platforms, deprecated) in modules:
if docnames and docname not in docnames:
continue
for ignore in ignores:
if modname.startswith(ignore):
modname = modname[len(ignore):]
stripped = ignore
break
else:
stripped = ''
# we stripped the whole module name?
if not modname:
modname, stripped = stripped, ''
entries = content.setdefault(modname[0].lower(), [])
package = modname.split('.')[0]
if package != modname:
# it's a submodule
if prev_modname == package:
# first submodule - make parent a group head
if entries:
last = entries[-1]
entries[-1] = IndexEntry(last[0], 1, last[2], last[3],
last[4], last[5], last[6])
elif not prev_modname.startswith(package):
# submodule without parent in list, add dummy entry
entries.append(IndexEntry(stripped + package, 1, '', '', '', '', ''))
subtype = 2
else:
num_toplevels += 1
subtype = 0
qualifier = deprecated and _('Deprecated') or ''
entries.append(IndexEntry(stripped + modname, subtype, docname,
'module-' + stripped + modname, platforms,
qualifier, synopsis))
prev_modname = modname
# apply heuristics when to collapse modindex at page load:
# only collapse if number of toplevel modules is larger than
# number of submodules
collapse = len(modules) - num_toplevels < num_toplevels
# sort by first letter
sorted_content = sorted(content.items())
return sorted_content, collapse
class PythonDomain(Domain):
"""Python language domain."""
name = 'py'
label = 'Python'
object_types = {
'function': ObjType(_('function'), 'func', 'obj'),
'data': ObjType(_('data'), 'data', 'obj'),
'class': ObjType(_('class'), 'class', 'exc', 'obj'),
'exception': ObjType(_('exception'), 'exc', 'class', 'obj'),
'method': ObjType(_('method'), 'meth', 'obj'),
'classmethod': ObjType(_('class method'), 'meth', 'obj'),
'staticmethod': ObjType(_('static method'), 'meth', 'obj'),
'attribute': ObjType(_('attribute'), 'attr', 'obj'),
'module': ObjType(_('module'), 'mod', 'obj'),
} # type: Dict[str, ObjType]
directives = {
'function': PyFunction,
'data': PyVariable,
'class': PyClasslike,
'exception': PyClasslike,
'method': PyMethod,
'classmethod': PyClassMethod,
'staticmethod': PyStaticMethod,
'attribute': PyAttribute,
'module': PyModule,
'currentmodule': PyCurrentModule,
'decorator': PyDecoratorFunction,
'decoratormethod': PyDecoratorMethod,
}
roles = {
'data': PyXRefRole(),
'exc': PyXRefRole(),
'func': PyXRefRole(fix_parens=True),
'class': PyXRefRole(),
'const': PyXRefRole(),
'attr': PyXRefRole(),
'meth': PyXRefRole(fix_parens=True),
'mod': PyXRefRole(),
'obj': PyXRefRole(),
}
initial_data = {
'objects': {}, # fullname -> docname, objtype
'modules': {}, # modname -> docname, synopsis, platform, deprecated
} # type: Dict[str, Dict[str, Tuple[Any]]]
indices = [
PythonModuleIndex,
]
@property
def objects(self) -> Dict[str, Tuple[str, str]]:
return self.data.setdefault('objects', {}) # fullname -> docname, objtype
def note_object(self, name: str, objtype: str, location: Any = None) -> None:
"""Note a python object for cross reference.
.. versionadded:: 2.1
"""
if name in self.objects:
docname = self.objects[name][0]
logger.warning(__('duplicate object description of %s, '
'other instance in %s, use :noindex: for one of them'),
name, docname, location=location)
self.objects[name] = (self.env.docname, objtype)
@property
def modules(self) -> Dict[str, Tuple[str, str, str, bool]]:
return self.data.setdefault('modules', {}) # modname -> docname, synopsis, platform, deprecated # NOQA
def note_module(self, name: str, synopsis: str, platform: str, deprecated: bool) -> None:
"""Note a python module for cross reference.
.. versionadded:: 2.1
"""
self.modules[name] = (self.env.docname, synopsis, platform, deprecated)
def clear_doc(self, docname: str) -> None:
for fullname, (fn, _l) in list(self.objects.items()):
if fn == docname:
del self.objects[fullname]
for modname, (fn, _x, _x, _y) in list(self.modules.items()):
if fn == docname:
del self.modules[modname]
def merge_domaindata(self, docnames: List[str], otherdata: Dict) -> None:
# XXX check duplicates?
for fullname, (fn, objtype) in otherdata['objects'].items():
if fn in docnames:
self.objects[fullname] = (fn, objtype)
for modname, data in otherdata['modules'].items():
if data[0] in docnames:
self.modules[modname] = data
def find_obj(self, env: BuildEnvironment, modname: str, classname: str,
name: str, type: str, searchmode: int = 0) -> List[Tuple[str, Any]]:
"""Find a Python object for "name", perhaps using the given module
and/or classname. Returns a list of (name, object entry) tuples.
"""
# skip parens
if name[-2:] == '()':
name = name[:-2]
if not name:
return []
matches = [] # type: List[Tuple[str, Any]]
newname = None
if searchmode == 1:
if type is None:
objtypes = list(self.object_types)
else:
objtypes = self.objtypes_for_role(type)
if objtypes is not None:
if modname and classname:
fullname = modname + '.' + classname + '.' + name
if fullname in self.objects and self.objects[fullname][1] in objtypes:
newname = fullname
if not newname:
if modname and modname + '.' + name in self.objects and \
self.objects[modname + '.' + name][1] in objtypes:
newname = modname + '.' + name
elif name in self.objects and self.objects[name][1] in objtypes:
newname = name
else:
# "fuzzy" searching mode
searchname = '.' + name
matches = [(oname, self.objects[oname]) for oname in self.objects
if oname.endswith(searchname) and
self.objects[oname][1] in objtypes]
else:
# NOTE: searching for exact match, object type is not considered
if name in self.objects:
newname = name
elif type == 'mod':
# only exact matches allowed for modules
return []
elif classname and classname + '.' + name in self.objects:
newname = classname + '.' + name
elif modname and modname + '.' + name in self.objects:
newname = modname + '.' + name
elif modname and classname and \
modname + '.' + classname + '.' + name in self.objects:
newname = modname + '.' + classname + '.' + name
# special case: builtin exceptions have module "exceptions" set
elif type == 'exc' and '.' not in name and \
'exceptions.' + name in self.objects:
newname = 'exceptions.' + name
# special case: object methods
elif type in ('func', 'meth') and '.' not in name and \
'object.' + name in self.objects:
newname = 'object.' + name
if newname is not None:
matches.append((newname, self.objects[newname]))
return matches
def resolve_xref(self, env: BuildEnvironment, fromdocname: str, builder: Builder,
type: str, target: str, node: pending_xref, contnode: Element
) -> Element:
modname = node.get('py:module')
clsname = node.get('py:class')
searchmode = node.hasattr('refspecific') and 1 or 0
matches = self.find_obj(env, modname, clsname, target,
type, searchmode)
if not matches:
return None
elif len(matches) > 1:
logger.warning(__('more than one target found for cross-reference %r: %s'),
target, ', '.join(match[0] for match in matches),
type='ref', subtype='python', location=node)
name, obj = matches[0]
if obj[1] == 'module':
return self._make_module_refnode(builder, fromdocname, name, contnode)
else:
return make_refnode(builder, fromdocname, obj[0], name, contnode, name)
def resolve_any_xref(self, env: BuildEnvironment, fromdocname: str, builder: Builder,
target: str, node: pending_xref, contnode: Element
) -> List[Tuple[str, Element]]:
modname = node.get('py:module')
clsname = node.get('py:class')
results = [] # type: List[Tuple[str, Element]]
# always search in "refspecific" mode with the :any: role
matches = self.find_obj(env, modname, clsname, target, None, 1)
for name, obj in matches:
if obj[1] == 'module':
results.append(('py:mod',
self._make_module_refnode(builder, fromdocname,
name, contnode)))
else:
results.append(('py:' + self.role_for_objtype(obj[1]),
make_refnode(builder, fromdocname, obj[0], name,
contnode, name)))
return results
def _make_module_refnode(self, builder: Builder, fromdocname: str, name: str,
contnode: Node) -> Element:
# get additional info for modules
docname, synopsis, platform, deprecated = self.modules[name]
title = name
if synopsis:
title += ': ' + synopsis
if deprecated:
title += _(' (deprecated)')
if platform:
title += ' (' + platform + ')'
return make_refnode(builder, fromdocname, docname,
'module-' + name, contnode, title)
def get_objects(self) -> Iterator[Tuple[str, str, str, str, str, int]]:
for modname, info in self.modules.items():
yield (modname, modname, 'module', info[0], 'module-' + modname, 0)
for refname, (docname, type) in self.objects.items():
if type != 'module': # modules are already handled
yield (refname, refname, type, docname, refname, 1)
def get_full_qualified_name(self, node: Element) -> str:
modname = node.get('py:module')
clsname = node.get('py:class')
target = node.get('reftarget')
if target is None:
return None
else:
return '.'.join(filter(None, [modname, clsname, target]))
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_domain(PythonDomain)
return {
'version': 'builtin',
'env_version': 1,
'parallel_read_safe': True,
'parallel_write_safe': True,
}
|
py | b40201f4034c86e6fdede8f6220a20e413847f2b | # -*- coding: utf-8 -*-
"""
pygments.lexers.css
~~~~~~~~~~~~~~~~~~~
Lexers for CSS and related stylesheet formats.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import copy
from pygments.lexer import ExtendedRegexLexer, RegexLexer, include, bygroups, \
default, words, inherit
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.util import iteritems
__all__ = ['CssLexer', 'SassLexer', 'ScssLexer', 'LessCssLexer']
class CssLexer(RegexLexer):
"""
For CSS (Cascading Style Sheets).
"""
name = 'CSS'
aliases = ['css']
filenames = ['*.css']
mimetypes = ['text/css']
tokens = {
'root': [
include('basics'),
],
'basics': [
(r'\s+', Text),
(r'/\*(?:.|\n)*?\*/', Comment),
(r'\{', Punctuation, 'content'),
(r'\:[\w-]+', Name.Decorator),
(r'\.[\w-]+', Name.Class),
(r'\#[\w-]+', Name.Namespace),
(r'@[\w-]+', Keyword, 'atrule'),
(r'[\w-]+', Name.Tag),
(r'[~^*!%&$\[\]()<>|+=@:;,./?-]', Operator),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single)
],
'atrule': [
(r'\{', Punctuation, 'atcontent'),
(r';', Punctuation, '#pop'),
include('basics'),
],
'atcontent': [
include('basics'),
(r'\}', Punctuation, '#pop:2'),
],
'content': [
(r'\s+', Text),
(r'\}', Punctuation, '#pop'),
(r'url\(.*?\)', String.Other),
(r'^@.*?$', Comment.Preproc),
(words((
'azimuth', 'background-attachment', 'background-color',
'background-image', 'background-position', 'background-repeat',
'background', 'border-bottom-color', 'border-bottom-style',
'border-bottom-width', 'border-left-color', 'border-left-style',
'border-left-width', 'border-right', 'border-right-color',
'border-right-style', 'border-right-width', 'border-top-color',
'border-top-style', 'border-top-width', 'border-bottom',
'border-collapse', 'border-left', 'border-width', 'border-color',
'border-spacing', 'border-style', 'border-top', 'border', 'caption-side',
'clear', 'clip', 'color', 'content', 'counter-increment', 'counter-reset',
'cue-after', 'cue-before', 'cue', 'cursor', 'direction', 'display',
'elevation', 'empty-cells', 'float', 'font-family', 'font-size',
'font-size-adjust', 'font-stretch', 'font-style', 'font-variant',
'font-weight', 'font', 'height', 'letter-spacing', 'line-height',
'list-style-type', 'list-style-image', 'list-style-position',
'list-style', 'margin-bottom', 'margin-left', 'margin-right',
'margin-top', 'margin', 'marker-offset', 'marks', 'max-height', 'max-width',
'min-height', 'min-width', 'opacity', 'orphans', 'outline-color',
'outline-style', 'outline-width', 'outline', 'overflow', 'overflow-x',
'overflow-y', 'padding-bottom', 'padding-left', 'padding-right', 'padding-top',
'padding', 'page', 'page-break-after', 'page-break-before', 'page-break-inside',
'pause-after', 'pause-before', 'pause', 'pitch-range', 'pitch',
'play-during', 'position', 'quotes', 'richness', 'right', 'size',
'speak-header', 'speak-numeral', 'speak-punctuation', 'speak',
'speech-rate', 'stress', 'table-layout', 'text-align', 'text-decoration',
'text-indent', 'text-shadow', 'text-transform', 'top', 'unicode-bidi',
'vertical-align', 'visibility', 'voice-family', 'volume', 'white-space',
'widows', 'width', 'word-spacing', 'z-index', 'bottom',
'above', 'absolute', 'always', 'armenian', 'aural', 'auto', 'avoid', 'baseline',
'behind', 'below', 'bidi-override', 'blink', 'block', 'bolder', 'bold', 'both',
'capitalize', 'center-left', 'center-right', 'center', 'circle',
'cjk-ideographic', 'close-quote', 'collapse', 'condensed', 'continuous',
'crop', 'crosshair', 'cross', 'cursive', 'dashed', 'decimal-leading-zero',
'decimal', 'default', 'digits', 'disc', 'dotted', 'double', 'e-resize', 'embed',
'extra-condensed', 'extra-expanded', 'expanded', 'fantasy', 'far-left',
'far-right', 'faster', 'fast', 'fixed', 'georgian', 'groove', 'hebrew', 'help',
'hidden', 'hide', 'higher', 'high', 'hiragana-iroha', 'hiragana', 'icon',
'inherit', 'inline-table', 'inline', 'inset', 'inside', 'invert', 'italic',
'justify', 'katakana-iroha', 'katakana', 'landscape', 'larger', 'large',
'left-side', 'leftwards', 'left', 'level', 'lighter', 'line-through', 'list-item',
'loud', 'lower-alpha', 'lower-greek', 'lower-roman', 'lowercase', 'ltr',
'lower', 'low', 'medium', 'message-box', 'middle', 'mix', 'monospace',
'n-resize', 'narrower', 'ne-resize', 'no-close-quote', 'no-open-quote',
'no-repeat', 'none', 'normal', 'nowrap', 'nw-resize', 'oblique', 'once',
'open-quote', 'outset', 'outside', 'overline', 'pointer', 'portrait', 'px',
'relative', 'repeat-x', 'repeat-y', 'repeat', 'rgb', 'ridge', 'right-side',
'rightwards', 's-resize', 'sans-serif', 'scroll', 'se-resize',
'semi-condensed', 'semi-expanded', 'separate', 'serif', 'show', 'silent',
'slower', 'slow', 'small-caps', 'small-caption', 'smaller', 'soft', 'solid',
'spell-out', 'square', 'static', 'status-bar', 'super', 'sw-resize',
'table-caption', 'table-cell', 'table-column', 'table-column-group',
'table-footer-group', 'table-header-group', 'table-row',
'table-row-group', 'text-bottom', 'text-top', 'text', 'thick', 'thin',
'transparent', 'ultra-condensed', 'ultra-expanded', 'underline',
'upper-alpha', 'upper-latin', 'upper-roman', 'uppercase', 'url',
'visible', 'w-resize', 'wait', 'wider', 'x-fast', 'x-high', 'x-large', 'x-loud',
'x-low', 'x-small', 'x-soft', 'xx-large', 'xx-small', 'yes'), suffix=r'\b'),
Name.Builtin),
(words((
'indigo', 'gold', 'firebrick', 'indianred', 'yellow', 'darkolivegreen',
'darkseagreen', 'mediumvioletred', 'mediumorchid', 'chartreuse',
'mediumslateblue', 'black', 'springgreen', 'crimson', 'lightsalmon', 'brown',
'turquoise', 'olivedrab', 'cyan', 'silver', 'skyblue', 'gray', 'darkturquoise',
'goldenrod', 'darkgreen', 'darkviolet', 'darkgray', 'lightpink', 'teal',
'darkmagenta', 'lightgoldenrodyellow', 'lavender', 'yellowgreen', 'thistle',
'violet', 'navy', 'orchid', 'blue', 'ghostwhite', 'honeydew', 'cornflowerblue',
'darkblue', 'darkkhaki', 'mediumpurple', 'cornsilk', 'red', 'bisque', 'slategray',
'darkcyan', 'khaki', 'wheat', 'deepskyblue', 'darkred', 'steelblue', 'aliceblue',
'gainsboro', 'mediumturquoise', 'floralwhite', 'coral', 'purple', 'lightgrey',
'lightcyan', 'darksalmon', 'beige', 'azure', 'lightsteelblue', 'oldlace',
'greenyellow', 'royalblue', 'lightseagreen', 'mistyrose', 'sienna',
'lightcoral', 'orangered', 'navajowhite', 'lime', 'palegreen', 'burlywood',
'seashell', 'mediumspringgreen', 'fuchsia', 'papayawhip', 'blanchedalmond',
'peru', 'aquamarine', 'white', 'darkslategray', 'ivory', 'dodgerblue',
'lemonchiffon', 'chocolate', 'orange', 'forestgreen', 'slateblue', 'olive',
'mintcream', 'antiquewhite', 'darkorange', 'cadetblue', 'moccasin',
'limegreen', 'saddlebrown', 'darkslateblue', 'lightskyblue', 'deeppink',
'plum', 'aqua', 'darkgoldenrod', 'maroon', 'sandybrown', 'magenta', 'tan',
'rosybrown', 'pink', 'lightblue', 'palevioletred', 'mediumseagreen',
'dimgray', 'powderblue', 'seagreen', 'snow', 'mediumblue', 'midnightblue',
'paleturquoise', 'palegoldenrod', 'whitesmoke', 'darkorchid', 'salmon',
'lightslategray', 'lawngreen', 'lightgreen', 'tomato', 'hotpink',
'lightyellow', 'lavenderblush', 'linen', 'mediumaquamarine', 'green',
'blueviolet', 'peachpuff'), suffix=r'\b'),
Name.Builtin),
(r'\!important', Comment.Preproc),
(r'/\*(?:.|\n)*?\*/', Comment),
(r'\#[a-zA-Z0-9]{1,6}', Number),
(r'[.-]?[0-9]*[.]?[0-9]+(em|px|pt|pc|in|mm|cm|ex|s)\b', Number),
# Separate regex for percentages, as can't do word boundaries with %
(r'[.-]?[0-9]*[.]?[0-9]+%', Number),
(r'-?[0-9]+', Number),
(r'[~^*!%&<>|+=@:,./?-]+', Operator),
(r'[\[\]();]+', Punctuation),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'[a-zA-Z_]\w*', Name)
]
}
common_sass_tokens = {
'value': [
(r'[ \t]+', Text),
(r'[!$][\w-]+', Name.Variable),
(r'url\(', String.Other, 'string-url'),
(r'[a-z_-][\w-]*(?=\()', Name.Function),
(words((
'azimuth', 'background-attachment', 'background-color',
'background-image', 'background-position', 'background-repeat',
'background', 'border-bottom-color', 'border-bottom-style',
'border-bottom-width', 'border-left-color', 'border-left-style',
'border-left-width', 'border-right', 'border-right-color',
'border-right-style', 'border-right-width', 'border-top-color',
'border-top-style', 'border-top-width', 'border-bottom',
'border-collapse', 'border-left', 'border-width', 'border-color',
'border-spacing', 'border-style', 'border-top', 'border', 'caption-side',
'clear', 'clip', 'color', 'content', 'counter-increment', 'counter-reset',
'cue-after', 'cue-before', 'cue', 'cursor', 'direction', 'display',
'elevation', 'empty-cells', 'float', 'font-family', 'font-size',
'font-size-adjust', 'font-stretch', 'font-style', 'font-variant',
'font-weight', 'font', 'height', 'letter-spacing', 'line-height',
'list-style-type', 'list-style-image', 'list-style-position',
'list-style', 'margin-bottom', 'margin-left', 'margin-right',
'margin-top', 'margin', 'marker-offset', 'marks', 'max-height', 'max-width',
'min-height', 'min-width', 'opacity', 'orphans', 'outline', 'outline-color',
'outline-style', 'outline-width', 'overflow', 'padding-bottom',
'padding-left', 'padding-right', 'padding-top', 'padding', 'page',
'page-break-after', 'page-break-before', 'page-break-inside',
'pause-after', 'pause-before', 'pause', 'pitch', 'pitch-range',
'play-during', 'position', 'quotes', 'richness', 'right', 'size',
'speak-header', 'speak-numeral', 'speak-punctuation', 'speak',
'speech-rate', 'stress', 'table-layout', 'text-align', 'text-decoration',
'text-indent', 'text-shadow', 'text-transform', 'top', 'unicode-bidi',
'vertical-align', 'visibility', 'voice-family', 'volume', 'white-space',
'widows', 'width', 'word-spacing', 'z-index', 'bottom', 'left',
'above', 'absolute', 'always', 'armenian', 'aural', 'auto', 'avoid', 'baseline',
'behind', 'below', 'bidi-override', 'blink', 'block', 'bold', 'bolder', 'both',
'capitalize', 'center-left', 'center-right', 'center', 'circle',
'cjk-ideographic', 'close-quote', 'collapse', 'condensed', 'continuous',
'crop', 'crosshair', 'cross', 'cursive', 'dashed', 'decimal-leading-zero',
'decimal', 'default', 'digits', 'disc', 'dotted', 'double', 'e-resize', 'embed',
'extra-condensed', 'extra-expanded', 'expanded', 'fantasy', 'far-left',
'far-right', 'faster', 'fast', 'fixed', 'georgian', 'groove', 'hebrew', 'help',
'hidden', 'hide', 'higher', 'high', 'hiragana-iroha', 'hiragana', 'icon',
'inherit', 'inline-table', 'inline', 'inset', 'inside', 'invert', 'italic',
'justify', 'katakana-iroha', 'katakana', 'landscape', 'larger', 'large',
'left-side', 'leftwards', 'level', 'lighter', 'line-through', 'list-item',
'loud', 'lower-alpha', 'lower-greek', 'lower-roman', 'lowercase', 'ltr',
'lower', 'low', 'medium', 'message-box', 'middle', 'mix', 'monospace',
'n-resize', 'narrower', 'ne-resize', 'no-close-quote', 'no-open-quote',
'no-repeat', 'none', 'normal', 'nowrap', 'nw-resize', 'oblique', 'once',
'open-quote', 'outset', 'outside', 'overline', 'pointer', 'portrait', 'px',
'relative', 'repeat-x', 'repeat-y', 'repeat', 'rgb', 'ridge', 'right-side',
'rightwards', 's-resize', 'sans-serif', 'scroll', 'se-resize',
'semi-condensed', 'semi-expanded', 'separate', 'serif', 'show', 'silent',
'slow', 'slower', 'small-caps', 'small-caption', 'smaller', 'soft', 'solid',
'spell-out', 'square', 'static', 'status-bar', 'super', 'sw-resize',
'table-caption', 'table-cell', 'table-column', 'table-column-group',
'table-footer-group', 'table-header-group', 'table-row',
'table-row-group', 'text', 'text-bottom', 'text-top', 'thick', 'thin',
'transparent', 'ultra-condensed', 'ultra-expanded', 'underline',
'upper-alpha', 'upper-latin', 'upper-roman', 'uppercase', 'url',
'visible', 'w-resize', 'wait', 'wider', 'x-fast', 'x-high', 'x-large', 'x-loud',
'x-low', 'x-small', 'x-soft', 'xx-large', 'xx-small', 'yes'), suffix=r'\b'),
Name.Constant),
(words((
'indigo', 'gold', 'firebrick', 'indianred', 'darkolivegreen',
'darkseagreen', 'mediumvioletred', 'mediumorchid', 'chartreuse',
'mediumslateblue', 'springgreen', 'crimson', 'lightsalmon', 'brown',
'turquoise', 'olivedrab', 'cyan', 'skyblue', 'darkturquoise',
'goldenrod', 'darkgreen', 'darkviolet', 'darkgray', 'lightpink',
'darkmagenta', 'lightgoldenrodyellow', 'lavender', 'yellowgreen', 'thistle',
'violet', 'orchid', 'ghostwhite', 'honeydew', 'cornflowerblue',
'darkblue', 'darkkhaki', 'mediumpurple', 'cornsilk', 'bisque', 'slategray',
'darkcyan', 'khaki', 'wheat', 'deepskyblue', 'darkred', 'steelblue', 'aliceblue',
'gainsboro', 'mediumturquoise', 'floralwhite', 'coral', 'lightgrey',
'lightcyan', 'darksalmon', 'beige', 'azure', 'lightsteelblue', 'oldlace',
'greenyellow', 'royalblue', 'lightseagreen', 'mistyrose', 'sienna',
'lightcoral', 'orangered', 'navajowhite', 'palegreen', 'burlywood',
'seashell', 'mediumspringgreen', 'papayawhip', 'blanchedalmond',
'peru', 'aquamarine', 'darkslategray', 'ivory', 'dodgerblue',
'lemonchiffon', 'chocolate', 'orange', 'forestgreen', 'slateblue',
'mintcream', 'antiquewhite', 'darkorange', 'cadetblue', 'moccasin',
'limegreen', 'saddlebrown', 'darkslateblue', 'lightskyblue', 'deeppink',
'plum', 'darkgoldenrod', 'sandybrown', 'magenta', 'tan',
'rosybrown', 'pink', 'lightblue', 'palevioletred', 'mediumseagreen',
'dimgray', 'powderblue', 'seagreen', 'snow', 'mediumblue', 'midnightblue',
'paleturquoise', 'palegoldenrod', 'whitesmoke', 'darkorchid', 'salmon',
'lightslategray', 'lawngreen', 'lightgreen', 'tomato', 'hotpink',
'lightyellow', 'lavenderblush', 'linen', 'mediumaquamarine',
'blueviolet', 'peachpuff'), suffix=r'\b'),
Name.Entity),
(words((
'black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green',
'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua'), suffix=r'\b'),
Name.Builtin),
(r'\!(important|default)', Name.Exception),
(r'(true|false)', Name.Pseudo),
(r'(and|or|not)', Operator.Word),
(r'/\*', Comment.Multiline, 'inline-comment'),
(r'//[^\n]*', Comment.Single),
(r'\#[a-z0-9]{1,6}', Number.Hex),
(r'(-?\d+)(\%|[a-z]+)?', bygroups(Number.Integer, Keyword.Type)),
(r'(-?\d*\.\d+)(\%|[a-z]+)?', bygroups(Number.Float, Keyword.Type)),
(r'#\{', String.Interpol, 'interpolation'),
(r'[~^*!&%<>|+=@:,./?-]+', Operator),
(r'[\[\]()]+', Punctuation),
(r'"', String.Double, 'string-double'),
(r"'", String.Single, 'string-single'),
(r'[a-z_-][\w-]*', Name),
],
'interpolation': [
(r'\}', String.Interpol, '#pop'),
include('value'),
],
'selector': [
(r'[ \t]+', Text),
(r'\:', Name.Decorator, 'pseudo-class'),
(r'\.', Name.Class, 'class'),
(r'\#', Name.Namespace, 'id'),
(r'[\w-]+', Name.Tag),
(r'#\{', String.Interpol, 'interpolation'),
(r'&', Keyword),
(r'[~^*!&\[\]()<>|+=@:;,./?-]', Operator),
(r'"', String.Double, 'string-double'),
(r"'", String.Single, 'string-single'),
],
'string-double': [
(r'(\\.|#(?=[^\n{])|[^\n"#])+', String.Double),
(r'#\{', String.Interpol, 'interpolation'),
(r'"', String.Double, '#pop'),
],
'string-single': [
(r"(\\.|#(?=[^\n{])|[^\n'#])+", String.Double),
(r'#\{', String.Interpol, 'interpolation'),
(r"'", String.Double, '#pop'),
],
'string-url': [
(r'(\\#|#(?=[^\n{])|[^\n#)])+', String.Other),
(r'#\{', String.Interpol, 'interpolation'),
(r'\)', String.Other, '#pop'),
],
'pseudo-class': [
(r'[\w-]+', Name.Decorator),
(r'#\{', String.Interpol, 'interpolation'),
default('#pop'),
],
'class': [
(r'[\w-]+', Name.Class),
(r'#\{', String.Interpol, 'interpolation'),
default('#pop'),
],
'id': [
(r'[\w-]+', Name.Namespace),
(r'#\{', String.Interpol, 'interpolation'),
default('#pop'),
],
'for': [
(r'(from|to|through)', Operator.Word),
include('value'),
],
}
def _indentation(lexer, match, ctx):
indentation = match.group(0)
yield match.start(), Text, indentation
ctx.last_indentation = indentation
ctx.pos = match.end()
if hasattr(ctx, 'block_state') and ctx.block_state and \
indentation.startswith(ctx.block_indentation) and \
indentation != ctx.block_indentation:
ctx.stack.append(ctx.block_state)
else:
ctx.block_state = None
ctx.block_indentation = None
ctx.stack.append('content')
def _starts_block(token, state):
def callback(lexer, match, ctx):
yield match.start(), token, match.group(0)
if hasattr(ctx, 'last_indentation'):
ctx.block_indentation = ctx.last_indentation
else:
ctx.block_indentation = ''
ctx.block_state = state
ctx.pos = match.end()
return callback
class SassLexer(ExtendedRegexLexer):
"""
For Sass stylesheets.
.. versionadded:: 1.3
"""
name = 'Sass'
aliases = ['sass']
filenames = ['*.sass']
mimetypes = ['text/x-sass']
flags = re.IGNORECASE | re.MULTILINE
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'content': [
(r'//[^\n]*', _starts_block(Comment.Single, 'single-comment'),
'root'),
(r'/\*[^\n]*', _starts_block(Comment.Multiline, 'multi-comment'),
'root'),
(r'@import', Keyword, 'import'),
(r'@for', Keyword, 'for'),
(r'@(debug|warn|if|while)', Keyword, 'value'),
(r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
(r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
(r'@extend', Keyword, 'selector'),
(r'@[\w-]+', Keyword, 'selector'),
(r'=[\w-]+', Name.Function, 'value'),
(r'\+[\w-]+', Name.Decorator, 'value'),
(r'([!$][\w-]\w*)([ \t]*(?:(?:\|\|)?=|:))',
bygroups(Name.Variable, Operator), 'value'),
(r':', Name.Attribute, 'old-style-attr'),
(r'(?=.+?[=:]([^a-z]|$))', Name.Attribute, 'new-style-attr'),
default('selector'),
],
'single-comment': [
(r'.+', Comment.Single),
(r'\n', Text, 'root'),
],
'multi-comment': [
(r'.+', Comment.Multiline),
(r'\n', Text, 'root'),
],
'import': [
(r'[ \t]+', Text),
(r'\S+', String),
(r'\n', Text, 'root'),
],
'old-style-attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#\{', String.Interpol, 'interpolation'),
(r'[ \t]*=', Operator, 'value'),
default('value'),
],
'new-style-attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#\{', String.Interpol, 'interpolation'),
(r'[ \t]*[=:]', Operator, 'value'),
],
'inline-comment': [
(r"(\\#|#(?=[^\n{])|\*(?=[^\n/])|[^\n#*])+", Comment.Multiline),
(r'#\{', String.Interpol, 'interpolation'),
(r"\*/", Comment, '#pop'),
],
}
for group, common in iteritems(common_sass_tokens):
tokens[group] = copy.copy(common)
tokens['value'].append((r'\n', Text, 'root'))
tokens['selector'].append((r'\n', Text, 'root'))
class ScssLexer(RegexLexer):
"""
For SCSS stylesheets.
"""
name = 'SCSS'
aliases = ['scss']
filenames = ['*.scss']
mimetypes = ['text/x-scss']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'root': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@import', Keyword, 'value'),
(r'@for', Keyword, 'for'),
(r'@(debug|warn|if|while)', Keyword, 'value'),
(r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
(r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
(r'@extend', Keyword, 'selector'),
(r'(@media)(\s+)', bygroups(Keyword, Text), 'value'),
(r'@[\w-]+', Keyword, 'selector'),
(r'(\$[\w-]*\w)([ \t]*:)', bygroups(Name.Variable, Operator), 'value'),
# TODO: broken, and prone to infinite loops.
#(r'(?=[^;{}][;}])', Name.Attribute, 'attr'),
#(r'(?=[^;{}:]+:[^a-z])', Name.Attribute, 'attr'),
default('selector'),
],
'attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#\{', String.Interpol, 'interpolation'),
(r'[ \t]*:', Operator, 'value'),
default('#pop'),
],
'inline-comment': [
(r"(\\#|#(?=[^{])|\*(?=[^/])|[^#*])+", Comment.Multiline),
(r'#\{', String.Interpol, 'interpolation'),
(r"\*/", Comment, '#pop'),
],
}
for group, common in iteritems(common_sass_tokens):
tokens[group] = copy.copy(common)
tokens['value'].extend([(r'\n', Text), (r'[;{}]', Punctuation, '#pop')])
tokens['selector'].extend([(r'\n', Text), (r'[;{}]', Punctuation, '#pop')])
class LessCssLexer(CssLexer):
"""
For `LESS <http://lesscss.org/>`_ styleshets.
.. versionadded:: 2.1
"""
name = 'LessCss'
aliases = ['less']
filenames = ['*.less']
mimetypes = ['text/x-less-css']
tokens = {
'root': [
(r'@\w+', Name.Variable),
inherit,
],
'content': [
(r'{', Punctuation, '#push'),
inherit,
],
}
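

# A minimal, illustrative usage sketch (hypothetical snippet; NullFormatter simply
# echoes the token text, which is enough to exercise the lexer defined above).
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import NullFormatter
    sample_scss = '$accent: #3bbfce;\n.button { color: $accent; }\n'
    print(highlight(sample_scss, ScssLexer(), NullFormatter()))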
|
py | b402028f2f134c62abab3a7a3d94b142495ae8a0 | import numpy as np
from sklearn.metrics import mean_squared_error, mean_absolute_error
def divide(title=None):
print()
title = "" if title is None else title
print(title.center(50, "="))
print()
def format_print_dict(dd, depth=0):
if not isinstance(dd, dict):
print(dd)
else:
for k, v in dd.items():
print("\t" * depth, end="")
print(k, "\t: ", end="")
if not isinstance(v, dict):
print(v)
else:
print()
format_print_dict(v, depth + 1)
# Get the indices of the K most similar items (only positions with a non-zero rating are considered)
def get_K_neighbors(data_vector, similarity_vector, K):
sim = similarity_vector.copy()
zero_location = np.where(data_vector == 0)
sim[zero_location] = 0
K_neighbors = sparse_matrix_sort(-sim)[0:K]
return K_neighbors
# Sort the non-zero entries of a sparse vector and return their indices, ordered by value (ascending)
def sparse_matrix_sort(matrix):
non_zero_idx = np.nonzero(matrix)[0]
res = non_zero_idx[np.argsort(matrix[non_zero_idx])]
return res
# Root Mean Squared Error
def calc_RMSE(ground_truth, pred):
return np.sqrt(mean_squared_error(ground_truth, pred))
# Mean Absolute Error
def calc_MAE(ground_truth, pred):
return mean_absolute_error(ground_truth, pred)
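

# A minimal, illustrative sanity check -- the rating/similarity vectors and K below are
# made-up values, shown only to demonstrate how the helpers above fit together.
if __name__ == "__main__":
    ratings = np.array([5.0, 0.0, 3.0, 0.0, 4.0])      # 0 means "not rated"
    similarity = np.array([0.9, 0.2, 0.5, 0.7, 0.1])
    # Only rated positions are eligible neighbors; expect indices [0, 2].
    print(get_K_neighbors(ratings, similarity, K=2))
    print(calc_RMSE([3.0, 4.0], [2.5, 4.5]), calc_MAE([3.0, 4.0], [2.5, 4.5]))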
|
py | b402037cc25ff1e06bc294ff5cea082f54774770 | import os
import sys
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
# points to the path where the chromedriver is installed
from selenium.webdriver.common.keys import Keys
chrome_driver_path = "/home/yang/Downloads"
os.environ["PATH"] += os.pathsep + chrome_driver_path
class NewVisitorTest(StaticLiveServerTestCase):
@classmethod
def setUpClass(cls):
live_address = os.environ.get('liveserver')
if live_address:
cls.server_url = "http://" + live_address
return
super().setUpClass()
cls.server_url = cls.live_server_url
@classmethod
def tearDownClass(cls):
        if os.environ.get('liveserver'):
# if hasattr(cls, 'server_url') and cls.server_url == cls.live_server_url:
super().tearDownClass()
def setUp(self):
self.browser = webdriver.Chrome()
self.browser.implicitly_wait(1)
def tearDown(self):
# pass
self.browser.quit()
def check_for_row_in_list_table(self, row_text):
table = self.browser.find_element_by_id('id_list_table')
rows = table.find_elements_by_tag_name('tr')
self.assertIn(row_text, [row.text for row in rows])
def test_can_start_a_list_and_retrieve_it_later(self):
        # Edith has heard about a cool online to-do app. She goes to check out its home page
self.browser.get(self.server_url)
        # Edith notices the title says it is a to-do list
self.assertIn('To-Do', self.browser.title)
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('To-Do', header_text)
# She is invited to enter a to-do item straight away
inputbox = self.browser.find_element_by_id('id_new_item')
self.assertEqual(inputbox.get_attribute('placeholder'),'Enter a to-do item')
# She types "Buy peacock feathers" into a text box (Edith's hobby
# is tying fly-fishing lures)
value1 = 'Buy peacock feathers'
inputbox.send_keys(value1)
# When she hits enter, the page updates, and now the page lists
# "1: Buy peacock feathers" as an item in a to-do list
inputbox.send_keys(Keys.ENTER)
edith_list_url = self.browser.current_url
self.assertRegex(edith_list_url, '/lists/.+') # 1
self.check_for_row_in_list_table("1: " + value1)
inputbox = self.browser.find_element_by_id('id_new_item')
value2 = 'Use peacock feathers to make a fly'
inputbox.send_keys(value2)
inputbox.send_keys(Keys.ENTER)
self.check_for_row_in_list_table("2: " + value2)
# Now a new user, Francis, comes along to the site.
## We use a new browser session to make sure that no information
## of Edith's is coming through from cookies etc #1
self.browser.quit()
self.browser = webdriver.Chrome()
# Francis visits the home page. There is no sign of Edith's
# list
self.browser.get(self.server_url)
page_text = self.browser.find_element_by_tag_name('body').text
self.assertNotIn('Buy peacock feathers', page_text)
self.assertNotIn('make a fly', page_text)
# Francis starts a new list by entering a new item. He
# is less interesting than Edith...
inputbox = self.browser.find_element_by_id('id_new_item')
inputbox.send_keys('Buy milk')
inputbox.send_keys(Keys.ENTER)
# Francis gets his own unique URL
francis_list_url = self.browser.current_url
self.assertRegex(francis_list_url, '/lists/.+')
self.assertNotEqual(francis_list_url, edith_list_url)
# Again, there is no trace of Edith's list
page_text = self.browser.find_element_by_tag_name('body').text
self.assertNotIn('Buy peacock feathers', page_text)
self.assertIn('Buy milk', page_text)
        # Satisfied, they both go back to sleep
self.fail('Finish the Test')
def test_layout_and_styling(self):
# Edith goes to the home page
self.browser.get(self.server_url)
self.browser.set_window_size(1024, 768)
# She notices the input box is nicely centered
inputbox = self.browser.find_element_by_id('id_new_item')
self.assertAlmostEqual(
inputbox.location['x'] + 8 + inputbox.size['width'] / 2, # 8 is webkit margin of 16px /2
512,
delta=5
)
# She starts a new list and sees the input is nicely
# centered there too
inputbox.send_keys('testing\n')
inputbox = self.browser.find_element_by_id('id_new_item')
self.assertAlmostEqual(
inputbox.location['x'] + 8 + inputbox.size['width'] / 2,
512,
delta=5
) |
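
# Note: this suite is normally run through Django's test runner (e.g. `python manage.py test`);
# exporting a `liveserver` environment variable (for instance liveserver=staging.example.com,
# a hypothetical host) makes setUpClass above point the tests at an already-running server
# instead of Django's local test server.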
py | b4020387b9c13ac4456433408e2470a254ed349c | #!/usr/bin/python3
"""
@Author: Liu Shaoweihua
@Site: https://github.com/liushaoweihua
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
import numpy as np
import collections
from pyclue.tf1.tokenizers.utils import convert_to_unicode
class InputExample(object):
"""A single training/test example for simple sequence sentence_pair."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class PaddingInputExample(object):
"""Fake example so the num input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
batches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids_1,
input_mask_1,
segment_ids_1,
input_ids_2,
input_mask_2,
segment_ids_2,
label_id,
is_real_example=True):
self.input_ids_1 = input_ids_1
self.input_mask_1 = input_mask_1
self.segment_ids_1 = segment_ids_1
self.input_ids_2 = input_ids_2
self.input_mask_2 = input_mask_2
self.segment_ids_2 = segment_ids_2
self.label_id = label_id
self.is_real_example = is_real_example
class Processor(object):
"""Base class for data converters for sequence sentence_pair data sets."""
def __init__(self, max_seq_len, tokenizer, labels=None):
self.max_seq_len = max_seq_len
self.tokenizer = tokenizer
self.labels = labels
self._get_labels()
def _get_labels(self):
"""Gets the list of labels."""
if not self.labels:
self.labels = ['pseudo_label']
assert isinstance(self.labels, list), 'labels should be `list` instance.'
self.num_labels = len(self.labels)
self.label_map = {label: i for i, label in enumerate(self.labels)}
self.label_map_reverse = {i: label for i, label in enumerate(self.labels)}
def _create_examples(self, lines, set_type):
"""Creates examples."""
examples = []
print('# {} data: {}'.format(set_type, len(lines)))
for i, line in enumerate(lines):
origin_line = ' '.join(line)
guid = '{}-{}'.format(set_type, i)
try:
label = convert_to_unicode(line[0])
label = label.replace('"', '').replace('\\', '')
text_a = convert_to_unicode(line[1])
text_b = convert_to_unicode(line[2])
if label in self.labels or set_type == 'predict':
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
except Exception:
print('### {}-example error {}: {}'.format(set_type, i, origin_line))
return examples
def _get_feature_for_example(self, example):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids_1=[0] * self.max_seq_len,
input_mask_1=[0] * self.max_seq_len,
segment_ids_1=[0] * self.max_seq_len,
input_ids_2=[0] * self.max_seq_len,
input_mask_2=[0] * self.max_seq_len,
segment_ids_2=[0] * self.max_seq_len,
label_id=0,
is_real_example=False)
# sentence_pair input text a
tokens_a = self.tokenizer.tokenize(example.text_a)
if len(tokens_a) > self.max_seq_len - 2:
tokens_a = tokens_a[0:(self.max_seq_len - 2)]
tokens_1 = []
segment_ids_1 = []
tokens_1.append('[CLS]')
segment_ids_1.append(0)
tokens_1.extend(tokens_a)
segment_ids_1.extend([0] * len(tokens_a))
tokens_1.append('[SEP]')
segment_ids_1.append(0)
input_ids_1 = self.tokenizer.convert_tokens_to_ids(tokens_1)
input_mask_1 = [1] * len(input_ids_1)
# Zero-pad up to the sequence length.
while len(input_ids_1) < self.max_seq_len:
input_ids_1.append(0)
input_mask_1.append(0)
segment_ids_1.append(0)
assert len(input_ids_1) == self.max_seq_len
assert len(input_mask_1) == self.max_seq_len
assert len(segment_ids_1) == self.max_seq_len
# sentence_pair input text b
tokens_b = self.tokenizer.tokenize(example.text_b)
if len(tokens_b) > self.max_seq_len - 2:
tokens_b = tokens_b[0:(self.max_seq_len - 2)]
tokens_2 = []
segment_ids_2 = []
tokens_2.append('[CLS]')
segment_ids_2.append(0)
tokens_2.extend(tokens_b)
segment_ids_2.extend([0] * len(tokens_b))
tokens_2.append('[SEP]')
segment_ids_2.append(0)
input_ids_2 = self.tokenizer.convert_tokens_to_ids(tokens_2)
input_mask_2 = [1] * len(input_ids_2)
# Zero-pad up to the sequence length.
while len(input_ids_2) < self.max_seq_len:
input_ids_2.append(0)
input_mask_2.append(0)
segment_ids_2.append(0)
assert len(input_ids_2) == self.max_seq_len
assert len(input_mask_2) == self.max_seq_len
assert len(segment_ids_2) == self.max_seq_len
label_id = self.label_map[example.label]
feature = InputFeatures(
input_ids_1=input_ids_1,
input_mask_1=input_mask_1,
segment_ids_1=segment_ids_1,
input_ids_2=input_ids_2,
input_mask_2=input_mask_2,
segment_ids_2=segment_ids_2,
label_id=label_id,
is_real_example=True)
return feature
def get_features_for_inputs(self, lines, set_type='predict'):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
examples = self._create_examples(lines, set_type)
features = []
for example in examples:
feature = self._get_feature_for_example(example=example)
features.append(feature)
return features
@staticmethod
def read_file(file_path):
"""Read files."""
with tf.gfile.GFile(file_path, 'r') as f:
data = f.readlines()
lines = []
for line in data:
lines.append(line.strip().split('\t'))
return lines
class FileProcessor(Processor):
"""Data converters for sequence sentence_pair data with file inputs."""
def __init__(self, max_seq_len, tokenizer, data_dir, save_tfrecord_dir=None, recreate_tfrecord=True):
self.max_seq_len = max_seq_len
self.tokenizer = tokenizer
self.data_dir = os.path.abspath(data_dir)
self.save_tfrecord_dir = os.path.abspath(save_tfrecord_dir) if save_tfrecord_dir else None
self.recreate_tfrecord = recreate_tfrecord
self._get_labels()
self._get_data()
def _get_data(self):
"""Gets a collection of `InputExample`s."""
# train
train_example_path = os.path.join(self.data_dir, 'train.txt')
train_example_path_tfrecord = None if not self.save_tfrecord_dir \
else os.path.join(self.save_tfrecord_dir, 'train.tfrecord')
self.train_examples = self._create_examples(
self.read_file(train_example_path), 'train')
self.num_train_examples = len(self.train_examples)
if self.recreate_tfrecord and train_example_path_tfrecord and self.num_train_examples > 0:
self._save_tfrecords(
examples=self.train_examples, output_file=train_example_path_tfrecord)
# dev
dev_example_path = os.path.join(self.data_dir, 'dev.txt')
dev_example_path_tfrecord = None if not self.save_tfrecord_dir \
else os.path.join(self.save_tfrecord_dir, 'dev.tfrecord')
self.dev_examples = self._create_examples(
self.read_file(dev_example_path), 'dev')
self.num_dev_examples = len(self.dev_examples)
if self.recreate_tfrecord and dev_example_path_tfrecord and self.num_dev_examples > 0:
self._save_tfrecords(
examples=self.dev_examples, output_file=dev_example_path_tfrecord)
# test
test_example_path = os.path.join(self.data_dir, 'test.txt')
test_example_path_tfrecord = None if not self.save_tfrecord_dir \
else os.path.join(self.save_tfrecord_dir, 'test.tfrecord')
if tf.gfile.Exists(test_example_path):
self.test_examples = self._create_examples(
self.read_file(test_example_path), 'test')
self.num_test_examples = len(self.test_examples)
if self.recreate_tfrecord and test_example_path_tfrecord and self.num_test_examples > 0:
self._save_tfrecords(
examples=self.test_examples, output_file=test_example_path_tfrecord)
else:
self.test_examples = None
self.num_test_examples = 0
def _get_labels(self):
"""Gets the list of labels."""
self.labels = []
lines = self.read_file(
os.path.join(self.data_dir, 'labels.txt'))
for line in lines:
self.labels.append(line[0])
self.num_labels = len(self.labels)
self.label_map = {label: i for i, label in enumerate(self.labels)}
self.label_map_reverse = {i: label for i, label in enumerate(self.labels)}
def _save_tfrecords(self, examples, output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.python_io.TFRecordWriter(output_file)
def create_int_feature(values):
return tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
for example in examples:
feature = self._get_feature_for_example(example)
features = collections.OrderedDict()
features['input_ids_1'] = create_int_feature(feature.input_ids_1)
features['input_mask_1'] = create_int_feature(feature.input_mask_1)
features['segment_ids_1'] = create_int_feature(feature.segment_ids_1)
features['input_ids_2'] = create_int_feature(feature.input_ids_2)
features['input_mask_2'] = create_int_feature(feature.input_mask_2)
features['segment_ids_2'] = create_int_feature(feature.segment_ids_2)
features['label_ids'] = create_int_feature([feature.label_id])
features['is_real_example'] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
class InputFn(object):
"""Data converters for sequence sentence_pair data sets."""
def __init__(self, max_seq_len):
self.max_seq_len = max_seq_len
def predict_input_fn(self, features):
"""Creates an `input_fn` closure to be passed to Estimator."""
x = {
'input_ids_1': [],
'input_mask_1': [],
'segment_ids_1': [],
'input_ids_2': [],
'input_mask_2': [],
'segment_ids_2': [],
'label_ids': []
}
for feature in features:
x['input_ids_1'].append(feature.input_ids_1)
x['input_mask_1'].append(feature.input_mask_1)
x['segment_ids_1'].append(feature.segment_ids_1)
x['input_ids_2'].append(feature.input_ids_2)
x['input_mask_2'].append(feature.input_mask_2)
x['segment_ids_2'].append(feature.segment_ids_2)
x['label_ids'].append(feature.label_id)
x = {item: np.array(x[item]) for item in x}
input_fn = tf.estimator.inputs.numpy_input_fn(
x=x, num_epochs=1, shuffle=False)
return input_fn
class FileInputFn(InputFn):
"""Data converters for sequence sentence_pair data sets saved as tfrecord format."""
def __init__(self, max_seq_len, input_file_dir, batch_size):
super(FileInputFn, self).__init__(max_seq_len)
self.input_file_dir = os.path.abspath(input_file_dir)
self.batch_size = batch_size
self._get_input_fn_from_file()
def _get_input_fn_from_file(self):
self.train_input_fn = self._file_based_input_fn_builder(
input_file=os.path.join(self.input_file_dir, 'train.tfrecord'),
is_training=True,
drop_remainder=True)
self.dev_input_fn = self._file_based_input_fn_builder(
input_file=os.path.join(self.input_file_dir, 'dev.tfrecord'),
is_training=False,
drop_remainder=False)
if tf.gfile.Exists(os.path.join(self.input_file_dir, 'test.tfrecord')):
self.test_input_fn = self._file_based_input_fn_builder(
input_file=os.path.join(self.input_file_dir, 'test.tfrecord'),
is_training=False,
drop_remainder=False)
else:
self.test_input_fn = None
def _file_based_input_fn_builder(self, input_file, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to Estimator."""
name_to_features = {
'input_ids_1': tf.FixedLenFeature([self.max_seq_len], tf.int64),
'input_mask_1': tf.FixedLenFeature([self.max_seq_len], tf.int64),
'segment_ids_1': tf.FixedLenFeature([self.max_seq_len], tf.int64),
'input_ids_2': tf.FixedLenFeature([self.max_seq_len], tf.int64),
'input_mask_2': tf.FixedLenFeature([self.max_seq_len], tf.int64),
'segment_ids_2': tf.FixedLenFeature([self.max_seq_len], tf.int64),
'label_ids': tf.FixedLenFeature([], tf.int64),
'is_real_example': tf.FixedLenFeature([], tf.int64)}
def _decode_record(record, name_to_features):
"""Decodes a record to a Tensorflow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn():
"""The actual input function."""
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=1000)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=self.batch_size,
drop_remainder=drop_remainder))
return d
return input_fn
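

# A minimal, illustrative sketch of the Processor pipeline. The character-level tokenizer
# below is only a stand-in for a real PyCLUE tokenizer (it just has to provide `tokenize`
# and `convert_tokens_to_ids`); the labels and sentences are made up.
if __name__ == '__main__':
    class _StubTokenizer(object):
        def tokenize(self, text):
            return list(text.replace(' ', ''))

        def convert_tokens_to_ids(self, tokens):
            return [hash(token) % 1000 for token in tokens]

    processor = Processor(max_seq_len=16, tokenizer=_StubTokenizer(), labels=['0', '1'])
    lines = [['1', 'first sentence', 'second sentence']]
    features = processor.get_features_for_inputs(lines, set_type='train')
    print(len(features), features[0].label_id, features[0].input_ids_1)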
|
py | b40204152e57a5d6213718566708627aa70b2df9 | import requests
import pytz
from time import sleep
from datetime import datetime
from dbo import config, Accounting
class Collector(object):
'''
Collector is based on code of Uroš Vovk
www.urosvovk.com/bandwidth-usage-report-per-ip-address/
'''
def __init__(self):
self.router_ip = config("general", "router_ip")
self.accounting_url = "http://{}/accounting/ip.cgi".format(self.router_ip)
self.local_network = ".".join(self.router_ip.split('.')[0:-1]) + '.'
self.interval = config("general", "interval")
def collect(self):
while True:
response = requests.get(self.accounting_url)
data = response.text
#print (data)
data_collector = {}
for l in data.split("\n")[:-1]:
is_upload = True
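                # Each accounting line is expected to look like (MikroTik-style IP accounting):
                #   <src-ip> <dst-ip> <bytes> <packets> <src-user> <dst-user>
                # Only the two addresses and the byte count are used below.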
src, dst, b, p, su, du = l.split()
if src.startswith(self.local_network): # upload
k = src
else: # download
k = dst
is_upload = False
if k not in data_collector:
data_collector[k] = [0, 0]
if is_upload:
data_collector[k][0] = data_collector[k][0] + int(b)
else:
data_collector[k][1] = data_collector[k][1] + int(b)
for key, val in data_collector.items():
Accounting.create(
address=key,
date=datetime.utcnow(),
upload=val[0],
download=val[1],
)
sleep(self.interval)
|
py | b402043710fe56f57c298f1ef4159540199b3b2e | from setuptools import setup
setup(
name='n_line_notify',
packages=['n_line_notify'],
url='https://github.com/NiorAP/n_line_notify',
license='MIT',
author='Nior.A.P',
author_email='[email protected]',
description='Line Notify Library by Nior.A.P',
keywords=['Python', 'Line API', 'Line Notify', 'Nior.A.P'],
install_requires=['requests', 'numpy', 'imageio'],
include_package_data=True
)
|
py | b40205445a3febb241ddc08c77c65222e22b4380 | ############################################
# Copyright (c) 2012 Microsoft Corporation
#
# Z3 project configuration files
#
# Author: Leonardo de Moura (leonardo)
############################################
from mk_util import *
# Z3 Project definition
def init_project_def():
set_version(4, 5, 1, 0)
add_lib('util', [])
add_lib('lp', ['util'], 'util/lp')
add_lib('polynomial', ['util'], 'math/polynomial')
add_lib('sat', ['util'])
add_lib('nlsat', ['polynomial', 'sat'])
add_lib('hilbert', ['util'], 'math/hilbert')
add_lib('simplex', ['util'], 'math/simplex')
add_lib('automata', ['util'], 'math/automata')
add_lib('interval', ['util'], 'math/interval')
add_lib('realclosure', ['interval'], 'math/realclosure')
add_lib('subpaving', ['interval'], 'math/subpaving')
add_lib('ast', ['util', 'polynomial'])
add_lib('rewriter', ['ast', 'polynomial', 'automata'], 'ast/rewriter')
add_lib('normal_forms', ['rewriter'], 'ast/normal_forms')
add_lib('model', ['rewriter'])
add_lib('tactic', ['ast', 'model'])
add_lib('substitution', ['ast', 'rewriter'], 'ast/substitution')
add_lib('parser_util', ['ast'], 'parsers/util')
add_lib('grobner', ['ast'], 'math/grobner')
add_lib('euclid', ['util'], 'math/euclid')
add_lib('core_tactics', ['tactic', 'normal_forms'], 'tactic/core')
add_lib('sat_tactic', ['tactic', 'sat'], 'sat/tactic')
add_lib('arith_tactics', ['core_tactics', 'sat'], 'tactic/arith')
add_lib('nlsat_tactic', ['nlsat', 'sat_tactic', 'arith_tactics'], 'nlsat/tactic')
add_lib('subpaving_tactic', ['core_tactics', 'subpaving'], 'math/subpaving/tactic')
add_lib('aig_tactic', ['tactic'], 'tactic/aig')
add_lib('solver', ['model', 'tactic'])
add_lib('ackermannization', ['model', 'rewriter', 'ast', 'solver', 'tactic'], 'ackermannization')
add_lib('interp', ['solver'])
add_lib('cmd_context', ['solver', 'rewriter', 'interp'])
add_lib('extra_cmds', ['cmd_context', 'subpaving_tactic', 'arith_tactics'], 'cmd_context/extra_cmds')
add_lib('smt2parser', ['cmd_context', 'parser_util'], 'parsers/smt2')
add_lib('proof_checker', ['rewriter'], 'ast/proof_checker')
# Simplifier module will be deleted in the future.
# It has been replaced with rewriter module.
add_lib('simplifier', ['rewriter'], 'ast/simplifier')
add_lib('fpa', ['ast', 'util', 'simplifier', 'model'], 'ast/fpa')
add_lib('macros', ['simplifier'], 'ast/macros')
add_lib('pattern', ['normal_forms', 'smt2parser', 'simplifier'], 'ast/pattern')
add_lib('bit_blaster', ['rewriter', 'simplifier'], 'ast/rewriter/bit_blaster')
add_lib('smt_params', ['ast', 'simplifier', 'pattern', 'bit_blaster'], 'smt/params')
add_lib('proto_model', ['model', 'simplifier', 'smt_params'], 'smt/proto_model')
add_lib('smt', ['bit_blaster', 'macros', 'normal_forms', 'cmd_context', 'proto_model',
'substitution', 'grobner', 'euclid', 'simplex', 'proof_checker', 'pattern', 'parser_util', 'fpa', 'lp'])
add_lib('bv_tactics', ['tactic', 'bit_blaster', 'core_tactics'], 'tactic/bv')
add_lib('fuzzing', ['ast'], 'test/fuzzing')
add_lib('smt_tactic', ['smt'], 'smt/tactic')
add_lib('sls_tactic', ['tactic', 'normal_forms', 'core_tactics', 'bv_tactics'], 'tactic/sls')
add_lib('qe', ['smt','sat','nlsat','tactic','nlsat_tactic'], 'qe')
add_lib('duality', ['smt', 'interp', 'qe'])
add_lib('muz', ['smt', 'sat', 'smt2parser', 'aig_tactic', 'qe'], 'muz/base')
add_lib('dataflow', ['muz'], 'muz/dataflow')
add_lib('transforms', ['muz', 'hilbert', 'dataflow'], 'muz/transforms')
add_lib('rel', ['muz', 'transforms'], 'muz/rel')
add_lib('pdr', ['muz', 'transforms', 'arith_tactics', 'core_tactics', 'smt_tactic'], 'muz/pdr')
add_lib('spacer', ['muz', 'transforms', 'arith_tactics', 'smt_tactic'], 'muz/spacer')
add_lib('clp', ['muz', 'transforms'], 'muz/clp')
add_lib('tab', ['muz', 'transforms'], 'muz/tab')
add_lib('bmc', ['muz', 'transforms'], 'muz/bmc')
add_lib('ddnf', ['muz', 'transforms', 'rel'], 'muz/ddnf')
add_lib('duality_intf', ['muz', 'transforms', 'duality'], 'muz/duality')
add_lib('fp', ['muz', 'pdr', 'clp', 'tab', 'rel', 'bmc', 'duality_intf', 'ddnf', 'spacer'], 'muz/fp')
add_lib('nlsat_smt_tactic', ['nlsat_tactic', 'smt_tactic'], 'tactic/nlsat_smt')
add_lib('ufbv_tactic', ['normal_forms', 'core_tactics', 'macros', 'smt_tactic', 'rewriter'], 'tactic/ufbv')
add_lib('sat_solver', ['solver', 'core_tactics', 'aig_tactic', 'bv_tactics', 'arith_tactics', 'sat_tactic'], 'sat/sat_solver')
add_lib('smtlogic_tactics', ['ackermannization', 'sat_solver', 'arith_tactics', 'bv_tactics', 'nlsat_tactic', 'smt_tactic', 'aig_tactic', 'fp', 'muz','qe','nlsat_smt_tactic'], 'tactic/smtlogics')
add_lib('fpa_tactics', ['fpa', 'core_tactics', 'bv_tactics', 'sat_tactic', 'smt_tactic', 'arith_tactics', 'smtlogic_tactics'], 'tactic/fpa')
add_lib('portfolio', ['smtlogic_tactics', 'sat_solver', 'ufbv_tactic', 'fpa_tactics', 'aig_tactic', 'fp', 'qe','sls_tactic', 'subpaving_tactic'], 'tactic/portfolio')
add_lib('smtparser', ['portfolio'], 'parsers/smt')
add_lib('opt', ['smt', 'smtlogic_tactics', 'sls_tactic', 'sat_solver'], 'opt')
API_files = ['z3_api.h', 'z3_ast_containers.h', 'z3_algebraic.h', 'z3_polynomial.h', 'z3_rcf.h', 'z3_fixedpoint.h', 'z3_optimization.h', 'z3_interp.h', 'z3_fpa.h', 'z3_spacer.h']
add_lib('api', ['portfolio', 'smtparser', 'realclosure', 'interp', 'opt'],
includes2install=['z3.h', 'z3_v1.h', 'z3_macros.h'] + API_files)
add_exe('shell', ['api', 'sat', 'extra_cmds','opt'], exe_name='z3')
add_exe('test', ['api', 'fuzzing', 'simplex'], exe_name='test-z3', install=False)
_libz3Component = add_dll('api_dll', ['api', 'sat', 'extra_cmds'], 'api/dll',
reexports=['api'],
dll_name='libz3',
static=build_static_lib(),
export_files=API_files,
staging_link='python')
add_dot_net_dll('dotnet', ['api_dll'], 'api/dotnet', dll_name='Microsoft.Z3', assembly_info_dir='Properties', default_key_file='src/api/dotnet/Microsoft.Z3.snk')
add_java_dll('java', ['api_dll'], 'api/java', dll_name='libz3java', package_name="com.microsoft.z3", manifest_file='manifest')
add_ml_lib('ml', ['api_dll'], 'api/ml', lib_name='libz3ml')
add_hlib('cpp', 'api/c++', includes2install=['z3++.h'])
set_z3py_dir('api/python')
add_python(_libz3Component)
add_python_install(_libz3Component)
# Examples
add_cpp_example('cpp_example', 'c++')
add_cpp_example('z3_tptp', 'tptp')
add_c_example('c_example', 'c')
add_c_example('maxsat')
add_dotnet_example('dotnet_example', 'dotnet')
add_java_example('java_example', 'java')
add_ml_example('ml_example', 'ml')
add_z3py_example('py_example', 'python')
return API_files
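
# Illustrative note: a new component is registered the same way as the entries above,
# e.g. add_lib('mylib', ['util'], 'my/subdir') declares its dependencies and source
# sub-directory; the name 'mylib' here is purely hypothetical.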
|
py | b402058d9655cb60cdd37a0201fd109ed53d81e3 | import abc
from collections import defaultdict, MutableMapping, OrderedDict
import json
import os
import sys
import random
import itertools
import math
import traceback
import warnings
import operator
import time
import numpy as np
import resource
import platform
from sklearn.metrics import average_precision_score
__author__ = 'Kelvin Gu'
def time_it(f, n=1):
"""
My version of timeit
"""
start = time.time()
for k in range(n):
f()
stop = time.time()
return stop - start
def npa(l):
return np.array(l, dtype=float)
def npnorm(*args, **kwargs):
"""
Just an abbreviation
"""
return np.random.normal(*args, **kwargs)
def unit_vec(dim, entry):
"""
Return column vector (2D numpy array) with all zeros except a 1 in the specified entry
"""
x = np.zeros((dim, 1))
x[entry] = 1
return x
def print_nn(s):
# print with no newline
sys.stdout.write(s)
sys.stdout.flush()
def sample_if_large(arr, max_size, replace=True):
if len(arr) > max_size:
idx = np.random.choice(len(arr), size=max_size, replace=replace)
return [arr[i] for i in idx]
return list(arr)
def sample_excluding(items, exclude):
candidates = list(items) # shallow copy
random.shuffle(candidates)
for cand in candidates:
if cand not in exclude:
return cand
return None
def flatten(lol):
"""
Flatten a list of lists
"""
return [item for sublist in lol for item in sublist]
def chunks(l, n):
"""
Return a generator of lists, each of size n (the last list may be less than n)
"""
for i in xrange(0, len(l), n):
yield l[i:i + n]
# PROFILING CODE
# ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
profiling = False
profile_by_count = True # TODO: WARNING, by_count breaks PyCharm debugger
def get_profiler():
if not profiling:
return
# lazy load (this won't be available in production)
import line_profiler
glob = globals()
if 'line_profiler_' not in glob:
profiler = line_profiler.LineProfiler()
if profile_by_count:
profiler.enable_by_count()
glob['line_profiler_'] = profiler
print 'initialized profiler'
return glob['line_profiler_']
def reset_profiler(keep_fxns=True):
if not profiling:
return
# save old functions
profiler = get_profiler()
old_fxns = list(profiler.functions)
# reset
del globals()['line_profiler_']
profiler = get_profiler()
# put old functions back
if keep_fxns:
for fxn in old_fxns:
profiler.add_function(fxn)
print 'kept functions:', profiler.functions
def profile(f):
"""A decorator for functions you want to profile"""
if not profiling:
return f
profiler = get_profiler()
profiler.add_function(f)
print 'added to profiler:', f
return f
def profile_report():
if not profiling:
return
profiler = get_profiler()
profiler.print_stats()
# ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
def memoize(f):
cache = {}
def decorated(*args):
if args not in cache:
cache[args] = f(*args)
else:
print 'loading cached values for {}'.format(args)
return cache[args]
return decorated
class EqualityMixin(object):
def __eq__(self, other):
if type(other) is type(self):
return self.__dict__ == other.__dict__
return False
def __ne__(self, other):
return not self.__eq__(other)
def data_split(items, dev_part=0.1, test_part=0.1):
# don't allow duplicates
assert len(set(items)) == len(items)
# remaining portion is set aside for train
assert dev_part + test_part < 1.0
items_copy = list(items)
random.shuffle(items_copy)
n = len(items_copy)
ndev = int(n * dev_part)
ntest = int(n * test_part)
dev = items_copy[:ndev]
test = items_copy[ndev:ndev + ntest]
train = items_copy[ndev + ntest:]
# verify that there is no overlap
train_set = set(train)
dev_set = set(dev)
test_set = set(test)
assert len(train_set.intersection(dev_set)) == 0
assert len(train_set.intersection(test_set)) == 0
print 'train {}, dev {}, test {}'.format(len(train), len(dev), len(test))
return train, dev, test
def is_vector(x):
# suppose x.shape = (d1, d2, ..., dn)
# checks that there is no more than one di > 1
non_flat = [i for i, d in enumerate(x.shape) if d > 1]
return len(non_flat) <= 1
class MultivariateFunction(object):
"""
Represents an Rm -> Rn nonlinearity
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def value(self, x):
"""
m -> n
"""
return
def elem_derivative(self, x):
"""
Element-wise derivative. m -> n
"""
raise NotImplementedError()
@abc.abstractmethod
def jacobian(self, x):
"""
Jacobian. m -> n x m
"""
assert is_vector(x)
x_1d = np.ravel(x)
# if an element-wise derivative is implemented, use that
elem_deriv = self.elem_derivative(x_1d)
return np.diag(elem_deriv)
class Tanh(MultivariateFunction):
def value(self, x):
return np.tanh(x)
def elem_derivative(self, x):
return 1.0 - np.tanh(x) ** 2
def jacobian(self, x):
return super(Tanh, self).jacobian(x)
class Identity(MultivariateFunction):
def value(self, x):
return x
def elem_derivative(self, x):
return np.ones(x.shape)
def jacobian(self, x):
return super(Identity, self).jacobian(x)
class Sigmoid(MultivariateFunction):
def value(self, x):
# scipy.special.expit will return NaN if x gets larger than about 700, which is just wrong
# compute using two different approaches
# they are each stable over a different interval of x
with warnings.catch_warnings():
warnings.simplefilter('ignore')
numer = np.exp(x)
s0 = numer / (1.0 + numer)
denom = 1.0 + np.exp(-x)
s1 = 1.0 / denom
# replace nans
if isinstance(x, float):
if np.isnan(s0):
s0 = s1
else:
nans = np.isnan(s0)
s0[nans] = s1[nans]
return s0
def elem_derivative(self, x):
ex = np.exp(x)
s = 1.0 / (1.0 + ex)
return s - s ** 2
def jacobian(self, x):
return super(Sigmoid, self).jacobian(x)
# shortcuts
sigmoid_object = Sigmoid()
sigmoid = sigmoid_object.value
sigmoid_derivative = sigmoid_object.elem_derivative
class RectLinear(MultivariateFunction):
def value(self, x):
return np.maximum(x, 0.0)
def elem_derivative(self, x):
grad = np.zeros(x.shape)
grad[x > 0.0] = 1.0
return grad
def jacobian(self, x):
return super(RectLinear, self).jacobian(x)
def examine_nan(type, flag):
# this gets called whenever numpy produces a NaN
pass # we can set a breakpoint here
def catch_nans():
np.seterr(invalid='call')
np.seterrcall(examine_nan)
def pandas_options():
# lazy load
import pandas as pd
pd.set_option('expand_frame_repr', False)
pd.set_option('display.max_rows', 3000)
pd.set_option('display.max_colwidth', 1000)
def group(items, keyfunc):
# this is different from itertools.groupby in several ways:
# - groupby creates a new group every time the key CHANGES in the sequence of items
# - this function returns a dict
groups = defaultdict(list)
for item in verboserate(items):
l = groups[keyfunc(item)]
l.append(item)
return groups
def weighted_abs_error(examples, predictions):
total = 0.0
error = 0.0
for ex, predict in zip(examples, predictions):
w = ex.weight
actual = ex.score
error += w * abs(actual - predict)
total += w
return error / total
def f1(correct, retrieved):
hits = float(len([a for a in retrieved if a in correct]))
precision = hits / len(retrieved)
recall = hits / len(correct)
if precision + recall == 0:
return 0.0
return 2.0 * (precision * recall) / (precision + recall)
def dialogue_accuracy(dlg, predictions, exclude=None):
if exclude is None:
exclude = set()
right = 0.0
total = 0.0
for correct, predict in itertools.izip(dlg.outputs, predictions):
if correct in exclude:
continue
right += 1.0 if correct == predict else 0.0
total += 1.0
return right / total
def compute_if_absent(d, key, keyfunc):
val = d.get(key)
if val is None:
val = keyfunc(key)
d[key] = val
return val
def tensor_combine(tensor, matrix):
# linearly combine pages of the tensor, based on matrix columns
# e.g. result[0] = tensor[0] * matrix[0, 0] + tensor[1] * matrix[1, 0] + ...
# e.g. result[1] = tensor[0] * matrix[0, 1] + tensor[1] * matrix[1, 1] + ...
# temporarily make the first axis the last
axes = range(len(tensor.shape))
tensor_mod = np.transpose(tensor, np.roll(axes, -1))
# perform linear combination
tensor_mod = tensor_mod.dot(matrix)
# go back to original axes
tensor_mod = np.transpose(tensor_mod, np.roll(axes, 1))
return tensor_mod
class NestedDict(MutableMapping):
def __init__(self):
self.d = {}
def __iter__(self):
return self.d.__iter__()
def __delitem__(self, key):
return self.d.__delitem__(key)
def __getitem__(self, key):
try:
return self.d.__getitem__(key)
except KeyError:
val = NestedDict()
self.d[key] = val
return val
def __len__(self):
return self.d.__len__()
def __setitem__(self, key, value):
return self.d.__setitem__(key, value)
def get_nested(self, keys):
d = self
for k in keys:
d = d[k]
return d
def set_nested(self, keys, val):
d = self.get_nested(keys[:-1])
return d.__setitem__(keys[-1], val)
def __repr__(self):
return self.d.__repr__()
def as_dict(self):
items = []
for key, sub in self.iteritems():
if isinstance(sub, NestedDict):
val = sub.as_dict()
else:
val = sub
items.append((key, val))
return dict(items)
meta = NestedDict()
def metadata(keys, val):
"""
Sets entries in a nested dictionary called meta.
After each call, meta is updated and saved to meta.json in the current directory
keys = either a string or a tuple of strings
a tuple of strings will be interpreted as nested keys in a dictionary, i.e. dictionary[key1][key2][...]
"""
# This is only designed to be used with CodaLab
if isinstance(keys, tuple):
meta.set_nested(keys, val)
else:
# if there is actually just one key
meta[keys] = val
# sync with file
with open('meta.json', 'w') as f:
d = meta.as_dict() # json only handles dicts
json.dump(d, f)
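
# Illustrative example of metadata(): metadata('accuracy', 0.9) stores a top-level entry,
# while metadata(('eval', 'accuracy'), 0.9) nests it as meta['eval']['accuracy']; every
# call rewrites meta.json in the current working directory.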
class ComputeDefaultDict(MutableMapping):
def __init__(self, init_fxn):
self.d = {}
self.init_fxn = init_fxn
def __iter__(self):
return self.d.__iter__()
def __delitem__(self, key):
return self.d.__delitem__(key)
def __getitem__(self, key):
try:
return self.d.__getitem__(key)
except KeyError:
val = self.init_fxn(key)
self.d[key] = val
return val
def __len__(self):
return self.d.__len__()
def __setitem__(self, key, value):
return self.d.__setitem__(key, value)
class FallbackDict(MutableMapping):
"""
Getting: try to get item from main dict. If failed, get from fallback dict.
Setting: set items in the main dict. If you try to set an item present in the fallback dict, throw an error.
"""
def __init__(self, main, fallback):
# assert no key overlap
# TODO: WARNING, may be expensive
# main_keys = set(main.keys())
# fback_keys = set(fallback.keys())
# assert len(main_keys.intersection(fback_keys)) == 0
self.main = main
self.fallback = fallback
def __getitem__(self, key):
try:
return self.main[key]
except KeyError:
return self.fallback[key]
def __setitem__(self, key, value):
if key in self.fallback:
raise KeyError('Not allowed to set items in fallback dict')
self.main[key] = value
def __delitem__(self, key):
if key in self.fallback:
raise KeyError('Not allowed to delete items in fallback dict')
del self.main[key]
def __iter__(self):
return itertools.chain(iter(self.main), iter(self.fallback))
def __len__(self):
return len(self.main) + len(self.fallback)
def __repr__(self):
return 'main:\n{}\nfallback:\n{}'.format(repr(self.main), repr(self.fallback))
def nearest_word(v, wvecs):
word_scores = []
for word, vec in wvecs.iteritems():
s = vec.T.dot(v)[0][0]
word_scores.append((word, s))
top_word, top_score = max(word_scores, key=operator.itemgetter(1))
return top_word
def align_view(words, width=7):
return ' '.join([word.ljust(width) for word in words])
def format_nested_dict(d):
# convert all keys and leaf values to strings
def string_keys(d0):
if not isinstance(d0, dict):
return str(d0)
return dict((str(k), string_keys(v)) for k, v in d0.iteritems())
return json.dumps(string_keys(d), sort_keys=True, indent=4)
def nested_iteritems(d):
for k, v in d.iteritems():
if isinstance(v, dict):
for k_suffix, v_leaf in nested_iteritems(v):
yield (k,) + k_suffix, v_leaf
else:
yield (k,), v
def nested_setitem(d, key_tuple, val):
sub_d = d
for key in key_tuple[:-1]:
sub_d = compute_if_absent(sub_d, key, lambda k: {})
sub_d[key_tuple[-1]] = val
def transform_nested(d0, fxn):
    nested = lambda: defaultdict(nested)  # recursive defaultdict factory (pass the factory itself, do not call it)
d = {}
for key_tuple, val in nested_iteritems(d0):
new_key_tuple, new_val = fxn(key_tuple, val)
nested_setitem(d, new_key_tuple, new_val)
return d
def unit_circle_points(n):
"""
Return n unique evenly spaced points on the unit circle
"""
thetas = np.linspace(0, 2 * math.pi, num=n, endpoint=False)
radii = np.ones(thetas.shape)
x = radii * np.cos(thetas)
y = radii * np.sin(thetas)
xy = np.vstack((x, y))
pts = [xy[:, [i]] for i in range(len(thetas))]
return pts
def conveyor_belt(n, shift):
w = np.eye(n)
cycle = [(i - shift) % n for i in range(n)]
w = w[cycle, :]
w[:shift, :] = 0.0
return w
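
# Illustrative example: conveyor_belt(3, 1) is [[0,0,0],[1,0,0],[0,1,0]], so multiplying
# a column vector [a, b, c] by it gives [0, a, b] -- each entry moves down one slot and
# the last one falls off the belt.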
def verboserate(iterable, time_wait=5, report=None):
"""
Iterate verbosely.
"""
try:
total = len(iterable)
except TypeError:
total = '?'
def default_report(steps, elapsed):
print '{} of {} processed ({} s)'.format(steps, total, elapsed)
sys.stdout.flush()
if report is None:
report = default_report
start = time.time()
prev = start
for steps, val in enumerate(iterable):
current = time.time()
since_prev = current - prev
elapsed = current - start
if since_prev > time_wait:
report(steps, elapsed)
prev = current
yield val
def in_ipython():
try:
__IPYTHON__
return True
except NameError:
return False
def makedirs(directory):
if directory != '' and not os.path.exists(directory):
os.makedirs(directory)
def show(title, directory=''):
import matplotlib.pyplot as plt
if in_ipython():
plt.show()
else:
# ensure directory exists
makedirs(directory)
plt.savefig(os.path.join(directory, title) + '.png')
# close all figures to conserve memory
plt.close('all')
def ticks_off():
tickparams = dict((key, 'off') for key in ['top', 'bottom', 'left', 'right', 'labelbottom', 'labelleft'])
tickparams['which'] = 'both'
import matplotlib.pyplot as plt
plt.tick_params(**tickparams)
def matshow(mat):
import matplotlib.pyplot as plt
plt.figure()
plt.imshow(mat, interpolation='nearest')
show('matshow')
class Bunch:
"""
A simple class for holding arbitrary attributes.
"""
def __init__(self, **kwds):
self.__dict__.update(kwds)
def __repr__(self):
return str(self.__dict__.keys())
def pad_vector(vec, before, after):
return np.pad(vec, ((before, after), (0, 0)), mode='constant', constant_values=0)
def latex_subscript(main, *subscript_terms):
return '{}_{{{}}}'.format(main, ','.join([str(term) for term in subscript_terms]))
def invert_latex_subscript(s):
main, subscript = s.split('_', 1)
# strip leading and trailing braces
subscript = subscript[1:-1]
# split by commas
split_pts = []
brace_depth = 0
for i, char in enumerate(subscript):
if char == '{': brace_depth += 1
if char == '}': brace_depth -= 1
if brace_depth == 0 and char == ',':
split_pts.append(i)
split_pts.insert(0, -1)
split_pts.append(len(subscript))
subscript_terms = []
for i in range(len(split_pts) - 1):
term = subscript[split_pts[i]+1:split_pts[i+1]]
subscript_terms.append(term)
return main, subscript_terms
class Stopwatch(dict):
def __init__(self):
self.start = time.time()
def mark(self, name):
diff = time.time() - self.start
self[name] = diff
return diff
def avg_scaling(a, nonlinearity=None):
"""
Estimate the expected scaling factor: ||f(Av)|| / ||v||
Where f is a nonlinearity, v is a random unit vector and we use the 2-norm
"""
if nonlinearity is None:
nonlinearity = Identity()
q = 5000
V = np.random.normal(0.0, 1.0, (a.shape[1], q))
ratios = []
for k in range(q):
v = V[:, k].reshape(-1, 1)
y = nonlinearity.value(a.dot(v))
ratio = np.linalg.norm(y) / np.linalg.norm(v)
ratios.append(ratio)
return sum(ratios) / len(ratios)
def average_precision(positives, negatives):
"""
positives and negatives must each be a 1D array of scores
"""
if len(positives) == 0:
print 'WARNING: No positive examples presented! AP = NaN.'
return np.nan
elif len(negatives) == 0:
print 'WARNING: No negative examples presented! AP = 1.'
return 1.
scores = np.concatenate((positives, negatives))
labels = np.concatenate((np.ones(positives.shape), np.zeros(negatives.shape)))
return average_precision_score(labels, scores)
def ranks(scores, ascending=True):
if isinstance(scores, list):
scores = np.array(scores)
else:
assert len(scores.shape) == 1
flip = 1 if ascending else -1
idx = np.argsort(flip * scores)
ranks = np.empty(scores.shape, dtype=int)
ranks[idx] = np.arange(len(scores))
# ranks should start from 1
ranks += 1
return list(ranks)
def quantile(rank, total):
"""
Return 1.0 when you are first, 0.0 when you are last.
"""
if total == 1:
return np.nan
return float(total - rank) / (total - 1)
def rank_from_quantile(quantile, total):
if np.isnan(quantile):
return 1
return total - quantile * (total - 1)
def average_quantile(positives, negatives):
all = np.concatenate((positives, negatives))
all_ranks = ranks(all, ascending=False)[:len(positives)]
pos_ranks = ranks(positives, ascending=False)
filtered_ranks = [a - (p - 1) for a, p in itertools.izip(all_ranks, pos_ranks)] # filtered ranks
n = len(negatives) + 1 # total filtered candidates
quantiles = [quantile(r, n) for r in filtered_ranks]
return np.nanmean(quantiles)
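
# Illustrative example: with positives scored [3.0, 1.0] and a single negative scored
# [2.0], the first positive outranks the negative (filtered rank 1 -> quantile 1.0) and
# the second does not (filtered rank 2 -> quantile 0.0), so average_quantile returns 0.5.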
def plot_ecdf(x, *args, **kwargs):
x = list(x) # make a copy
x.sort()
y = (np.arange(len(x)) + 1.0) / len(x)
import matplotlib.pyplot as plt
plt.plot(x, y, *args, **kwargs)
def plot_pdf(x, cov_factor=None, *args, **kwargs):
import matplotlib.pyplot as plt
from scipy.stats import gaussian_kde
density = gaussian_kde(x)
xgrid = np.linspace(min(x), max(x), 200)
if cov_factor is not None:
density.covariance_factor = lambda: cov_factor
density._compute_covariance()
y = density(xgrid)
plt.plot(xgrid, y, *args, **kwargs)
def sorted_by_value(d, ascending=True):
return OrderedDict(sorted(d.items(), key=operator.itemgetter(1), reverse=not ascending))
def show_warn_traceback():
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
traceback.print_stack()
log = file if hasattr(file, 'write') else sys.stderr
log.write(warnings.formatwarning(message, category, filename, lineno, line))
warnings.showwarning = warn_with_traceback
def gb_used():
used = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
if platform.system() != 'Darwin':
# on Linux, used is in terms of kilobytes
power = 2
else:
# on Mac, used is in terms of bytes
power = 3
return float(used) / math.pow(1024, power)
def transpose_dict(d):
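    """Swap the two key levels of a nested dict: d[i][j] becomes d_t[j][i]."""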
d_t = defaultdict(dict)
for i, di in d.iteritems():
for j, dij in di.iteritems():
d_t[j][i] = dij
return dict(d_t)
def best_threshold(scores, labels, debug=False):
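    """Return the threshold t that maximizes accuracy of the rule: predict positive iff score > t."""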
# find best threshold in O(nlogn)
# does not handle scores of infinity or -infinity
items = zip(scores, labels)
items.sort()
total = len(items)
total_pos = len([l for l in labels if l])
def accuracy(p, n):
correct_n = n
correct_p = total_pos - p
return float(correct_n + correct_p) / total
# predict True iff score > thresh
pos = 0 # no. pos <= thresh
neg = 0 # no. neg <= thresh
thresh_accs = [(float('-inf'), accuracy(pos, neg))]
for thresh, label in items:
if label:
pos += 1
else:
neg += 1
thresh_accs.append((thresh, accuracy(pos, neg)))
if debug:
import matplotlib.pyplot as plt
x, y = zip(*thresh_accs)
plt.figure()
plt.plot(x, y)
pos_scores = [s for s, l in items if l]
neg_scores = [s for s, l in items if not l]
plot_pdf(pos_scores, 0.1, color='b')
plot_pdf(neg_scores, 0.1, color='r')
plt.show()
return max(thresh_accs, key=operator.itemgetter(1))[0]
|
py | b402063e9525ad77c5e576da95a3d45a9a5928e7 | from bs4 import BeautifulSoup
import requests
import pandas as pd
import re
import time
url = 'https://ascopubs.org/loi/jco'
time.sleep(3)
page = requests.get(url, headers={'User-Agent':'Your BrowserUser-Agent'})
soup = BeautifulSoup(page.content, "html.parser")
#%% Pick what to extract from each item (url, Issue, Date)
def extraer_info (links):
web = 'https://ascopubs.org' + links.a['href']
data = links.find('span', class_="issue_inner_container").get_text().split()
return (web, data[0:2], data[2:5])
#%% Get all the issue URLs on the page
raw_data = [extraer_info(links) for links in soup.find_all('div', class_="js_issue row")]
df1 = pd.DataFrame (raw_data, columns=('Pagina Web', 'Issue', 'Fecha'))
df1.to_csv('JCO_ALL_ISSUE.csv', index=False)
issue_full_list = list (df1['Pagina Web'].astype(str).tolist())
#%%
#%%
#%% Second layer
# Pick what to extract from each item (url, title and section of each paper)
def extraer_info_paper (link_paper):
doi = link_paper.find('a', class_="ref nowrap full")['href']
title = link_paper.find('span', class_="hlFld-Title").get_text()
return (title, doi, "https://ascopubs.org" + doi)
#%% Get all the papers on the page
def procesar_paginas(n):
issue_url = issue_full_list[n]
time.sleep(3)
page = requests.get(issue_url, headers={'User-Agent':'Your BrowserUser-Agent'})
soup = BeautifulSoup(page.content, "html.parser")
papers_data = [extraer_info_paper(link_paper) for link_paper in soup.find_all('table', class_="articleEntry")]
return pd.DataFrame(papers_data, columns=('Titulos', 'DOI', 'URL'))
#%% Scrape over the number of pages we want
df2 = pd.concat([procesar_paginas(n) for n in range(0,10)], ignore_index=True)
df2.to_csv('JCO_20XX_URLS.csv', index=False)
pages = list (df2['URL'].astype(str).tolist())
#%%
#%%
#%% Third layer
# Extract info paper by paper (DOI, title, Volume&Issue, Authors, Section)
titles=list()
authors=list()
dois=list()
volume=list()
section=list()
for n in pages:
time.sleep(3)
page = requests.get(n, headers={'User-Agent':'Your BrowserUser-Agent'})
soup = BeautifulSoup(page.content, "html.parser")
#Title
tls=re.sub(r'\|.*', "", soup.title.text)
titles.append(tls)
# #Author
all_ath = list(map(lambda i : i.text, filter(lambda i : i.text!='Search for articles by this author', soup.find_all("a", class_="entryAuthor"))))
authors.append(all_ath)
# DOI
all_doi = list(map(lambda i : i.get('href'), soup.find_all('link')))
dois.append(all_doi[2])
# # section
sct = soup.find_all('h2', limit=1)
section.append(sct)
# # Volume-Issue
vol = list(map(lambda i : i.text, filter(lambda i : i.text!= 'Newest Articles' and i.text!='Current Issue', soup.find_all(href=re.compile('/toc/jco')))))
volume.append(vol)
df = pd.DataFrame({"Title": titles, "Authors": authors, "DOI": dois, "Section": section, "Vol-Issue": volume})
df.to_csv('JCO_20XX.csv', index=False)
#%%
#%%
|
py | b40207e6bef17030fb87dc5173228be038830a00 | #! /usr/bin/env python3
""" script to track user cpu % by process family for coda """
from collections import Counter
import psutil
debug=False
def process_watch():
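    """Aggregate user CPU time of coda-related processes into parent and child totals, then print a per-family (snark-worker, proposer, seed) breakdown."""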
usertime_parent = Counter() # single process
usertime_subprocess = Counter() # child processes
parents = {} # keep cmdline for parent pids
for p in psutil.process_iter(attrs=['cmdline', 'cpu_times', 'pid', 'ppid']):
try:
cmdline = " ".join(p.info['cmdline'])
if 'coda' in cmdline or 'coda internal snark-worker' in cmdline \
or 'parallel-worker' in cmdline \
or 'coda-kademlia' in cmdline:
user_cpu = int(p.info['cpu_times'].user)
pid = p.info['pid']
ppid = p.info['ppid']
if debug:
print(ppid, pid, cmdline)
# Daemon and nohup processes are owned by parent pid 1
if ppid == 1:
parents[pid] = cmdline
usertime_parent[pid] += user_cpu
elif "coda internal snark-worker" in cmdline and "/usr/local/bin/coda" not in cmdline:
parents[pid] = cmdline
usertime_parent[pid] += user_cpu
else:
usertime_subprocess[ppid] += user_cpu
except psutil.NoSuchProcess:
continue
usertime_family = Counter()
print('='*80)
print('CPU usertime By Process Tree\nPARENT \t CHILDREN \t PARENT_CMDLINE')
for pid in parents:
print(usertime_parent[pid], '\t',
usertime_subprocess[pid], '\t\t', parents[pid], )
cmdline = parents[pid]
# family classifications
key_words = ['snark-worker', 'proposer', 'seed']
for word in key_words:
if word in cmdline:
usertime_family[word] += (usertime_parent[pid] + usertime_subprocess[pid])
usertime_family['total'] += (usertime_parent[pid] + usertime_subprocess[pid])
print('='*80)
print('CPU Usertime By Family\nRaw \t % \t Family')
for family in usertime_family.keys():
if family == 'total': continue
ratio = int(usertime_family[family]/usertime_family['total'] * 100)
print(usertime_family[family], '\t', ratio, '\t', family)
if __name__ == '__main__':
process_watch() |
py | b4020821ee8c7136239c6632db89ed13accfcd40 | from __future__ import absolute_import, division, print_function
import operator
from functools import partial, wraps
from itertools import product, repeat
from math import factorial, log, ceil
import numpy as np
from toolz import compose, partition_all, get, accumulate, pluck
from . import chunk
from .core import _concatenate2, Array, atop, lol_tuples, handle_out
from .creation import arange
from .ufunc import sqrt
from .utils import validate_axis
from .wrap import zeros, ones
from .numpy_compat import ma_divide, divide as np_divide
from ..compatibility import getargspec, builtins
from ..base import tokenize
from ..utils import ignoring, funcname, Dispatch
from .. import config, sharedict
# Generic functions to support chunks of different types
empty_lookup = Dispatch('empty')
empty_lookup.register((object, np.ndarray), np.empty)
empty_lookup.register(np.ma.masked_array, np.ma.empty)
divide_lookup = Dispatch('divide')
divide_lookup.register((object, np.ndarray), np_divide)
divide_lookup.register(np.ma.masked_array, ma_divide)
def divide(a, b, dtype=None):
key = lambda x: getattr(x, '__array_priority__', float('-inf'))
f = divide_lookup.dispatch(type(builtins.max(a, b, key=key)))
return f(a, b, dtype=dtype)
def reduction(x, chunk, aggregate, axis=None, keepdims=False, dtype=None,
split_every=None, combine=None, name=None, out=None,
concatenate=True, output_size=1):
""" General version of reductions
Parameters
----------
x: Array
Data being reduced along one or more axes
chunk: callable(x_chunk, axis, keepdims)
First function to be executed when resolving the dask graph.
This function is applied in parallel to all original chunks of x.
See below for function parameters.
combine: callable(x_chunk, axis, keepdims), optional
Function used for intermediate recursive aggregation (see
split_every below). If omitted, it defaults to aggregate.
If the reduction can be performed in less than 3 steps, it will not
be invoked at all.
aggregate: callable(x_chunk, axis, keepdims)
Last function to be executed when resolving the dask graph,
producing the final output. It is always invoked, even when the reduced
Array counts a single chunk along the reduced axes.
axis: int or sequence of ints, optional
Axis or axes to aggregate upon. If omitted, aggregate along all axes.
keepdims: boolean, optional
Whether the reduction function should preserve the reduced axes,
leaving them at size ``output_size``, or remove them.
dtype: np.dtype, optional
Force output dtype. Defaults to x.dtype if omitted.
split_every: int >= 2 or dict(axis: int), optional
Determines the depth of the recursive aggregation. If set to or more
than the number of input chunks, the aggregation will be performed in
two steps, one ``chunk`` function per input chunk and a single
``aggregate`` function at the end. If set to less than that, an
intermediate ``combine`` function will be used, so that any one
``combine`` or ``aggregate`` function has no more than ``split_every``
inputs. The depth of the aggregation graph will be
:math:`log_{split_every}(input chunks along reduced axes)`. Setting to
a low value can reduce cache size and network transfers, at the cost of
more CPU and a larger dask graph.
Omit to let dask heuristically decide a good default. A default can
also be set globally with the ``split_every`` key in
:mod:`dask.config`.
name: str, optional
Prefix of the keys of the intermediate and output nodes. If omitted it
defaults to the function names.
out: Array, optional
Another dask array whose contents will be replaced. Omit to create a
new one. Note that, unlike in numpy, this setting gives no performance
benefits whatsoever, but can still be useful if one needs to preserve
the references to a previously existing Array.
concatenate: bool, optional
If True (the default), the outputs of the ``chunk``/``combine``
functions are concatenated into a single np.array before being passed
to the ``combine``/``aggregate`` functions. If False, the input of
``combine`` and ``aggregate`` will be either a list of the raw outputs
of the previous step or a single output, and the function will have to
concatenate it itself. It can be useful to set this to False if the
chunk and/or combine steps do not produce np.arrays.
output_size: int >= 1, optional
Size of the output of the ``aggregate`` function along the reduced
axes. Ignored if keepdims is False.
Returns
-------
dask array
**Function Parameters**
x_chunk: numpy.ndarray
Individual input chunk. For ``chunk`` functions, it is one of the
original chunks of x. For ``combine`` and ``aggregate`` functions, it's
the concatenation of the outputs produced by the previous ``chunk`` or
``combine`` functions. If concatenate=False, it's a list of the raw
outputs from the previous functions.
axis: tuple
Normalized list of axes to reduce upon, e.g. ``(0, )``
Scalar, negative, and None axes have been normalized away.
Note that some numpy reduction functions cannot reduce along multiple
axes at once and strictly require an int in input. Such functions have
to be wrapped to cope.
keepdims: bool
Whether the reduction function should preserve the reduced axes or
remove them.
"""
if axis is None:
axis = tuple(range(x.ndim))
if isinstance(axis, int):
axis = (axis,)
axis = validate_axis(axis, x.ndim)
if dtype is None:
raise ValueError("Must specify dtype")
if 'dtype' in getargspec(chunk).args:
chunk = partial(chunk, dtype=dtype)
if 'dtype' in getargspec(aggregate).args:
aggregate = partial(aggregate, dtype=dtype)
# Map chunk across all blocks
inds = tuple(range(x.ndim))
# The dtype of `tmp` doesn't actually matter, and may be incorrect.
tmp = atop(chunk, inds, x, inds, axis=axis, keepdims=True, dtype=x.dtype)
tmp._chunks = tuple((output_size, ) * len(c) if i in axis else c
for i, c in enumerate(tmp.chunks))
result = _tree_reduce(tmp, aggregate, axis, keepdims, dtype, split_every,
combine, name=name, concatenate=concatenate)
if keepdims and output_size != 1:
result._chunks = tuple((output_size, ) if i in axis else c
for i, c in enumerate(tmp.chunks))
return handle_out(out, result)
def _tree_reduce(x, aggregate, axis, keepdims, dtype, split_every=None,
combine=None, name=None, concatenate=True):
""" Perform the tree reduction step of a reduction.
Lower level, users should use ``reduction`` or ``arg_reduction`` directly.
"""
# Normalize split_every
split_every = split_every or config.get('split_every', 4)
if isinstance(split_every, dict):
split_every = dict((k, split_every.get(k, 2)) for k in axis)
elif isinstance(split_every, int):
n = builtins.max(int(split_every ** (1 / (len(axis) or 1))), 2)
split_every = dict.fromkeys(axis, n)
else:
raise ValueError("split_every must be a int or a dict")
# Reduce across intermediates
depth = 1
for i, n in enumerate(x.numblocks):
if i in split_every and split_every[i] != 1:
depth = int(builtins.max(depth, ceil(log(n, split_every[i]))))
func = partial(combine or aggregate, axis=axis, keepdims=True)
if concatenate:
func = compose(func, partial(_concatenate2, axes=axis))
for i in range(depth - 1):
x = partial_reduce(func, x, split_every, True, dtype=dtype,
name=(name or funcname(combine or aggregate)) + '-partial')
func = partial(aggregate, axis=axis, keepdims=keepdims)
if concatenate:
func = compose(func, partial(_concatenate2, axes=axis))
return partial_reduce(func, x, split_every, keepdims=keepdims, dtype=dtype,
name=(name or funcname(aggregate)) + '-aggregate')
def partial_reduce(func, x, split_every, keepdims=False, dtype=None, name=None):
""" Partial reduction across multiple axes.
Parameters
----------
func : function
x : Array
split_every : dict
Maximum reduction block sizes in each dimension.
Examples
--------
Reduce across axis 0 and 2, merging a maximum of 1 block in the 0th
dimension, and 3 blocks in the 2nd dimension:
>>> partial_reduce(np.min, x, {0: 1, 2: 3}) # doctest: +SKIP
"""
name = (name or funcname(func)) + '-' + tokenize(func, x, split_every,
keepdims, dtype)
parts = [list(partition_all(split_every.get(i, 1), range(n))) for (i, n)
in enumerate(x.numblocks)]
keys = product(*map(range, map(len, parts)))
out_chunks = [tuple(1 for p in partition_all(split_every[i], c)) if i
in split_every else c for (i, c) in enumerate(x.chunks)]
if not keepdims:
out_axis = [i for i in range(x.ndim) if i not in split_every]
getter = lambda k: get(out_axis, k)
keys = map(getter, keys)
out_chunks = list(getter(out_chunks))
dsk = {}
for k, p in zip(keys, product(*parts)):
decided = dict((i, j[0]) for (i, j) in enumerate(p) if len(j) == 1)
dummy = dict(i for i in enumerate(p) if i[0] not in decided)
g = lol_tuples((x.name,), range(x.ndim), decided, dummy)
dsk[(name,) + k] = (func, g)
return Array(sharedict.merge(x.dask, (name, dsk)), name, out_chunks, dtype=dtype)
@wraps(chunk.sum)
def sum(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
if dtype is not None:
dt = dtype
else:
dt = getattr(np.empty((1,), dtype=a.dtype).sum(), 'dtype', object)
return reduction(a, chunk.sum, chunk.sum, axis=axis, keepdims=keepdims,
dtype=dt, split_every=split_every, out=out)
@wraps(chunk.prod)
def prod(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
if dtype is not None:
dt = dtype
else:
dt = getattr(np.empty((1,), dtype=a.dtype).prod(), 'dtype', object)
return reduction(a, chunk.prod, chunk.prod, axis=axis, keepdims=keepdims,
dtype=dt, split_every=split_every, out=out)
@wraps(chunk.min)
def min(a, axis=None, keepdims=False, split_every=None, out=None):
return reduction(a, chunk.min, chunk.min, axis=axis, keepdims=keepdims,
dtype=a.dtype, split_every=split_every, out=out)
@wraps(chunk.max)
def max(a, axis=None, keepdims=False, split_every=None, out=None):
return reduction(a, chunk.max, chunk.max, axis=axis, keepdims=keepdims,
dtype=a.dtype, split_every=split_every, out=out)
@wraps(chunk.any)
def any(a, axis=None, keepdims=False, split_every=None, out=None):
return reduction(a, chunk.any, chunk.any, axis=axis, keepdims=keepdims,
dtype='bool', split_every=split_every, out=out)
@wraps(chunk.all)
def all(a, axis=None, keepdims=False, split_every=None, out=None):
return reduction(a, chunk.all, chunk.all, axis=axis, keepdims=keepdims,
dtype='bool', split_every=split_every, out=out)
@wraps(chunk.nansum)
def nansum(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
if dtype is not None:
dt = dtype
else:
dt = getattr(chunk.nansum(np.empty((1,), dtype=a.dtype)), 'dtype', object)
return reduction(a, chunk.nansum, chunk.sum, axis=axis, keepdims=keepdims,
dtype=dt, split_every=split_every, out=out)
with ignoring(AttributeError):
@wraps(chunk.nanprod)
def nanprod(a, axis=None, dtype=None, keepdims=False, split_every=None,
out=None):
if dtype is not None:
dt = dtype
else:
dt = getattr(chunk.nansum(np.empty((1,), dtype=a.dtype)), 'dtype', object)
return reduction(a, chunk.nanprod, chunk.prod, axis=axis,
keepdims=keepdims, dtype=dt, split_every=split_every,
out=out)
@wraps(chunk.nancumsum)
def nancumsum(x, axis, dtype=None, out=None):
return cumreduction(chunk.nancumsum, operator.add, 0, x, axis, dtype,
out=out)
@wraps(chunk.nancumprod)
def nancumprod(x, axis, dtype=None, out=None):
return cumreduction(chunk.nancumprod, operator.mul, 1, x, axis, dtype,
out=out)
@wraps(chunk.nanmin)
def nanmin(a, axis=None, keepdims=False, split_every=None, out=None):
return reduction(a, chunk.nanmin, chunk.nanmin, axis=axis,
keepdims=keepdims, dtype=a.dtype, split_every=split_every,
out=out)
@wraps(chunk.nanmax)
def nanmax(a, axis=None, keepdims=False, split_every=None, out=None):
return reduction(a, chunk.nanmax, chunk.nanmax, axis=axis,
keepdims=keepdims, dtype=a.dtype, split_every=split_every,
out=out)
def numel(x, **kwargs):
""" A reduction to count the number of elements """
return chunk.sum(np.ones_like(x), **kwargs)
def nannumel(x, **kwargs):
""" A reduction to count the number of elements """
return chunk.sum(~np.isnan(x), **kwargs)
def mean_chunk(x, sum=chunk.sum, numel=numel, dtype='f8', **kwargs):
n = numel(x, dtype=dtype, **kwargs)
total = sum(x, dtype=dtype, **kwargs)
empty = empty_lookup.dispatch(type(n))
result = empty(n.shape, dtype=[('total', total.dtype), ('n', n.dtype)])
result['n'] = n
result['total'] = total
return result
def mean_combine(pair, sum=chunk.sum, numel=numel, dtype='f8', **kwargs):
n = sum(pair['n'], **kwargs)
total = sum(pair['total'], **kwargs)
empty = empty_lookup.dispatch(type(n))
result = empty(n.shape, dtype=pair.dtype)
result['n'] = n
result['total'] = total
return result
def mean_agg(pair, dtype='f8', **kwargs):
return divide(pair['total'].sum(dtype=dtype, **kwargs),
pair['n'].sum(dtype=dtype, **kwargs), dtype=dtype)
@wraps(chunk.mean)
def mean(a, axis=None, dtype=None, keepdims=False, split_every=None, out=None):
if dtype is not None:
dt = dtype
else:
dt = getattr(np.mean(np.empty(shape=(1,), dtype=a.dtype)), 'dtype', object)
return reduction(a, mean_chunk, mean_agg, axis=axis, keepdims=keepdims,
dtype=dt, split_every=split_every, combine=mean_combine,
out=out)
def nanmean(a, axis=None, dtype=None, keepdims=False, split_every=None,
out=None):
if dtype is not None:
dt = dtype
else:
dt = getattr(np.mean(np.empty(shape=(1,), dtype=a.dtype)), 'dtype', object)
return reduction(a, partial(mean_chunk, sum=chunk.nansum, numel=nannumel),
mean_agg, axis=axis, keepdims=keepdims, dtype=dt,
split_every=split_every, out=out,
combine=partial(mean_combine, sum=chunk.nansum, numel=nannumel))
with ignoring(AttributeError):
nanmean = wraps(chunk.nanmean)(nanmean)
def moment_chunk(A, order=2, sum=chunk.sum, numel=numel, dtype='f8', **kwargs):
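    # Per-chunk statistics: total, element count, and sums of the centered powers (A - chunk_mean)**i for i in 2..order.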
total = sum(A, dtype=dtype, **kwargs)
n = numel(A, **kwargs).astype(np.int64)
u = total / n
empty = empty_lookup.dispatch(type(n))
M = empty(n.shape + (order - 1,), dtype=dtype)
for i in range(2, order + 1):
M[..., i - 2] = sum((A - u)**i, dtype=dtype, **kwargs)
result = empty(n.shape, dtype=[('total', total.dtype),
('n', n.dtype),
('M', M.dtype, (order - 1,))])
result['total'] = total
result['n'] = n
result['M'] = M
return result
def _moment_helper(Ms, ns, inner_term, order, sum, kwargs):
M = Ms[..., order - 2].sum(**kwargs) + sum(ns * inner_term ** order, **kwargs)
for k in range(1, order - 1):
coeff = factorial(order) / (factorial(k) * factorial(order - k))
M += coeff * sum(Ms[..., order - k - 2] * inner_term**k, **kwargs)
return M
def moment_combine(data, order=2, ddof=0, dtype='f8', sum=np.sum, **kwargs):
kwargs['dtype'] = dtype
kwargs['keepdims'] = True
totals = data['total']
ns = data['n']
Ms = data['M']
total = totals.sum(**kwargs)
n = sum(ns, **kwargs)
mu = divide(total, n, dtype=dtype)
inner_term = divide(totals, ns, dtype=dtype) - mu
empty = empty_lookup.dispatch(type(n))
M = empty(n.shape + (order - 1,), dtype=dtype)
for o in range(2, order + 1):
M[..., o - 2] = _moment_helper(Ms, ns, inner_term, o, sum, kwargs)
result = empty(n.shape, dtype=[('total', total.dtype),
('n', n.dtype),
('M', Ms.dtype, (order - 1,))])
result['total'] = total
result['n'] = n
result['M'] = M
return result
def moment_agg(data, order=2, ddof=0, dtype='f8', sum=np.sum, **kwargs):
totals = data['total']
ns = data['n']
Ms = data['M']
kwargs['dtype'] = dtype
# To properly handle ndarrays, the original dimensions need to be kept for
# part of the calculation.
keepdim_kw = kwargs.copy()
keepdim_kw['keepdims'] = True
n = sum(ns, **keepdim_kw)
mu = divide(totals.sum(**keepdim_kw), n, dtype=dtype)
inner_term = divide(totals, ns, dtype=dtype) - mu
M = _moment_helper(Ms, ns, inner_term, order, sum, kwargs)
return divide(M, sum(n, **kwargs) - ddof, dtype=dtype)
def moment(a, order, axis=None, dtype=None, keepdims=False, ddof=0,
split_every=None, out=None):
if not isinstance(order, int) or order < 0:
raise ValueError("Order must be an integer >= 0")
if order < 2:
reduced = a.sum(axis=axis) # get reduced shape and chunks
if order == 0:
# When order equals 0, the result is 1, by definition.
return ones(reduced.shape, chunks=reduced.chunks, dtype='f8')
# By definition the first order about the mean is 0.
return zeros(reduced.shape, chunks=reduced.chunks, dtype='f8')
if dtype is not None:
dt = dtype
else:
dt = getattr(np.var(np.ones(shape=(1,), dtype=a.dtype)), 'dtype', object)
return reduction(a, partial(moment_chunk, order=order),
partial(moment_agg, order=order, ddof=ddof),
axis=axis, keepdims=keepdims,
dtype=dt, split_every=split_every, out=out,
combine=partial(moment_combine, order=order))
@wraps(chunk.var)
def var(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None,
out=None):
if dtype is not None:
dt = dtype
else:
dt = getattr(np.var(np.ones(shape=(1,), dtype=a.dtype)), 'dtype', object)
return reduction(a, moment_chunk, partial(moment_agg, ddof=ddof), axis=axis,
keepdims=keepdims, dtype=dt, split_every=split_every,
combine=moment_combine, name='var', out=out)
def nanvar(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None,
out=None):
if dtype is not None:
dt = dtype
else:
dt = getattr(np.var(np.ones(shape=(1,), dtype=a.dtype)), 'dtype', object)
return reduction(a, partial(moment_chunk, sum=chunk.nansum, numel=nannumel),
partial(moment_agg, sum=np.nansum, ddof=ddof), axis=axis,
keepdims=keepdims, dtype=dt, split_every=split_every,
combine=partial(moment_combine, sum=np.nansum), out=out)
with ignoring(AttributeError):
nanvar = wraps(chunk.nanvar)(nanvar)
@wraps(chunk.std)
def std(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None,
out=None):
result = sqrt(a.var(axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof,
split_every=split_every, out=out))
if dtype and dtype != result.dtype:
result = result.astype(dtype)
return result
def nanstd(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None,
out=None):
result = sqrt(nanvar(a, axis=axis, dtype=dtype, keepdims=keepdims,
ddof=ddof, split_every=split_every, out=out))
if dtype and dtype != result.dtype:
result = result.astype(dtype)
return result
with ignoring(AttributeError):
nanstd = wraps(chunk.nanstd)(nanstd)
def _arg_combine(data, axis, argfunc, keepdims=False):
""" Merge intermediate results from ``arg_*`` functions"""
axis = None if len(axis) == data.ndim or data.ndim == 1 else axis[0]
vals = data['vals']
arg = data['arg']
if axis is None:
local_args = argfunc(vals, axis=axis, keepdims=keepdims)
vals = vals.ravel()[local_args]
arg = arg.ravel()[local_args]
else:
local_args = argfunc(vals, axis=axis)
inds = np.ogrid[tuple(map(slice, local_args.shape))]
inds.insert(axis, local_args)
inds = tuple(inds)
vals = vals[inds]
arg = arg[inds]
if keepdims:
vals = np.expand_dims(vals, axis)
arg = np.expand_dims(arg, axis)
return arg, vals
def arg_chunk(func, argfunc, x, axis, offset_info):
arg_axis = None if len(axis) == x.ndim or x.ndim == 1 else axis[0]
vals = func(x, axis=arg_axis, keepdims=True)
arg = argfunc(x, axis=arg_axis, keepdims=True)
if arg_axis is None:
offset, total_shape = offset_info
ind = np.unravel_index(arg.ravel()[0], x.shape)
total_ind = tuple(o + i for (o, i) in zip(offset, ind))
arg[:] = np.ravel_multi_index(total_ind, total_shape)
else:
arg += offset_info
if isinstance(vals, np.ma.masked_array):
if 'min' in argfunc.__name__:
fill_value = np.ma.minimum_fill_value(vals)
else:
fill_value = np.ma.maximum_fill_value(vals)
vals = np.ma.filled(vals, fill_value)
result = np.empty(shape=vals.shape, dtype=[('vals', vals.dtype),
('arg', arg.dtype)])
result['vals'] = vals
result['arg'] = arg
return result
def arg_combine(func, argfunc, data, axis=None, **kwargs):
arg, vals = _arg_combine(data, axis, argfunc, keepdims=True)
result = np.empty(shape=vals.shape, dtype=[('vals', vals.dtype),
('arg', arg.dtype)])
result['vals'] = vals
result['arg'] = arg
return result
def arg_agg(func, argfunc, data, axis=None, **kwargs):
return _arg_combine(data, axis, argfunc, keepdims=False)[0]
def nanarg_agg(func, argfunc, data, axis=None, **kwargs):
arg, vals = _arg_combine(data, axis, argfunc, keepdims=False)
if np.any(np.isnan(vals)):
raise ValueError("All NaN slice encountered")
return arg
def arg_reduction(x, chunk, combine, agg, axis=None, split_every=None, out=None):
""" Generic function for argreduction.
Parameters
----------
x : Array
chunk : callable
Partialed ``arg_chunk``.
combine : callable
Partialed ``arg_combine``.
agg : callable
Partialed ``arg_agg``.
axis : int, optional
split_every : int or dict, optional
"""
if axis is None:
axis = tuple(range(x.ndim))
ravel = True
elif isinstance(axis, int):
axis = validate_axis(axis, x.ndim)
axis = (axis,)
ravel = x.ndim == 1
else:
raise TypeError("axis must be either `None` or int, "
"got '{0}'".format(axis))
# Map chunk across all blocks
name = 'arg-reduce-{0}'.format(tokenize(axis, x, chunk,
combine, split_every))
old = x.name
keys = list(product(*map(range, x.numblocks)))
offsets = list(product(*(accumulate(operator.add, bd[:-1], 0)
for bd in x.chunks)))
if ravel:
offset_info = zip(offsets, repeat(x.shape))
else:
offset_info = pluck(axis[0], offsets)
chunks = tuple((1, ) * len(c) if i in axis else c for (i, c)
in enumerate(x.chunks))
dsk = dict(((name,) + k, (chunk, (old,) + k, axis, off)) for (k, off)
in zip(keys, offset_info))
# The dtype of `tmp` doesn't actually matter, just need to provide something
tmp = Array(sharedict.merge(x.dask, (name, dsk)), name, chunks, dtype=x.dtype)
dtype = np.argmin([1]).dtype
result = _tree_reduce(tmp, agg, axis, False, dtype, split_every, combine)
return handle_out(out, result)
def make_arg_reduction(func, argfunc, is_nan_func=False):
""" Create an argreduction callable
Parameters
----------
func : callable
The reduction (e.g. ``min``)
argfunc : callable
The argreduction (e.g. ``argmin``)
"""
chunk = partial(arg_chunk, func, argfunc)
combine = partial(arg_combine, func, argfunc)
if is_nan_func:
agg = partial(nanarg_agg, func, argfunc)
else:
agg = partial(arg_agg, func, argfunc)
@wraps(argfunc)
def _(x, axis=None, split_every=None, out=None):
return arg_reduction(x, chunk, combine, agg, axis,
split_every=split_every, out=out)
return _
def _nanargmin(x, axis, **kwargs):
try:
return chunk.nanargmin(x, axis, **kwargs)
except ValueError:
return chunk.nanargmin(np.where(np.isnan(x), np.inf, x), axis, **kwargs)
def _nanargmax(x, axis, **kwargs):
try:
return chunk.nanargmax(x, axis, **kwargs)
except ValueError:
return chunk.nanargmax(np.where(np.isnan(x), -np.inf, x), axis, **kwargs)
argmin = make_arg_reduction(chunk.min, chunk.argmin)
argmax = make_arg_reduction(chunk.max, chunk.argmax)
nanargmin = make_arg_reduction(chunk.nanmin, _nanargmin, True)
nanargmax = make_arg_reduction(chunk.nanmax, _nanargmax, True)
def cumreduction(func, binop, ident, x, axis=None, dtype=None, out=None):
""" Generic function for cumulative reduction
Parameters
----------
func: callable
Cumulative function like np.cumsum or np.cumprod
binop: callable
Associated binary operator like ``np.cumsum->add`` or ``np.cumprod->mul``
ident: Number
Associated identity like ``np.cumsum->0`` or ``np.cumprod->1``
x: dask Array
axis: int
dtype: dtype
Returns
-------
dask array
See also
--------
cumsum
cumprod
"""
if axis is None:
x = x.flatten()
axis = 0
if dtype is None:
dtype = getattr(func(np.empty((0,), dtype=x.dtype)), 'dtype', object)
assert isinstance(axis, int)
axis = validate_axis(axis, x.ndim)
m = x.map_blocks(func, axis=axis, dtype=dtype)
name = '{0}-{1}'.format(func.__name__, tokenize(func, axis, binop,
ident, x, dtype))
n = x.numblocks[axis]
full = slice(None, None, None)
slc = (full,) * axis + (slice(-1, None),) + (full,) * (x.ndim - axis - 1)
indices = list(product(*[range(nb) if i != axis else [0]
for i, nb in enumerate(x.numblocks)]))
dsk = dict()
for ind in indices:
shape = tuple(x.chunks[i][ii] if i != axis else 1
for i, ii in enumerate(ind))
dsk[(name, 'extra') + ind] = (np.full, shape, ident, m.dtype)
dsk[(name,) + ind] = (m.name,) + ind
for i in range(1, n):
last_indices = indices
indices = list(product(*[range(nb) if ii != axis else [i]
for ii, nb in enumerate(x.numblocks)]))
for old, ind in zip(last_indices, indices):
this_slice = (name, 'extra') + ind
dsk[this_slice] = (binop, (name, 'extra') + old,
(operator.getitem, (m.name,) + old, slc))
dsk[(name,) + ind] = (binop, this_slice, (m.name,) + ind)
result = Array(sharedict.merge(m.dask, (name, dsk)), name, x.chunks, m.dtype)
return handle_out(out, result)
def _cumsum_merge(a, b):
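    # For masked arrays, add the underlying data but keep the mask of the current block b.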
if isinstance(a, np.ma.masked_array) or isinstance(b, np.ma.masked_array):
values = np.ma.getdata(a) + np.ma.getdata(b)
return np.ma.masked_array(values, mask=np.ma.getmaskarray(b))
return a + b
def _cumprod_merge(a, b):
if isinstance(a, np.ma.masked_array) or isinstance(b, np.ma.masked_array):
values = np.ma.getdata(a) * np.ma.getdata(b)
return np.ma.masked_array(values, mask=np.ma.getmaskarray(b))
return a * b
@wraps(np.cumsum)
def cumsum(x, axis=None, dtype=None, out=None):
return cumreduction(np.cumsum, _cumsum_merge, 0, x, axis, dtype, out=out)
@wraps(np.cumprod)
def cumprod(x, axis=None, dtype=None, out=None):
return cumreduction(np.cumprod, _cumprod_merge, 1, x, axis, dtype, out=out)
def topk(a, k, axis=-1, split_every=None):
""" Extract the k largest elements from a on the given axis,
and return them sorted from largest to smallest.
If k is negative, extract the -k smallest elements instead,
and return them sorted from smallest to largest.
This performs best when ``k`` is much smaller than the chunk size. All
results will be returned in a single chunk along the given axis.
Parameters
----------
x: Array
Data being sorted
k: int
axis: int, optional
split_every: int >=2, optional
See :func:`reduce`. This parameter becomes very important when k is
on the same order of magnitude of the chunk size or more, as it
prevents getting the whole or a significant portion of the input array
in memory all at once, with a negative impact on network transfer
too when running on distributed.
Returns
-------
Selection of x with size abs(k) along the given axis.
Examples
--------
>>> import dask.array as da
>>> x = np.array([5, 1, 3, 6])
>>> d = da.from_array(x, chunks=2)
>>> d.topk(2).compute()
array([6, 5])
>>> d.topk(-2).compute()
array([1, 3])
"""
axis = validate_axis(axis, a.ndim)
# chunk and combine steps of the reduction, which recursively invoke
# np.partition to pick the top/bottom k elements from the previous step.
# The selection is not sorted internally.
chunk_combine = partial(chunk.topk, k=k)
# aggregate step of the reduction. Internally invokes the chunk/combine
# function, then sorts the results internally.
aggregate = partial(chunk.topk_aggregate, k=k)
return reduction(
a, chunk=chunk_combine, combine=chunk_combine, aggregate=aggregate,
axis=axis, keepdims=True, dtype=a.dtype, split_every=split_every,
output_size=abs(k))
def argtopk(a, k, axis=-1, split_every=None):
""" Extract the indices of the k largest elements from a on the given axis,
and return them sorted from largest to smallest. If k is negative, extract
the indices of the -k smallest elements instead, and return them sorted
from smallest to largest.
This performs best when ``k`` is much smaller than the chunk size. All
results will be returned in a single chunk along the given axis.
Parameters
----------
x: Array
Data being sorted
k: int
axis: int, optional
split_every: int >=2, optional
See :func:`topk`. The performance considerations for topk also apply
here.
Returns
-------
Selection of np.intp indices of x with size abs(k) along the given axis.
Examples
--------
>>> import dask.array as da
>>> x = np.array([5, 1, 3, 6])
>>> d = da.from_array(x, chunks=2)
>>> d.argtopk(2).compute()
array([3, 0])
>>> d.argtopk(-2).compute()
array([1, 2])
"""
axis = validate_axis(axis, a.ndim)
# Generate nodes where every chunk is a tuple of (a, original index of a)
idx = arange(a.shape[axis], chunks=(a.chunks[axis], ), dtype=np.intp)
idx = idx[tuple(slice(None) if i == axis else np.newaxis
for i in range(a.ndim))]
a_plus_idx = a.map_blocks(chunk.argtopk_preprocess, idx,
dtype=object)
# chunk and combine steps of the reduction. They acquire in input a tuple
# of (a, original indices of a) and return another tuple containing the top
# k elements of a and the matching original indices. The selection is not
# sorted internally, as in np.argpartition.
chunk_combine = partial(chunk.argtopk, k=k)
# aggregate step of the reduction. Internally invokes the chunk/combine
# function, then sorts the results internally, drops a and returns the
# index only.
aggregate = partial(chunk.argtopk_aggregate, k=k)
return reduction(
a_plus_idx, chunk=chunk_combine, combine=chunk_combine,
aggregate=aggregate, axis=axis, keepdims=True, dtype=np.intp,
split_every=split_every, concatenate=False, output_size=abs(k))
|
py | b40208295a2d50782faa3ab573dc7f50e6695949 | import os
import AppKit
import mojo
from mojo.events import installTool, EditingTool, BaseEventTool, setActiveEventTool
from mojo.drawingTools import *
from mojo.UI import UpdateCurrentGlyphView, CurrentGlyphWindow, GetFile
from defconAppKit.windows.baseWindow import BaseWindowController
from vanilla import *
from glyphConstruction import ParseGlyphConstructionListFromString, GlyphConstructionBuilder
from importlib import reload
import recipee
reload(recipee)
compositeToolBundle = mojo.extensions.ExtensionBundle("CompositeTool")
toolbarIconPath = os.path.join(compositeToolBundle.resourcesPath(), "icon.pdf")
toolbarIcon = AppKit.NSImage.alloc().initWithContentsOfFile_(toolbarIconPath)
glyph_constructor = recipee.glyph_constructor
class SettingsWindow(BaseWindowController):
def __init__(self):
self.constructions = glyph_constructor
self.w = FloatingWindow((200, 70), "Window Demo")
self.w.myButton = SquareButton((10, 10, -10, 20), "Load glyph construction", callback=self.changerecipee)
self.w.updateComposites = CheckBox((10, 40, -10, 20), 'Update composites', value=True)
self.w.getNSWindow().setStyleMask_(False)
self.w.open()
def changerecipee(self, sender):
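        # Prompt for a glyph construction file and write its contents back into the recipee module,
        # so the new rules are used immediately and persist across restarts.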
        root = GetFile(message="Please select a txt file containing a glyph construction recipe", title="Select a txt file", allowsMultipleSelection=False, fileTypes=["glyphConstruction", "txt"])
with open(root, 'r') as file:
data = file.read()
# Write over existing data
with open(recipee.__file__, "w") as f:
f.write(f"glyph_constructor = '''{data}'''")
recipee.glyph_constructor = data
self.constructions = data
class ComponentTool(EditingTool):
def setup(self):
self.settingsWindow = None
self.glyph_constructor = self.SettingsWindow.constructions
self.constructions = ParseGlyphConstructionListFromString(self.SettingsWindow.constructions)
def getToolbarTip(self):
return 'Component link'
def getToolbarIcon(self):
## return the toolbar icon
return toolbarIcon
def becomeActive(self):
self.SettingsWindow = SettingsWindow()
def becomeInactive(self):
self.SettingsWindow.w.close()
def draw(self, viewScale, g=None):
if g is None:
g = self.getGlyph()
if g is not None:
save()
self.updateComp(g, viewScale)
restore()
def drawInfos(self, new_x_baseGlyph_anchor, new_y_baseGlyph_anchor, viewScale, glyphView, baseGlyph_anchor):
color = (1, 0, 0, 1)
ovalSize = 5 * viewScale
fill(1, 0, 0, 1)
stroke(None)
oval(new_x_baseGlyph_anchor-ovalSize/2, new_y_baseGlyph_anchor-ovalSize/2, ovalSize, ovalSize)
textAttributes = {
AppKit.NSFontAttributeName: AppKit.NSFont.systemFontOfSize_(11),
}
glyphView.drawTextAtPoint(
"%s (%d, %d)"%(baseGlyph_anchor.name, new_x_baseGlyph_anchor, new_y_baseGlyph_anchor),
textAttributes,
(new_x_baseGlyph_anchor, new_y_baseGlyph_anchor),
yOffset=-ovalSize-8,
drawBackground=True,
centerX=True,
centerY=True,
roundBackground=False,
backgroundXAdd=7,
backgroundYadd=2)
def updateRelatedComposites(self, glyph_constructor, cg, cf, new_base_glyph, baseGlyph_anchor, composed_glyph):
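        # Rebuild every other composite in the font that is built on the same base glyph and anchor,
        # so it picks up the anchor position that was just moved.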
for line in glyph_constructor.split("\n"):
if len(line) > 0:
composed_glyph = line.split("=")[0]
recipee = line.split("=")[1]
any_base_glyph = recipee.split("+")[0]
for diacritic_and_anchor in recipee.split("+")[1:]:
new_diacritic = diacritic_and_anchor.split("@")[0]
new_anchor = diacritic_and_anchor.split("@")[1]
if composed_glyph != cg.name and new_base_glyph == any_base_glyph and new_anchor == baseGlyph_anchor.name and composed_glyph in cf.keys():
constructionGlyph = GlyphConstructionBuilder(line, cf)
new_glyph = cf.newGlyph(constructionGlyph.name, clear=True)
# get the destination glyph in the font
new_glyph = cf.newGlyph(constructionGlyph.name, clear=True)
# draw the construction glyph into the destination glyph
constructionGlyph.draw(new_glyph.getPen())
new_glyph.changed()
def updateComp(self, g, viewScale):
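        # When exactly one component is selected, find the construction rule for the current glyph,
        # locate the matching anchor on the base glyph, draw its new position in the glyph view,
        # and move the anchor to follow the selected component's offset.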
if len(g.selectedComponents) == 1:
cf = g.font
cg = g
selected_component = cg.selectedComponents[0]
selected_component_name = cg.selectedComponents[0].baseGlyph
constructions = self.constructions
glyph_constructor = self.glyph_constructor
glyphWindow = CurrentGlyphWindow()
glyphView = glyphWindow.getGlyphView()
if not glyphWindow:
return
for line in glyph_constructor.split("\n"):
if len(line) > 0:
composed_glyph = line.split("=")[0]
recipee = line.split("=")[1]
new_base_glyph = recipee.split("+")[0]
if new_base_glyph == cg.components[0].baseGlyph and cg.name == composed_glyph:
construction = f"{composed_glyph}={recipee}"
constructionGlyph = GlyphConstructionBuilder(construction, cf)
if constructionGlyph.name == cg.name:
for component_index, c in enumerate(constructionGlyph.components):
c = list(c)[0]
if c == selected_component_name:
baseGlyphName = constructionGlyph.components[component_index-1][0]
baseGlyph = cf[baseGlyphName]
recipee = construction.split("=")[1]
for diacritic_and_anchor in recipee.split("+")[1:]:
diacritic = diacritic_and_anchor.split("@")[0]
anchor = diacritic_and_anchor.split("@")[1]
if diacritic == selected_component_name:
selected_component_anchor_name = "_%s"%anchor
for baseGlyph_anchor in baseGlyph.anchors:
if baseGlyph_anchor.name == anchor:
x_baseGlyph_anchor = baseGlyph_anchor.x
y_baseGlyph_anchor = baseGlyph_anchor.y
selected_comp_baseGlyph = cf[selected_component_name]
for selectedComponent_anchor in selected_comp_baseGlyph.anchors:
if selected_component_anchor_name == selectedComponent_anchor.name:
x_offset = 0
y_offset = 0
for previous_components in constructionGlyph.components[1:component_index]:
for cg_component in cg.components:
if cg_component.baseGlyph == previous_components[0]:
x_offset += cg_component.offset[0]
y_offset += cg_component.offset[1]
new_x_baseGlyph_anchor = selectedComponent_anchor.x + selected_component.offset[0] - x_offset
new_y_baseGlyph_anchor = selectedComponent_anchor.y + selected_component.offset[1] - y_offset
self.drawInfos(new_x_baseGlyph_anchor, new_y_baseGlyph_anchor, viewScale, glyphView, baseGlyph_anchor)
### Update baseGlyph anchor
baseGlyph_anchor.x = new_x_baseGlyph_anchor
baseGlyph_anchor.y = new_y_baseGlyph_anchor
if self.SettingsWindow.w.updateComposites.get() == 1:
self.updateRelatedComposites(glyph_constructor, cg, cf, new_base_glyph, baseGlyph_anchor, composed_glyph)
installTool(ComponentTool())
|
py | b40209e4c98f5af4b807e3965bf34d17d9d43d00 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.targets.jvm_target import JvmTarget
from pants.base.exceptions import TargetDefinitionException
from pants.util.memo import memoized_property
from pants.contrib.android.android_manifest_parser import AndroidManifestParser
class AndroidTarget(JvmTarget):
"""A base class for all Android targets."""
def __init__(self,
address=None,
# TODO (mateor) add support for minSDk
# most recent build_tools_version should be defined elsewhere
build_tools_version="19.1.0",
manifest=None,
**kwargs):
"""
:param build_tools_version: API for the Build Tools (separate from SDK version).
Defaults to the latest full release.
:param manifest: path/to/file of 'AndroidManifest.xml' (required name). Paths are relative
to the BUILD file's directory.
"""
super(AndroidTarget, self).__init__(address=address, **kwargs)
self.add_labels('android')
# TODO(pl): These attributes should live in the payload
self.build_tools_version = build_tools_version
self._spec_path = address.spec_path
self._manifest_file = manifest
@memoized_property
def manifest(self):
"""Return an AndroidManifest object made from a manifest by AndroidManifestParser."""
# If there was no 'manifest' field in the BUILD file, try to find one with the default value.
if self._manifest_file is None:
self._manifest_file = 'AndroidManifest.xml'
manifest_path = os.path.join(self._spec_path, self._manifest_file)
if not os.path.isfile(manifest_path):
raise TargetDefinitionException(self, "There is no AndroidManifest.xml at path {0}. Please "
"declare a 'manifest' field with its relative "
"path.".format(manifest_path))
return AndroidManifestParser.parse_manifest(manifest_path)
|
py | b4020b823a38b43b4b3bd0f9b61c676a80bba04c | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="hkjournalist",
version="0.0.8",
author="Xinyi Li",
author_email="[email protected]",
description="Custom Auto Report Generator for Python Program",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/li-xin-yi/HK-journalist",
packages=setuptools.find_packages(),
package_data={'configuration':['hkjournalist/configuration/*']},
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License"
],
install_requires = ['tabulate',
'pandas',
'pandoc',
'matplotlib',
]
)
|
py | b4020c1e66ca009d65e860dc66e23162a018c6c6 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2021 all rights reserved
#
"""
Exercise finding the minimum value of a matrix
"""
def test():
# package access
import gsl
# make a matrix and initialize it
m = gsl.matrix(shape=(100,100)).fill(value=1)
# unpack the shape
s0, s1 = m.shape
# prime
for i0 in range(s0):
for i1 in range(s1):
m[i0, i1] = 2*(i0+i1)+1
# find the min
small = m.min()
# check it
assert small == 1
# all done
return m
# main
if __name__ == "__main__":
test()
# end of file
|
py | b4020c712886aa29d97797e2345fddbbd86be225 | import os
import unittest
import torchtext
from SeqModel.evaluator import Predictor
from SeqModel.dataset import SourceField, TargetField
from SeqModel.models import Seq2seq, EncoderRNN, DecoderRNN
class TestPredictor(unittest.TestCase):
@classmethod
def setUpClass(self):
test_path = os.path.dirname(os.path.realpath(__file__))
src = SourceField()
trg = TargetField()
dataset = torchtext.data.TabularDataset(
path=os.path.join(test_path, 'data/eng-fra.txt'), format='tsv',
fields=[('src', src), ('trg', trg)],
)
src.build_vocab(dataset)
trg.build_vocab(dataset)
encoder = EncoderRNN(len(src.vocab), 10, 10, rnn_cell='lstm')
decoder = DecoderRNN(len(trg.vocab), 10, 10, trg.sos_id, trg.eos_id, rnn_cell='lstm')
seq2seq = Seq2seq(encoder, decoder)
self.predictor = Predictor(seq2seq, src.vocab, trg.vocab)
def test_predict(self):
src_seq = ["I", "am", "fat"]
tgt_seq = self.predictor.predict(src_seq)
for tok in tgt_seq:
self.assertTrue(tok in self.predictor.tgt_vocab.stoi)
|
py | b4020da7ce88fb1bc92a5896365f62825e9fd60b | """initial
Revision ID: 6df0d5aac594
Revises:
Create Date: 2017-11-28 23:39:40.098546
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6df0d5aac594'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('reminders',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Text(), nullable=True),
sa.Column('message', sa.Text(), nullable=True),
sa.Column('notify_at', sa.Integer(), nullable=True),
sa.Column('status', sa.String(), server_default='pending', nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('timezones',
sa.Column('user_id', sa.String(), nullable=False),
sa.Column('timezone', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('user_id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('timezones')
op.drop_table('reminders')
# ### end Alembic commands ###
|
py | b4020df6fe0d0d4ef8d7f0ce1bccd8e7a8fc73df | # ******************************************************************************
# Copyright 2017-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
import torch
class ParallelDataset(torch.utils.data.Dataset):
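    """Zip several datasets: item i is the tuple of each dataset's item i; length is that of the shortest dataset."""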
def __init__(self, *datasets):
self.datasets = datasets
def __getitem__(self, i):
return tuple(d[i] for d in self.datasets)
def __len__(self):
return min(len(d) for d in self.datasets)
|
py | b4020eed6926d40aef845f79c0f8c039a34eade5 | #!/usr/bin/env python
'''Generate models for affinity predictions'''
# variables:
# GAP(0)
# PENALTY (0)
# HUBER (false)
# DELTA (0)
# RANKLOSS (0)
basemodel = '''layer {
name: "data"
type: "MolGridData"
top: "data"
top: "label"
top: "affinity"
include {
phase: TEST
}
molgrid_data_param {
source: "TESTFILE"
batch_size: 50
dimension: 23.5
resolution: 0.5
shuffle: false
balanced: false
has_affinity: true
root_folder: "../../"
}
}
layer {
name: "data"
type: "MolGridData"
top: "data"
top: "label"
top: "affinity"
include {
phase: TRAIN
}
molgrid_data_param {
source: "TRAINFILE"
batch_size: 50
dimension: 23.5
resolution: 0.5
shuffle: true
balanced: true
stratify_receptor: true
stratify_affinity_min: 0
stratify_affinity_max: 0
stratify_affinity_step: 0
has_affinity: true
random_rotation: true
random_translate: 2
root_folder: "../../"
}
}
layer {
name: "unit1_pool"
type: "Pooling"
bottom: "data"
top: "unit1_pool"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "unit1_conv1"
type: "Convolution"
bottom: "unit1_pool"
top: "unit1_conv1"
convolution_param {
num_output: 32
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
}
}
layer {
name: "unit1_relu1"
type: "ReLU"
bottom: "unit1_conv1"
top: "unit1_conv1"
}
layer {
name: "unit2_pool"
type: "Pooling"
bottom: "unit1_conv1"
top: "unit2_pool"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "unit2_conv1"
type: "Convolution"
bottom: "unit2_pool"
top: "unit2_conv1"
convolution_param {
num_output: 64
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
}
}
layer {
name: "unit2_relu1"
type: "ReLU"
bottom: "unit2_conv1"
top: "unit2_conv1"
}
layer {
name: "unit3_pool"
type: "Pooling"
bottom: "unit2_conv1"
top: "unit3_pool"
pooling_param {
pool: MAX
kernel_size: 2
stride: 2
}
}
layer {
name: "unit3_conv1"
type: "Convolution"
bottom: "unit3_pool"
top: "unit3_conv1"
convolution_param {
num_output: 128
pad: 1
kernel_size: 3
stride: 1
weight_filler {
type: "xavier"
}
}
}
layer {
name: "unit3_relu1"
type: "ReLU"
bottom: "unit3_conv1"
top: "unit3_conv1"
}
layer {
name: "split"
type: "Split"
bottom: "unit3_conv1"
top: "split"
}
layer {
name: "output_fc"
type: "InnerProduct"
bottom: "split"
top: "output_fc"
inner_product_param {
num_output: 2
weight_filler {
type: "xavier"
}
}
}
layer {
name: "loss"
type: "SoftmaxWithLoss"
bottom: "output_fc"
bottom: "label"
top: "loss"
}
layer {
name: "output"
type: "Softmax"
bottom: "output_fc"
top: "output"
}
layer {
name: "labelout"
type: "Split"
bottom: "label"
top: "labelout"
include {
phase: TEST
}
}
layer {
name: "output_fc_aff"
type: "InnerProduct"
bottom: "split"
top: "output_fc_aff"
inner_product_param {
num_output: 1
weight_filler {
type: "xavier"
}
}
}
layer {
name: "rmsd"
type: "AffinityLoss"
bottom: "output_fc_aff"
bottom: "affinity"
top: "rmsd"
affinity_loss_param {
scale: 0.1
gap: GAP
penalty: PENALTY
pseudohuber: HUBER
delta: DELTA
}
}
layer {
name: "predaff"
type: "Flatten"
bottom: "output_fc_aff"
top: "predaff"
}
layer {
name: "affout"
type: "Split"
bottom: "affinity"
top: "affout"
include {
phase: TEST
}
}
'''
def makemodel(**kwargs):
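    # Substitute each placeholder token (e.g. GAP, PENALTY, HUBER, DELTA) in the prototxt template with its value.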
m = basemodel
for (k,v) in kwargs.iteritems():
m = m.replace(k,str(v))
return m
# GAP(0)
# PENALTY (0)
# HUBER (false)
# DELTA (0)
# RANKLOSS (0)
models = []
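# Sweep the gap/penalty/delta hyperparameters of the AffinityLoss layer; a nonzero delta switches on the pseudo-Huber variant.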
for gap in [0,1,2]:
for penalty in [0,1,2,4]:
for delta in [0,1,2,4,6]:
if delta == 0: huber = "false"
else: huber = "true"
model = makemodel(GAP=gap,PENALTY=penalty,HUBER=huber, DELTA=delta)
m = 'affinity_g%d_p%d_h%d.model'%(gap,penalty, delta)
models.append(m)
out = open(m,'w')
out.write(model)
for m in models:
print "train.py -m %s -p ../types/all_0.5_0_ --keep_best -t 1000 -i 100000 --reduced -o all_%s"%(m,m.replace('.model',''))
|
py | b4020ffd30c61fead2a265bfed9307435d1e5244 | #!/usr/bin/env python
# Copyright (C) 2011 Smarkets Limited <[email protected]>
#
# This module is released under the MIT License:
# http://www.opensource.org/licenses/mit-license.php
import glob
import io
import os
import shutil
import subprocess
import sys
from distutils.command import build, clean
from distutils.spawn import find_executable
from itertools import chain
from os.path import abspath, dirname, join
from setuptools import setup
PROJECT_ROOT = abspath(dirname(__file__))
sys.path.insert(0, PROJECT_ROOT)
def check_call(*args, **kwargs):
print('Calling %s, %s' % (args, kwargs,))
subprocess.check_call(*args, **kwargs)
ETO_PIQI_URL = 'https://raw.github.com/smarkets/eto_common/v1.2.3/eto.piqi'
SETO_PIQI_URL = 'https://raw.github.com/smarkets/smk_api_common/v6.6.0/seto.piqi'
def _safe_glob(pathname):
"Do a safe version of glob which copes with win32"
is_win32 = sys.platform == 'win32'
for source in glob.glob(pathname):
yield source.replace('/', '\\') if is_win32 else source
protobuf_modules = ['eto', 'seto']
def protobuf_module_file(name):
return join(PROJECT_ROOT, 'smarkets', 'streaming_api', '%s_pb2.py' % (name,))
class SmarketsProtocolBuild(build.build):
"Class to build the protobuf output"
description = "build the protocol buffer output with protobuf-compiler"
def download(self, url):
check_call((self.find('wget'), url))
def find(self, name):
result = find_executable(name)
if result is None:
raise Exception("*** Cannot find %s; make sure it's installed" % (name,))
return result
def run(self):
"Get the .piqi definitions and run the 'protoc' compiler command"
eto_piqi = join(PROJECT_ROOT, 'eto.piqi')
if not os.path.exists(eto_piqi):
self.download(ETO_PIQI_URL)
seto_piqi = join(PROJECT_ROOT, 'seto.piqi')
if not os.path.exists(seto_piqi):
self.download(SETO_PIQI_URL)
eto_proto = join(PROJECT_ROOT, 'smarkets.streaming_api.eto.proto')
if not os.path.exists(eto_proto):
check_call((self.find('piqi'), 'to-proto', eto_piqi, '-o', eto_proto))
seto_proto = join(PROJECT_ROOT, 'smarkets.streaming_api.seto.proto')
if not os.path.exists(seto_proto):
check_call((self.find('piqi'), 'to-proto', seto_piqi, '-o', seto_proto))
self.replace_file(seto_proto,
lambda line: line.replace(
'import "eto.piqi.proto"',
'import "smarkets.streaming_api.eto.proto"'))
for pkg in protobuf_modules:
dst_pkg_file = protobuf_module_file(pkg)
if not os.path.exists(dst_pkg_file):
check_call((self.find('protoc'),
'--python_out=.', 'smarkets.streaming_api.%s.proto' % (pkg,)))
build.build.run(self)
@staticmethod
def replace_file(filename, line_map):
"Map line_map for each line in filename"
with open(filename, "r") as sources:
lines = sources.readlines()
with open(filename, "w") as sources:
for line in lines:
sources.write(line_map(line))
class SmarketsProtocolClean(clean.clean):
"""Class to clean up the built protobuf files."""
description = "clean up files generated by protobuf-compiler"
def run(self):
"""Do the clean up"""
for src_dir in [
join('build', 'pb'),
]:
src_dir = join(PROJECT_ROOT, src_dir)
if os.path.exists(src_dir):
shutil.rmtree(src_dir)
for filename in chain(
_safe_glob('*.proto'),
_safe_glob('*.piqi'),
(join(PROJECT_ROOT, 'smarkets', 'streaming_api', '%s_pb2.py' % key)
for key in ('eto', 'seto'))):
if os.path.exists(filename):
os.unlink(filename)
# Call the parent class clean command
clean.clean.run(self)
readme_path = join(PROJECT_ROOT, 'README.rst')
with io.open(readme_path, encoding='utf-8') as f:
long_description = f.read()
# this is not ideal but at at least we're not repeating ourselved when updating package version
with open(join(PROJECT_ROOT, 'smarkets', '__init__.py')) as f:
version_line = [line for line in f if line.startswith('__version__')][0]
__version__ = version_line.split('=')[1].strip().strip("'").strip('"')
sdict = {
'name': 'smk_python_sdk',
'version': __version__,
'description': 'Smarkets Python SDK - API clients and utility library',
'long_description': long_description,
'url': 'https://github.com/smarkets/smk_python_sdk',
'download_url': 'https://github.com/smarkets/smk_python_sdk/downloads/smk_python_sdk-%s.tar.gz' % (
__version__,),
'author': 'Smarkets Limited',
'author_email': '[email protected]',
'maintainer': 'Smarkets Limited',
'maintainer_email': '[email protected]',
'keywords': ['Smarkets', 'betting exchange'],
'license': 'MIT',
'packages': ['smarkets', 'smarkets.streaming_api', 'smarkets.tests'],
'classifiers': [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python'],
'install_requires': [
'decorator',
'iso8601',
'protobuf',
'pytz',
'six',
],
'zip_safe': False,
'cmdclass': {
'build': SmarketsProtocolBuild,
'clean': SmarketsProtocolClean,
},
}
def creating_a_distribution():
command_line = ' '.join(sys.argv)
return 'sdist' in command_line or 'bdist' in command_line
def make_sure_the_package_is_built():
# It used to be *very* easy to create a sdist/bdist without building
# the package first and the resulting distribution would be incomplete,
# this is to prevent that from happening.
for name in protobuf_modules:
file_name = protobuf_module_file(name)
assert os.path.isfile(file_name), '%r not built' % (file_name,)
if __name__ == '__main__':
if creating_a_distribution():
make_sure_the_package_is_built()
setup(**sdict)
|
py | b4021158e7a313d71c719f8e9a25626320c75c47 | from typing import Any
class f_base(int):
pass
class f_entity(f_base):
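    """An int subclass whose value is the entity id (or properties['pk']); attributes not set explicitly are looked up in the entity's tags dict."""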
obj_type = 'node'
obj_tags = 'tags'
obj_default_tags = {'type': 'node', 'tags': {}}
def __new__(cls, **kwargs):
return super().__new__(cls, kwargs.get('id') or kwargs['properties']['pk'])
def __init__(self, **kwargs) -> None:
for k, v in self.obj_default_tags.copy().items():
setattr(self, k, v)
self.position = [0.0, 0.0]
self.type = self.obj_type
self.owner = None
for k, v in kwargs.items():
setattr(self, k, v)
def __getattr__(self, attr) -> Any:
return getattr(self, self.obj_tags, {}).get(attr)
@property
def __dict__(self):
cpy = super().__dict__.copy()
try:
del cpy['owner']
except KeyError:
pass
return cpy
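# Minimal usage sketch (illustrative only, not part of the original module):
# the entity id is taken from 'id' or 'properties.pk', the object compares as
# an int, and unknown attributes fall back to the 'tags' dict via __getattr__.
# The id and tag values below are made up for demonstration.
if __name__ == '__main__':
    node = f_entity(id=42, tags={'name': 'corner cafe'})
    assert int(node) == 42
    assert node.type == 'node'          # from obj_default_tags / obj_type
    assert node.name == 'corner cafe'   # resolved through __getattr__ -> tags
    assert node.missing_tag is None     # absent tags resolve to None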
|
py | b40211d5a91cf27dd1ec6bbac5031b57beebddd3 | """The tests for the facebox component."""
import pytest
import requests
import requests_mock
import homeassistant.components.facebox.image_processing as fb
import homeassistant.components.image_processing as ip
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
CONF_FRIENDLY_NAME,
CONF_IP_ADDRESS,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
HTTP_BAD_REQUEST,
HTTP_OK,
HTTP_UNAUTHORIZED,
STATE_UNKNOWN,
)
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
from tests.async_mock import Mock, mock_open, patch
MOCK_IP = "192.168.0.1"
MOCK_PORT = "8080"
# Mock data returned by the facebox API.
MOCK_BOX_ID = "b893cc4f7fd6"
MOCK_ERROR_NO_FACE = "No face found"
MOCK_FACE = {
"confidence": 0.5812028911604818,
"id": "john.jpg",
"matched": True,
"name": "John Lennon",
"rect": {"height": 75, "left": 63, "top": 262, "width": 74},
}
MOCK_FILE_PATH = "/images/mock.jpg"
MOCK_HEALTH = {
"success": True,
"hostname": "b893cc4f7fd6",
"metadata": {"boxname": "facebox", "build": "development"},
"errors": [],
}
MOCK_JSON = {"facesCount": 1, "success": True, "faces": [MOCK_FACE]}
MOCK_NAME = "mock_name"
MOCK_USERNAME = "mock_username"
MOCK_PASSWORD = "mock_password"
# Faces data after parsing.
PARSED_FACES = [
{
fb.FACEBOX_NAME: "John Lennon",
fb.ATTR_IMAGE_ID: "john.jpg",
fb.ATTR_CONFIDENCE: 58.12,
fb.ATTR_MATCHED: True,
fb.ATTR_BOUNDING_BOX: {"height": 75, "left": 63, "top": 262, "width": 74},
}
]
MATCHED_FACES = {"John Lennon": 58.12}
VALID_ENTITY_ID = "image_processing.facebox_demo_camera"
VALID_CONFIG = {
ip.DOMAIN: {
"platform": "facebox",
CONF_IP_ADDRESS: MOCK_IP,
CONF_PORT: MOCK_PORT,
ip.CONF_SOURCE: {ip.CONF_ENTITY_ID: "camera.demo_camera"},
},
"camera": {"platform": "demo"},
}
@pytest.fixture
def mock_healthybox():
"""Mock fb.check_box_health."""
check_box_health = (
"homeassistant.components.facebox.image_processing.check_box_health"
)
with patch(check_box_health, return_value=MOCK_BOX_ID) as _mock_healthybox:
yield _mock_healthybox
@pytest.fixture
def mock_isfile():
"""Mock os.path.isfile."""
with patch(
"homeassistant.components.facebox.image_processing.cv.isfile", return_value=True
) as _mock_isfile:
yield _mock_isfile
@pytest.fixture
def mock_image():
"""Return a mock camera image."""
with patch(
"homeassistant.components.demo.camera.DemoCamera.camera_image",
return_value=b"Test",
) as image:
yield image
@pytest.fixture
def mock_open_file():
"""Mock open."""
mopen = mock_open()
with patch(
"homeassistant.components.facebox.image_processing.open", mopen, create=True
) as _mock_open:
yield _mock_open
def test_check_box_health(caplog):
"""Test check box health."""
with requests_mock.Mocker() as mock_req:
url = f"http://{MOCK_IP}:{MOCK_PORT}/healthz"
mock_req.get(url, status_code=HTTP_OK, json=MOCK_HEALTH)
assert fb.check_box_health(url, "user", "pass") == MOCK_BOX_ID
mock_req.get(url, status_code=HTTP_UNAUTHORIZED)
assert fb.check_box_health(url, None, None) is None
assert "AuthenticationError on facebox" in caplog.text
mock_req.get(url, exc=requests.exceptions.ConnectTimeout)
fb.check_box_health(url, None, None)
assert "ConnectionError: Is facebox running?" in caplog.text
def test_encode_image():
"""Test that binary data is encoded correctly."""
assert fb.encode_image(b"test") == "dGVzdA=="
def test_get_matched_faces():
"""Test that matched_faces are parsed correctly."""
assert fb.get_matched_faces(PARSED_FACES) == MATCHED_FACES
def test_parse_faces():
"""Test parsing of raw face data, and generation of matched_faces."""
assert fb.parse_faces(MOCK_JSON["faces"]) == PARSED_FACES
@patch("os.access", Mock(return_value=False))
def test_valid_file_path():
"""Test that an invalid file_path is caught."""
assert not fb.valid_file_path("test_path")
async def test_setup_platform(hass, mock_healthybox):
"""Set up platform with one entity."""
await async_setup_component(hass, ip.DOMAIN, VALID_CONFIG)
assert hass.states.get(VALID_ENTITY_ID)
async def test_setup_platform_with_auth(hass, mock_healthybox):
"""Set up platform with one entity and auth."""
valid_config_auth = VALID_CONFIG.copy()
valid_config_auth[ip.DOMAIN][CONF_USERNAME] = MOCK_USERNAME
valid_config_auth[ip.DOMAIN][CONF_PASSWORD] = MOCK_PASSWORD
await async_setup_component(hass, ip.DOMAIN, valid_config_auth)
assert hass.states.get(VALID_ENTITY_ID)
async def test_process_image(hass, mock_healthybox, mock_image):
"""Test successful processing of an image."""
await async_setup_component(hass, ip.DOMAIN, VALID_CONFIG)
assert hass.states.get(VALID_ENTITY_ID)
face_events = []
@callback
def mock_face_event(event):
"""Mock event."""
face_events.append(event)
hass.bus.async_listen("image_processing.detect_face", mock_face_event)
with requests_mock.Mocker() as mock_req:
url = f"http://{MOCK_IP}:{MOCK_PORT}/facebox/check"
mock_req.post(url, json=MOCK_JSON)
data = {ATTR_ENTITY_ID: VALID_ENTITY_ID}
await hass.services.async_call(ip.DOMAIN, ip.SERVICE_SCAN, service_data=data)
await hass.async_block_till_done()
state = hass.states.get(VALID_ENTITY_ID)
assert state.state == "1"
assert state.attributes.get("matched_faces") == MATCHED_FACES
assert state.attributes.get("total_matched_faces") == 1
PARSED_FACES[0][ATTR_ENTITY_ID] = VALID_ENTITY_ID # Update.
assert state.attributes.get("faces") == PARSED_FACES
assert state.attributes.get(CONF_FRIENDLY_NAME) == "facebox demo_camera"
assert len(face_events) == 1
assert face_events[0].data[ATTR_NAME] == PARSED_FACES[0][ATTR_NAME]
assert (
face_events[0].data[fb.ATTR_CONFIDENCE] == PARSED_FACES[0][fb.ATTR_CONFIDENCE]
)
assert face_events[0].data[ATTR_ENTITY_ID] == VALID_ENTITY_ID
assert face_events[0].data[fb.ATTR_IMAGE_ID] == PARSED_FACES[0][fb.ATTR_IMAGE_ID]
assert (
face_events[0].data[fb.ATTR_BOUNDING_BOX]
== PARSED_FACES[0][fb.ATTR_BOUNDING_BOX]
)
async def test_process_image_errors(hass, mock_healthybox, mock_image, caplog):
"""Test process_image errors."""
await async_setup_component(hass, ip.DOMAIN, VALID_CONFIG)
assert hass.states.get(VALID_ENTITY_ID)
# Test connection error.
with requests_mock.Mocker() as mock_req:
url = f"http://{MOCK_IP}:{MOCK_PORT}/facebox/check"
mock_req.register_uri("POST", url, exc=requests.exceptions.ConnectTimeout)
data = {ATTR_ENTITY_ID: VALID_ENTITY_ID}
await hass.services.async_call(ip.DOMAIN, ip.SERVICE_SCAN, service_data=data)
await hass.async_block_till_done()
assert "ConnectionError: Is facebox running?" in caplog.text
state = hass.states.get(VALID_ENTITY_ID)
assert state.state == STATE_UNKNOWN
assert state.attributes.get("faces") == []
assert state.attributes.get("matched_faces") == {}
# Now test with bad auth.
with requests_mock.Mocker() as mock_req:
url = f"http://{MOCK_IP}:{MOCK_PORT}/facebox/check"
mock_req.register_uri("POST", url, status_code=HTTP_UNAUTHORIZED)
data = {ATTR_ENTITY_ID: VALID_ENTITY_ID}
await hass.services.async_call(ip.DOMAIN, ip.SERVICE_SCAN, service_data=data)
await hass.async_block_till_done()
assert "AuthenticationError on facebox" in caplog.text
async def test_teach_service(
hass, mock_healthybox, mock_image, mock_isfile, mock_open_file, caplog
):
"""Test teaching of facebox."""
await async_setup_component(hass, ip.DOMAIN, VALID_CONFIG)
assert hass.states.get(VALID_ENTITY_ID)
# Patch out 'is_allowed_path' as the mock files aren't allowed
hass.config.is_allowed_path = Mock(return_value=True)
# Test successful teach.
with requests_mock.Mocker() as mock_req:
url = f"http://{MOCK_IP}:{MOCK_PORT}/facebox/teach"
mock_req.post(url, status_code=HTTP_OK)
data = {
ATTR_ENTITY_ID: VALID_ENTITY_ID,
ATTR_NAME: MOCK_NAME,
fb.FILE_PATH: MOCK_FILE_PATH,
}
await hass.services.async_call(
fb.DOMAIN, fb.SERVICE_TEACH_FACE, service_data=data
)
await hass.async_block_till_done()
# Now test with bad auth.
with requests_mock.Mocker() as mock_req:
url = f"http://{MOCK_IP}:{MOCK_PORT}/facebox/teach"
mock_req.post(url, status_code=HTTP_UNAUTHORIZED)
data = {
ATTR_ENTITY_ID: VALID_ENTITY_ID,
ATTR_NAME: MOCK_NAME,
fb.FILE_PATH: MOCK_FILE_PATH,
}
await hass.services.async_call(
fb.DOMAIN, fb.SERVICE_TEACH_FACE, service_data=data
)
await hass.async_block_till_done()
assert "AuthenticationError on facebox" in caplog.text
# Now test the failed teaching.
with requests_mock.Mocker() as mock_req:
url = f"http://{MOCK_IP}:{MOCK_PORT}/facebox/teach"
mock_req.post(url, status_code=HTTP_BAD_REQUEST, text=MOCK_ERROR_NO_FACE)
data = {
ATTR_ENTITY_ID: VALID_ENTITY_ID,
ATTR_NAME: MOCK_NAME,
fb.FILE_PATH: MOCK_FILE_PATH,
}
await hass.services.async_call(
fb.DOMAIN, fb.SERVICE_TEACH_FACE, service_data=data
)
await hass.async_block_till_done()
assert MOCK_ERROR_NO_FACE in caplog.text
# Now test connection error.
with requests_mock.Mocker() as mock_req:
url = f"http://{MOCK_IP}:{MOCK_PORT}/facebox/teach"
mock_req.post(url, exc=requests.exceptions.ConnectTimeout)
data = {
ATTR_ENTITY_ID: VALID_ENTITY_ID,
ATTR_NAME: MOCK_NAME,
fb.FILE_PATH: MOCK_FILE_PATH,
}
await hass.services.async_call(
fb.DOMAIN, fb.SERVICE_TEACH_FACE, service_data=data
)
await hass.async_block_till_done()
assert "ConnectionError: Is facebox running?" in caplog.text
async def test_setup_platform_with_name(hass, mock_healthybox):
"""Set up platform with one entity and a name."""
named_entity_id = f"image_processing.{MOCK_NAME}"
valid_config_named = VALID_CONFIG.copy()
valid_config_named[ip.DOMAIN][ip.CONF_SOURCE][ip.CONF_NAME] = MOCK_NAME
await async_setup_component(hass, ip.DOMAIN, valid_config_named)
assert hass.states.get(named_entity_id)
state = hass.states.get(named_entity_id)
assert state.attributes.get(CONF_FRIENDLY_NAME) == MOCK_NAME
|
py | b402125d528c991d78576689f1ce1f3c07128f33 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def sumNumbers(self, root: TreeNode) -> int:
        # Collect every root-to-leaf number via DFS, then sum them.
        res = []
        self.helper(root, 0, res)
        return sum(res)
    def helper(self, root, val, res):
        if not root:
            return
        if not root.left and not root.right:
            # Leaf: complete the number built along this path and record it.
            res.append(val * 10 + root.val)
            return
        # Internal node: extend the running number and recurse into both subtrees.
        val = val * 10 + root.val
        self.helper(root.left, val, res)
        self.helper(root.right, val, res)
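# Illustrative check (not part of the submitted solution): with the commented-out
# TreeNode definition above, the tree 1 -> (2, 3) produces the root-to-leaf
# numbers 12 and 13, so sumNumbers returns 25.
#
#   root = TreeNode(1, TreeNode(2), TreeNode(3))
#   assert Solution().sumNumbers(root) == 25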
|
py | b4021434996ac6ee52da71728189dcc441e3979e |
import pandas as pd
import json
import requests
from data_Per import *
from data_process import *
'''
This module handles the data gathering process from the API
All the documentation regarding the API is available at:
https://documentation.pubg.com/en/index.html
'''
# Shard specification: url = "https://api.pubg.com/shards/[SHARD HERE]/"
# Here we are only looking for Steam users
url = "https://api.pubg.com/shards/steam/"
headers = {"Authorization": "Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJqdGkiOiIwOTI4NTQ0MC04YzRmLTAxMzctNDRiYy02Yjk0MzRkYzNmOGUiLCJpc3MiOiJnYW1lbG9ja2VyIiwiaWF0IjoxNTYzNTM5NDgwLCJwdWIiOiJibHVlaG9sZSIsInRpdGxlIjoicHViZyIsImFwcCI6ImNoZXRhbnBhbmRleTE3In0.cgjgrI1NLmvZHZmFFbDZsV9vGfxnD8vjqqnLEkMw4p0",
"Accept": "application/vnd.api+json"} # Dictionary which contains API key to access the API
final_data = pd.DataFrame()
def get_match_list(userid):
# Generating the user url
user_url = url + "players?filter[playerNames]=" + userid
req = requests.get(user_url, headers = headers)
if req.status_code == 200:
print("Successfully Connected!!!")
else:
print("Failed to Connect!!!")
player_stat = json.loads(req.text)
match_id_list = player_stat['data'][0]['relationships']['matches']['data'] # Json list which contains all match ids
return match_id_list
# Info of each match
def get_match_stats(match_stat):
match_id = match_stat['data']['id']
match_attributes = match_stat['data']['attributes']
return match_id,match_attributes
# The performance stats of the searched player in each match
def get_performance(userid, match_stat):
match_included = match_stat['included']
for i in match_included:
if (i['type'] == 'participant' and i['attributes']['stats']['name'] == userid):
per_info = i['attributes']['stats']
return per_info
# Example match url format: "https://api.pubg.com/shards/steam/matches/{id}"
def get_performance_stat(userid):
match_id_list = get_match_list(userid)
P = data_per(userid)
for match in match_id_list:
match_id = match['id']
match_url = url + "matches/{}".format(match_id)
match_r = requests.get(match_url, headers = headers)
if match_r.status_code != 200:
print("Failed to Connect!!!")
match_stat = json.loads(match_r.text)
match_data =get_match_stats(match_stat)
per_data= get_performance(userid, match_stat)
P.set_perf_info(per_data)
P.set_match_stat(match_data[0],match_data[1])
return P.return_data()
#x=get_performance_stat("SIKHWARRIOR")
#print(len(x))
def get_data_to_predict(userid):
data = get_performance_stat(userid)
data = list(data)
final_data = generate_data(data)
return final_data
|
py | b40214599b1967c70d54387025b358fab67d234b | from currencycloud.resources.resource import Resource
class Currency(Resource):
pass
class ConversionDates(Resource):
pass
class SettlementAccount(Resource):
pass
class BeneficiaryRequiredDetails(Resource):
pass
class PayerRequiredDetails(Resource):
pass
class PaymentPurposeCode(Resource):
pass
|
py | b40214676e3760a869ca2307fcbc50efbf2431f1 | import os
import sys
import torch
import cv2
import numpy as np
cur_path = os.path.abspath(os.path.dirname(__file__))
root_path = os.path.split(cur_path)[0]
sys.path.append(root_path)
from torchvision import transforms
import torch.nn.functional as F
from PIL import Image
from segmentron.utils.visualize import get_color_pallete
from segmentron.models.model_zoo import get_segmentation_model
from segmentron.utils.options import parse_args
from segmentron.utils.default_setup import default_setup
from segmentron.config import cfg
from crf import DenseCRF
from crfasrnn.crfrnn import CrfRnn
def demo():
args = parse_args()
cfg.update_from_file(args.config_file)
cfg.PHASE = 'test'
cfg.ROOT_PATH = root_path
cfg.check_and_freeze()
default_setup(args)
temp=1.8
# temp=3
usingCRF=False
usingCRF=True
# output folder
output_dir = os.path.join(cfg.VISUAL.OUTPUT_DIR, 'vis_result_{}_{}_{}_{}_temp_{}_crf_{}'.format(
cfg.MODEL.MODEL_NAME, cfg.MODEL.BACKBONE, cfg.DATASET.NAME, cfg.TIME_STAMP,temp,usingCRF))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# image transform
transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(cfg.DATASET.MEAN, cfg.DATASET.STD),
])
model = get_segmentation_model().to(args.device)
model.eval()
if os.path.isdir(args.input_img):
img_paths = [os.path.join(args.input_img, x) for x in os.listdir(args.input_img)]
else:
img_paths = [args.input_img]
for img_path in img_paths:
image = Image.open(img_path).convert('RGB')
images = transform(image).unsqueeze(0).to(args.device)
with torch.no_grad():
output = model(images)
# import pdb;pdb.set_trace()
# output=output
_, H, W = images[0].shape
logit = F.interpolate(output[0], size=(H, W), mode="bilinear", align_corners=True)
print(img_path,logit.shape)
logit/=temp
# output_prob=F.softmax(logit/temp,dim=1)
# output_prob=output_prob.cpu().numpy()
if(usingCRF):
# raw_image = cv2.imread(img_path, cv2.IMREAD_COLOR).astype(np.float32)
# mean_bgr=np.array([103.53, 116.28, 123.675])
# # Do some subtraction
# raw_image-=mean_bgr
# # converted to C H W
# raw_image=raw_image.transpose(2,0,1)
# raw_image=raw_image.astype(np.uint8)
# raw_image=raw_image.transpose(1,2,0)
# raw_images.append(raw_image)
# postprocessor= DenseCRF(iter_max=cfg.CRF.ITER_MAX,
# pos_xy_std=cfg.CRF.POS_XY_STD,
# pos_w=cfg.CRF.POS_W,
# bi_xy_std=cfg.CRF.BI_XY_STD,
# bi_rgb_std=cfg.CRF.BI_RGB_STD,
# bi_w=cfg.CRF.BI_W,
# )
postprocessor = CrfRnn(21)
raw_image = cv2.imread(img_path, cv2.IMREAD_COLOR).astype(np.float32).transpose(2, 0, 1)
raw_image = torch.from_numpy(raw_image).unsqueeze(dim=0)
prob_post=postprocessor(raw_image,logit.cpu().softmax(dim=1))
print(prob_post.shape)
pred = np.argmax(prob_post.squeeze(0).detach().numpy(), axis=0)
else:
pred = torch.argmax(logit, 1).squeeze(0).cpu().data.numpy()
mask = get_color_pallete(pred, cfg.DATASET.NAME)
outname = os.path.splitext(os.path.split(img_path)[-1])[0] + f'_temp_{temp}_crf_{usingCRF}.png'
mask.save(os.path.join(output_dir, outname))
if __name__ == '__main__':
demo()
|
py | b4021523e4be1eb5f6eaa45d91c2cda49a74776f | from typing import List
import numpy as np
from numpy import zeros
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.op2.result_objects.op2_objects import get_complex_times_dtype
from pyNastran.op2.tables.oes_stressStrain.real.oes_objects import (
StressObject, StrainObject, OES_Object)
from pyNastran.f06.f06_formatting import write_imag_floats_13e, _eigenvalue_header
class ComplexSpringDamperArray(OES_Object):
def __init__(self, data_code, is_sort1, isubcase, dt):
OES_Object.__init__(self, data_code, isubcase, apply_data_code=False)
#self.code = [self.format_code, self.sort_code, self.s_code]
#self.ntimes = 0 # or frequency/mode
#self.ntotal = 0
self.nelements = 0 # result specific
if not is_sort1:
raise NotImplementedError('SORT2')
@property
def is_real(self) -> bool:
return False
@property
def is_complex(self) -> bool:
return True
@property
def nnodes_per_element(self) -> int:
return 1
def _reset_indices(self) -> None:
self.itotal = 0
self.ielement = 0
#def get_headers(self):
#headers = ['axial', 'torque']
#return headers
def build(self):
"""sizes the vectorized attributes of the ComplexSpringDamperArray"""
#print('ntimes=%s nelements=%s ntotal=%s' % (self.ntimes, self.nelements, self.ntotal))
assert self.ntimes > 0, 'ntimes=%s' % self.ntimes
assert self.nelements > 0, 'nelements=%s' % self.nelements
assert self.ntotal > 0, 'ntotal=%s' % self.ntotal
#self.names = []
self.nelements //= self.ntimes
self.itime = 0
self.ielement = 0
self.itotal = 0
#self.ntimes = 0
#self.nelements = 0
#print("ntimes=%s nelements=%s ntotal=%s" % (self.ntimes, self.nelements, self.ntotal))
dtype, idtype, cfdtype = get_complex_times_dtype(self.nonlinear_factor, self.size)
self._times = zeros(self.ntimes, dtype=dtype)
self.element = zeros(self.nelements, dtype=idtype)
#[spring_stress]
self.data = zeros((self.ntimes, self.ntotal, 1), dtype=cfdtype)
def build_dataframe(self):
"""creates a pandas dataframe"""
headers = self.get_headers()
column_names, column_values = self._build_dataframe_transient_header()
self.data_frame = self._build_pandas_transient_elements(
column_values, column_names,
headers, self.element, self.data)
def __eq__(self, table): # pragma: no cover
assert self.is_sort1 == table.is_sort1
self._eq_header(table)
if not np.array_equal(self.data, table.data):
msg = 'table_name=%r class_name=%s\n' % (self.table_name, self.__class__.__name__)
msg += '%s\n' % str(self.code_information())
i = 0
for itime in range(self.ntimes):
for ielem, eid in enumerate(self.element):
t1 = self.data[itime, ielem, :]
t2 = table.data[itime, ielem, :]
if not np.array_equal(t1, t2):
msg += '%s (%s, %s) (%s, %s)\n' % (
eid,
t1.real, t1.imag,
t2.real, t2.imag)
i += 1
if i > 10:
print(msg)
raise ValueError(msg)
#print(msg)
if i > 0:
raise ValueError(msg)
return True
def add_sort1(self, dt, eid, stress):
"""unvectorized method for adding SORT1 transient data"""
assert isinstance(eid, integer_types) and eid > 0, 'dt=%s eid=%s' % (dt, eid)
self._times[self.itime] = dt
self.element[self.ielement] = eid
self.data[self.itime, self.ielement, 0] = stress
self.ielement += 1
def get_stats(self, short: bool=False) -> List[str]:
if not self.is_built:
return [
'<%s>\n' % self.__class__.__name__,
f' ntimes: {self.ntimes:d}\n',
f' ntotal: {self.ntotal:d}\n',
]
nelements = self.nelements
ntimes = self.ntimes
#ntotal = self.ntotal
msg = []
if self.nonlinear_factor not in (None, np.nan): # transient
msg.append(' type=%s ntimes=%i nelements=%i; table_name=%r\n' % (
self.__class__.__name__, ntimes, nelements, self.table_name))
ntimes_word = 'ntimes'
else:
msg.append(' type=%s nelements=%i; table_name=%r\n' % (
self.__class__.__name__, nelements, self.table_name))
ntimes_word = '1'
msg.append(' eType\n')
headers = self.get_headers()
n = len(headers)
msg.append(' data: [%s, nelements, %i] where %i=[%s]\n' % (
ntimes_word, n, n, str(', '.join(headers))))
msg.append(f' element.shape = {self.element.shape}\n')
msg.append(f' data.shape = {self.data.shape}\n')
#msg.append(' element type: %s\n' % self.element_type)
msg.append(' element name: %s\n' % self.element_name)
msg += self.get_data_code()
return msg
def get_f06_header(self, is_mag_phase=True, is_sort1=True):
# 11-CELAS1, 12-CELAS2, 13-CELAS3, 14-CELAS4
#' FREQUENCY STRESS FREQUENCY STRESS'
if self.element_type == 11:
msg = [' C O M P L E X S T R E S S E S I N S C A L A R S P R I N G S ( C E L A S 1 )\n']
elif self.element_type == 12:
msg = [' C O M P L E X S T R E S S E S I N S C A L A R S P R I N G S ( C E L A S 2 )\n']
elif self.element_type == 13:
msg = [' C O M P L E X S T R E S S E S I N S C A L A R S P R I N G S ( C E L A S 3 )\n']
elif self.element_type == 14:
msg = [' C O M P L E X S T R E S S E S I N S C A L A R S P R I N G S ( C E L A S 4 )\n']
#elif self.element_type == 20: # CDAMP1
#msg = [' C O M P L E X F O R C E S I N S C A L A R D A M P E R S ( C D A M P 1 )\n']
#elif self.element_type == 21: # CDAMP2
#msg = [' C O M P L E X F O R C E S I N S C A L A R D A M P E R S ( C D A M P 2 )\n']
else:
raise NotImplementedError('element_name=%s element_type=%s' % (self.element_name, self.element_type))
if is_mag_phase:
msg += [' (MAGNITUDE/PHASE)\n \n']
else:
msg += [' (REAL/IMAGINARY)\n \n']
if is_sort1:
msg += [
' ELEMENT ELEMENT\n'
' ID. STRESS ID. STRESS\n'
]
#' 14 0.0 / 0.0 0.0 / 0.0'
else:
msg += [' FREQUENCY STRESS FREQUENCY STRESS\n']
return msg
def write_f06(self, f06_file, header=None, page_stamp='PAGE %s',
page_num: int=1, is_mag_phase: bool=False, is_sort1: bool=True):
if header is None:
header = []
msg_temp = self.get_f06_header(is_mag_phase=is_mag_phase, is_sort1=is_sort1)
ntimes = self.data.shape[0]
eids = self.element
#is_odd = False
#nwrite = len(eids)
#if len(eids) % 2 == 1:
#nwrite -= 1
#is_odd = True
#print('len(eids)=%s nwrite=%s is_odd=%s' % (len(eids), nwrite, is_odd))
for itime in range(ntimes):
dt = self._times[itime] # TODO: rename this...
header = _eigenvalue_header(self, header, itime, ntimes, dt)
f06_file.write(''.join(header + msg_temp))
#print("self.data.shape=%s itime=%s ieids=%s" % (str(self.data.shape), itime, str(ieids)))
spring_force = self.data[itime, :, 0]
for eid, spring_forcei in zip(eids, spring_force):
[rspring, ispring] = write_imag_floats_13e([spring_forcei], is_mag_phase)
#ELEMENT AXIAL TORSIONAL
#ID. STRESS STRESS
#14 0.0 / 0.0 0.0 / 0.0
f06_file.write(' %8i %-13s / %-13s\n' % (eid, rspring, ispring))
f06_file.write(page_stamp % page_num)
page_num += 1
return page_num - 1
def write_op2(self, op2_file, op2_ascii, itable, new_result,
date, is_mag_phase=False, endian='>'):
"""writes an OP2"""
import inspect
from struct import Struct, pack
frame = inspect.currentframe()
call_frame = inspect.getouterframes(frame, 2)
op2_ascii.write(f'{self.__class__.__name__}.write_op2: {call_frame[1][3]}\n')
if itable == -1:
self._write_table_header(op2_file, op2_ascii, date)
itable = -3
#eids = self.element
# table 4 info
#ntimes = self.data.shape[0]
#nnodes = self.data.shape[1]
nelements = self.data.shape[1]
# 21 = 1 node, 3 principal, 6 components, 9 vectors, 2 p/ovm
#ntotal = ((nnodes * 21) + 1) + (nelements * 4)
ntotali = self.num_wide
ntotal = ntotali * nelements
#print('shape = %s' % str(self.data.shape))
#assert self.ntimes == 1, self.ntimes
#device_code = self.device_code
op2_ascii.write(f' ntimes = {self.ntimes}\n')
eids_device = self.element * 10 + self.device_code
#print('ntotal=%s' % (ntotal))
#assert ntotal == 193, ntotal
if self.is_sort1:
struct1 = Struct(endian + b'i2f')
else:
raise NotImplementedError('SORT2')
op2_ascii.write('%s-nelements=%i\n' % (self.element_name, nelements))
for itime in range(self.ntimes):
self._write_table_3(op2_file, op2_ascii, new_result, itable, itime)
# record 4
itable -= 1
header = [4, itable, 4,
4, 1, 4,
4, 0, 4,
4, ntotal, 4,
4 * ntotal]
op2_file.write(pack('%ii' % len(header), *header))
op2_ascii.write('r4 [4, 0, 4]\n')
op2_ascii.write(f'r4 [4, {itable:d}, 4]\n')
op2_ascii.write(f'r4 [4, {4 * ntotal:d}, 4]\n')
from pyNastran.op2.op2_interface.utils import to_mag_phase
stress = self.data[itime, :, 0]
reals, imags = to_mag_phase(stress, is_mag_phase)
for eid, stress_real, stress_imag in zip(eids_device, reals, imags):
data = [eid, stress_real, stress_imag]
op2_ascii.write(f' eid={eid} stress={[stress_real, stress_imag]}\n')
op2_file.write(struct1.pack(*data))
itable -= 1
header = [4 * ntotal,]
op2_file.write(pack('i', *header))
op2_ascii.write('footer = %s\n' % header)
new_result = False
return itable
class ComplexSpringStressArray(ComplexSpringDamperArray, StressObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
ComplexSpringDamperArray.__init__(self, data_code, is_sort1, isubcase, dt)
StressObject.__init__(self, data_code, isubcase)
def get_headers(self) -> List[str]:
headers = ['spring_stress']
return headers
class ComplexSpringStrainArray(ComplexSpringDamperArray, StrainObject):
def __init__(self, data_code, is_sort1, isubcase, dt):
ComplexSpringDamperArray.__init__(self, data_code, is_sort1, isubcase, dt)
StrainObject.__init__(self, data_code, isubcase)
def get_headers(self) -> List[str]:
headers = ['spring_strain']
return headers
|
py | b40218195f4e2b4bd4db89d0b74c0b9b9b855ef4 | #!/usr/bin/env python3
import sys
import numpy as np
from collections import defaultdict
from heapq import heappush, heappop
import sys
sys.setrecursionlimit(10**6)
input = sys.stdin.buffer.readline
# INF = sys.maxsize
INF = 10 ** 9 + 1
# INF = float("inf")
def debug(*x):
print(*x)
def solve():
"void()"
pass
def main():
N, M = map(int, input().split())
from scipy.sparse import lil_matrix
from scipy.sparse.csgraph import dijkstra
graph = lil_matrix((N + 1, N + 1), dtype=np.int32)
for i in range(M):
v1, v2 = map(int, input().split())
graph[v1, v2] = 1
graph[v2, v1] = 1
start = int(input())
K = int(input())
targets = list(map(int, input().split()))
dist = dijkstra(graph)
# debug(dist)
costmemo = {}
visited = 0
t2i = {targets[i]: i for i in range(len(targets))}
    def f(visited, last):
        # Bitmask DP: minimum cost to visit the set of target vertices encoded
        # in `visited` (one bit per target), ending at target vertex `last`.
        # debug(": visited, last", visited, last)
if (visited, last) in costmemo:
return costmemo[(visited, last)]
mask = 1 << (t2i[last])
buf = []
prev = visited ^ mask
if not prev:
# it is first vertex
c = dist[start, last]
costmemo[(visited, last)] = c
return c
for v in targets:
# debug(":: v", v)
vmask = 1 << (t2i[v])
# debug(":: vmask", vmask)
if prev & vmask: # v is in visited - last
buf.append(
f(prev, v) + dist[v, last]
)
c = min(buf)
costmemo[(visited, last)] = c
return c
fullbits = (1 << len(targets)) - 1
print(int(min(f(fullbits, last) for last in targets)))
# print(costmemo)
solve()
def _test():
import doctest
doctest.testmod()
as_input("""3 2
1 2
2 3
2
2
1 3 """)
main()
as_input("""
5 5
1 2
1 3
1 4
1 5
2 3
1
3
2 3 5
""")
main()
def as_input(s):
"use in test, use given string as input file"
import io
global read, input
f = io.StringIO(s.strip())
input = f.readline
read = f.read
USE_NUMBA = False
if (USE_NUMBA and sys.argv[-1] == 'ONLINE_JUDGE') or sys.argv[-1] == '-c':
print("compiling")
from numba.pycc import CC
cc = CC('my_module')
cc.export('solve', solve.__doc__.strip().split()[0])(solve)
cc.compile()
exit()
else:
input = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
if (USE_NUMBA and sys.argv[-1] != '-p') or sys.argv[-1] == "--numba":
# -p: pure python mode
# if not -p, import compiled module
from my_module import solve # pylint: disable=all
elif sys.argv[-1] == "-t":
_test()
sys.exit()
elif sys.argv[-1] != '-p' and len(sys.argv) == 2:
# input given as file
input_as_file = open(sys.argv[1])
input = input_as_file.buffer.readline
read = input_as_file.buffer.read
main()
|
py | b40218ab605517dd6a091c57057c2b7bfed1f325 | from gpconfig_modules.database_segment_guc import DatabaseSegmentGuc
from gpconfig_modules.file_segment_guc import FileSegmentGuc
from gpconfig_modules.segment_guc import SegmentGuc
class MultiValueGuc(SegmentGuc):
"""
encapsulate various GUC locations within a given segment.
A segment can include 2 databases: the primary and a mirror.
The database value is singular, since we strongly expect the values to be the same, given mirroring.
However, the file values of primary and mirror can be different.
So we model this MultiValueGuc object to accept 2 file values, and one database value.
"""
def __init__(self, guc1, guc2):
"""
accept 2 gucs in any order. gucs can be any combination of:
* database guc
* file guc
- primary
- mirror
* existing comparison guc, with or without mirror
"""
self.primary_file_seg_guc = None
self.mirror_file_seg_guc = None
self.db_seg_guc = None
if not guc1 or not guc2:
raise Exception("comparison requires two gucs")
SegmentGuc.__init__(self, [guc1.context, guc1.name])
if guc1.context != guc2.context:
raise Exception("Not the same context")
if isinstance(guc1, MultiValueGuc):
# copy constructor
self.db_seg_guc = guc1.db_seg_guc
self.primary_file_seg_guc = guc1.primary_file_seg_guc
self.mirror_file_seg_guc = guc1.mirror_file_seg_guc
if isinstance(guc2, MultiValueGuc):
# copy constructor
self.db_seg_guc = guc2.db_seg_guc
self.primary_file_seg_guc = guc2.primary_file_seg_guc
            self.mirror_file_seg_guc = guc2.mirror_file_seg_guc
if isinstance(guc1, FileSegmentGuc):
if self.primary_file_seg_guc:
if self.primary_file_seg_guc.dbid == guc1.dbid:
self.primary_file_seg_guc = guc1
else:
self.mirror_file_seg_guc = guc1
else:
self.primary_file_seg_guc = guc1
if isinstance(guc2, FileSegmentGuc):
if self.primary_file_seg_guc:
if self.primary_file_seg_guc.dbid == guc2.dbid:
self.primary_file_seg_guc = guc2
else:
self.mirror_file_seg_guc = guc2
else:
self.primary_file_seg_guc = guc2
if isinstance(guc1, DatabaseSegmentGuc):
self.db_seg_guc = guc1
if isinstance(guc2, DatabaseSegmentGuc):
self.db_seg_guc = guc2
def report_success_format(self):
file_val = self.primary_file_seg_guc.get_value()
if self.db_seg_guc:
result = "%s value: %s | file: %s" % (self.get_label(), self.db_seg_guc.value, self._use_dash_when_none(file_val))
else:
result = "%s value: %s" % (self.get_label(), file_val)
return result
def report_fail_format(self):
sort_seg_guc_objs = [obj for obj in [self.primary_file_seg_guc, self.mirror_file_seg_guc] if obj]
sort_seg_guc_objs.sort(key=lambda x: x.dbid)
if self.db_seg_guc:
report = [self._report_fail_format_with_database_and_file_gucs(seg_guc_obj) for seg_guc_obj in sort_seg_guc_objs]
else:
report = [seg_guc_obj.report_fail_format()[0] for seg_guc_obj in sort_seg_guc_objs]
return report
def _report_fail_format_with_database_and_file_gucs(self, segment_guc_obj):
return "[context: %s] [dbid: %s] [name: %s] [value: %s | file: %s]" % (
self.db_seg_guc.context,
segment_guc_obj.dbid,
self.db_seg_guc.name,
self.db_seg_guc.value,
self._use_dash_when_none(segment_guc_obj.value))
def _use_dash_when_none(self, value):
return value if value is not None else "-"
def is_internally_consistent(self):
if not self.db_seg_guc:
return self.compare_primary_and_mirror_files()
else:
if self.primary_file_seg_guc is None:
return True
if self.primary_file_seg_guc.get_value() is None:
return True
result = True
if self.mirror_file_seg_guc and self.db_seg_guc:
result = self.mirror_file_seg_guc.value == self.db_seg_guc.value
if not result:
return result
return self.db_seg_guc.value == self.primary_file_seg_guc.value and result
def get_value(self):
file_value = ""
if self.primary_file_seg_guc:
file_value = str(self.primary_file_seg_guc.get_value())
db_value = ""
if self.db_seg_guc:
db_value = str(self.db_seg_guc.get_value())
return db_value + "||" + file_value
def set_mirror_file_segment(self, mirror_file_seg):
self.mirror_file_seg_guc = mirror_file_seg
def get_primary_dbid(self):
return self.primary_file_seg_guc.dbid
def set_primary_file_segment(self, guc):
self.primary_file_seg_guc = guc
def compare_primary_and_mirror_files(self):
if self.primary_file_seg_guc and self.mirror_file_seg_guc:
return self.primary_file_seg_guc.get_value() == self.mirror_file_seg_guc.get_value()
return True
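# Illustrative sketch (not part of the original module; it assumes the sibling
# gpconfig_modules classes take row-style constructor arguments, e.g.
# [context, name, value] for DatabaseSegmentGuc and [context, name, value, dbid]
# for FileSegmentGuc -- the GUC name and values below are made up):
#
#   db_guc = DatabaseSegmentGuc(['-1', 'statement_mem', '125MB'])
#   file_guc = FileSegmentGuc(['-1', 'statement_mem', '125MB', '1'])
#   combined = MultiValueGuc(db_guc, file_guc)
#   combined.is_internally_consistent()   # True when file and db values agree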
|
py | b40218d2f32cd17a3aec42d6d7a349501fc7393e | obvious = ["password", "qwerty", "hello123", "letmein", "12345"]
password = input("Please enter a password: ")
for count in range(len(obvious)):
if password == obvious[count]:
print("This password is weak.")
if len(password) < 8:
print("too short make it at least 8 characters")
char = 0
num = 0
upper = 0
lower = 0
for count in range(len(password)):
if password[count].isdigit():
num = num+1
elif password[count].isalpha():
char = char+1
if password[count].isupper():
upper = upper+1
elif password[count].islower():
lower = lower+1
if num == 0:
print("inclue digits")
if upper == 0 or lower ==0:
print(upper,lower,"include lower and uppercase letters")
if char == 0:
print("include letters")
if num > 0 and char > 0 and upper > 0 and lower > 0 and len(password) >= 8 and password not in obvious:
print("Your password is good to go") |
py | b402194d973dd89bfba10d63a994d066ca5e586e | import socket
from smtplib import *
from configuration import *
debug = False
verbose = True
version = "1.0.0"
key_sender = 'sender'
key_subject = 'subject'
key_username = 'username'
key_password = 'password'
key_receivers = 'receivers'
key_smtp_server = 'smtp_server'
key_smtp_server_port = 'smtp_server_port'
param_configuration_names = '--configuration'
def log(what):
if verbose:
        print(what)
def notify(content, configuration_names):
if configuration_names:
for configuration_name in configuration_names:
if debug:
log("Using '" + configuration_name + "' configuration")
if configuration_name in configurations:
configuration = configurations[configuration_name]
notify_with_configuration(content, configuration)
else:
log("There is no configuration with the name: '" + configuration_name + "'")
else:
if debug:
log("Using all configurations.")
for configuration in configurations:
notify_with_configuration(content, configurations[configuration])
def notify_with_configuration(content, configuration):
receivers_str = ""
for receiver_str in configuration[key_receivers]:
if configuration[key_receivers].index(receiver_str) > 0:
receivers_str += ", " + receiver_str
else:
receivers_str += receiver_str
message = """From: %s
To: %s
Subject: %s
%s
""" % (configuration[key_sender], receivers_str, configuration[key_subject], content)
if debug:
log("We will send the following message:\n" + message)
try:
server = SMTP(configuration[key_smtp_server], configuration[key_smtp_server_port], timeout=30)
if key_username in configuration:
username = configuration[key_username]
log("Logging in user: " + username)
password = ""
if key_password in configuration:
password = configuration[key_password]
server.login(username, password)
receivers = configuration[key_receivers]
log("Sending mail to: " + str(receivers))
server.sendmail(configuration[key_sender], receivers, message)
log("Shutting down connection.")
server.quit()
return True
    except (SMTPHeloError, SMTPAuthenticationError, SMTPException,
SMTPRecipientsRefused, SMTPSenderRefused, SMTPDataError, socket.timeout) as e:
log("Error: " + str(e))
pass
return False
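# Illustrative sketch (not part of the original module): the `configurations`
# dict imported from configuration.py is expected to map configuration names to
# dicts keyed by the key_* constants above. Host, port and addresses below are
# placeholders only.
#
# configurations = {
#     'default': {
#         key_sender: '[email protected]',
#         key_subject: 'Notification',
#         key_receivers: ['[email protected]'],
#         key_smtp_server: 'smtp.example.com',
#         key_smtp_server_port: 587,
#         key_username: 'alerts',   # optional
#         key_password: 'secret',   # optional
#     },
# }
#
# notify("Disk usage above 90%", ['default'])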
|
py | b402195b0f2fb83eec543252894dc744fc575715 | def html_property(prop_name):
def getter(self):
return self._attributes[prop_name]
def setter(self, value):
self._set_attribute(prop_name, value)
def deleter(self):
self._set_attribute(prop_name, None)
return property(getter, setter, deleter)
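# Minimal usage sketch (illustrative only): html_property assumes the owning
# class stores values in `_attributes` and exposes a `_set_attribute` method.
# The FakeElement class below is hypothetical and exists only for demonstration.
if __name__ == "__main__":
    class FakeElement:
        title = html_property("title")
        def __init__(self):
            self._attributes = {"title": None}
        def _set_attribute(self, name, value):
            self._attributes[name] = value
    el = FakeElement()
    el.title = "hello"
    print(el.title)   # -> hello
    del el.title
    print(el.title)   # -> None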
|
pyw | b402196d08dd4275cf7b73c337e6e2f03383d24b | import sys
from PyQt5.QtWidgets import QDialog, QApplication
from demoScrollBar import *
class MyForm(QDialog):
def __init__(self):
super().__init__()
self.ui = Ui_Dialog()
self.ui.setupUi(self)
self.ui.horizontalScrollBarSugarLevel.valueChanged.connect(self.scrollhorizontal)
self.ui.verticalScrollBarPulseRate.valueChanged.connect(self.scrollvertical)
self.ui.horizontalSliderBloodPressure.valueChanged.connect(self.sliderhorizontal)
self.ui.verticalSliderCholestrolLevel.valueChanged.connect(self.slidervertical)
self.show()
def scrollhorizontal(self,value):
self.ui.lineEditResult.setText("Sugar Level : "+str(value))
def scrollvertical(self, value):
self.ui.lineEditResult.setText("Pulse Rate : "+str(value))
def sliderhorizontal(self, value):
self.ui.lineEditResult.setText("Blood Pressure : "+str(value))
def slidervertical(self, value):
self.ui.lineEditResult.setText("Cholestrol Level : "+str(value))
if __name__=="__main__":
app = QApplication(sys.argv)
w = MyForm()
w.show()
sys.exit(app.exec_())
|
py | b40219748dac6ffb4e623a49e9bce0bbc1efbaee | """Calculate scores based on the expression of gene lists.
"""
import numpy as np
import pandas as pd
import scipy.sparse
from .. import settings
from .. import logging as logg
def score_genes(
adata,
gene_list,
ctrl_size=50,
gene_pool=None,
n_bins=25,
score_name='score',
random_state=0,
copy=False): # we use the scikit-learn convention of calling the seed "random_state"
"""Score a set of genes [Satija15]_.
    The score is the average expression of a set of genes minus the
    average expression of a reference set of genes. The reference set is
randomly sampled from the `gene_pool` for each binned expression value.
This reproduces the approach in Seurat [Satija15]_ and has been implemented
for Scanpy by Davide Cittaro.
Parameters
----------
adata : :class:`~scanpy.api.AnnData`
The annotated data matrix.
gene_list : iterable
The list of gene names used for score calculation.
ctrl_size : `int`, optional (default: 50)
Number of reference genes to be sampled. If `len(gene_list)` is not too
low, you can set `ctrl_size=len(gene_list)`.
gene_pool : `list` or `None`, optional (default: `None`)
Genes for sampling the reference set. Default is all genes.
n_bins : `int`, optional (default: 25)
Number of expression level bins for sampling.
score_name : `str`, optional (default: `'score'`)
Name of the field to be added in `.obs`.
random_state : `int`, optional (default: 0)
The random seed for sampling.
copy : `bool`, optional (default: `False`)
Copy `adata` or modify it inplace.
Returns
-------
Depending on `copy`, returns or updates `adata` with an additional field
`score_name`.
Examples
--------
See this `notebook <https://github.com/theislab/scanpy_usage/tree/master/180209_cell_cycle>`_.
"""
logg.info('computing score \'{}\''.format(score_name), r=True)
adata = adata.copy() if copy else adata
if random_state:
np.random.seed(random_state)
gene_list = set([x for x in gene_list if x in adata.var_names])
if not gene_pool:
gene_pool = list(adata.var_names)
else:
gene_pool = [x for x in gene_pool if x in adata.var_names]
# Trying here to match the Seurat approach in scoring cells.
# Basically we need to compare genes against random genes in a matched
# interval of expression.
# TODO: this densifies the whole data matrix for `gene_pool`
if scipy.sparse.issparse(adata.X):
obs_avg = pd.Series(
np.nanmean(
adata[:, gene_pool].X.toarray(), axis=0), index=gene_pool) # average expression of genes
else:
obs_avg = pd.Series(
np.nanmean(adata[:, gene_pool].X, axis=0), index=gene_pool) # average expression of genes
n_items = int(np.round(len(obs_avg) / (n_bins - 1)))
obs_cut = obs_avg.rank(method='min') // n_items
control_genes = set()
# now pick `ctrl_size` genes from every cut
for cut in np.unique(obs_cut.loc[gene_list]):
r_genes = np.array(obs_cut[obs_cut == cut].index)
np.random.shuffle(r_genes)
control_genes.update(set(r_genes[:ctrl_size])) # uses full r_genes if ctrl_size > len(r_genes)
# To index, we need a list - indexing implies an order.
control_genes = list(control_genes - gene_list)
gene_list = list(gene_list)
score = np.mean(adata[:, gene_list].X, axis=1) - np.mean(adata[:, control_genes].X, axis=1)
adata.obs[score_name] = pd.Series(np.array(score).ravel(), index=adata.obs_names)
logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
logg.hint('added\n'
' \'{}\', score of gene set (adata.obs)'.format(score_name))
return adata if copy else None
def score_genes_cell_cycle(
adata,
s_genes,
g2m_genes,
copy=False,
**kwargs):
"""Score cell cycle genes [Satija15]_.
Given two lists of genes associated to S phase and G2M phase, calculates
scores and assigns a cell cycle phase (G1, S or G2M). See
:func:`~scanpy.api.score_genes` for more explanation.
Parameters
----------
adata : :class:`~scanpy.api.AnnData`
The annotated data matrix.
s_genes : `list`
List of genes associated with S phase.
g2m_genes : `list`
List of genes associated with G2M phase.
copy : `bool`, optional (default: `False`)
Copy `adata` or modify it inplace.
**kwargs : optional keyword arguments
Are passed to :func:`~scanpy.api.score_genes`. `ctrl_size` is not
possible, as it's set as `min(len(s_genes), len(g2m_genes))`.
Returns
-------
Depending on `copy`, returns or updates `adata` with the following fields.
S_score : `adata.obs`, dtype `object`
The score for S phase for each cell.
G2M_score : `adata.obs`, dtype `object`
The score for G2M phase for each cell.
phase : `adata.obs`, dtype `object`
The cell cycle phase (`S`, `G2M` or `G1`) for each cell.
See also
--------
score_genes
Examples
--------
See this `notebook <https://github.com/theislab/scanpy_usage/tree/master/180209_cell_cycle>`_.
"""
logg.info('calculating cell cycle phase')
adata = adata.copy() if copy else adata
ctrl_size = min(len(s_genes), len(g2m_genes))
# add s-score
score_genes(adata, gene_list=s_genes, score_name='S_score', ctrl_size=ctrl_size, **kwargs)
# add g2m-score
score_genes(adata, gene_list=g2m_genes, score_name='G2M_score', ctrl_size=ctrl_size, **kwargs)
scores = adata.obs[['S_score', 'G2M_score']]
# default phase is S
phase = pd.Series('S', index=scores.index)
# if G2M is higher than S, it's G2M
phase[scores.G2M_score > scores.S_score] = 'G2M'
# if all scores are negative, it's G1...
phase[np.all(scores < 0, axis=1)] = 'G1'
adata.obs['phase'] = phase
logg.hint(' \'phase\', cell cycle phase (adata.obs)')
return adata if copy else None
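# Minimal usage sketch (illustrative only; `adata` stands for a real AnnData
# object and the gene names are placeholders that must exist in adata.var_names):
#
#   score_genes(adata, gene_list=['GeneA', 'GeneB', 'GeneC'],
#               score_name='my_score', ctrl_size=50)
#   adata.obs['my_score']                      # per-cell scores
#
#   score_genes_cell_cycle(adata, s_genes=s_phase_genes, g2m_genes=g2m_phase_genes)
#   adata.obs[['S_score', 'G2M_score', 'phase']]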
|
py | b402197974a6735347f0694b0159c1dafd28de22 | # Common PDG data
#
# Mikael Mieskolainen, 2020
# [email protected]
# ------------------------------------------------------------------------
ELECTRON_MASS = 0.000510998950
PI_MASS = 0.13957018
K_MASS = 0.493677
# ------------------------------------------------------------------------
# http://pdg.lbl.gov/2019/reviews/rpp2019-rev-monte-carlo-numbering.pdf
PDG_e = 11 # electron-
PDG_y = 22 # gamma
PDG_rho = 113 # ρ(770)0
PDG_eta = 221 # eta
PDG_omega = 223 # omega(782)
PDG_K0L = 130 # K0L
PDG_K = 321 # kaon+
PDG_K892 = 313 # K*(892)0
PDG_K892P = 323 # K*(892)+
PDG_a1_1260 = 20213 # a1(1260)+
# ----------------------------------
PDG_B0 = 511 # B0
PDG_B = 521 # B+
# ----------------------------------
PDG_D = 411 # D+
PDG_D2010 = 413 # D*(2010)+
PDG_D0 = 421 # D0
PDG_D2007 = 423 # D∗(2007)0
PDG_D2460 = 415 # D∗2(2460)+
PDG_D2400 = 10411 # D∗0(2400)+
PDG_D1 = 20413 # D1(H)+
PDG_JPSI = 443
|
py | b40219d88af6a742782ab6eecc2d1cf334c81701 | from sqlalchemy import *
from sqlalchemy.orm import relation, backref
from meta import Base, engine
import util
# Some shit SQLAlchemy needs
metadata = Base.metadata
class Site(Base):
"""
Site object.
Represents a website.
"""
__tablename__ = "leolo_sites"
id = Column(Integer, primary_key=True) # id (autoincrement)
title = Column(Unicode(120)) # webpage/site title
url = Column(String(200)) # webpage/site url
inactive = Column(Boolean()) # blog active or not active
feed = relation("Feed", backref="leolo_sites", uselist=False) # a site has a feed
def __init__(self, feedurl, title=None, url=None):
self.feed = Feed(self, feedurl)
self.title = title
self.url = url
self.inactive = False
def __str__(self):
return self.__unicode__()
def __unicode__(self):
if not self.inactive:
return "<Site %i (%s - %s)>" % (self.id, self.title, self.feed.url)
return "<Site %i (%s - [Inactive])>" % (self.id, self.feed.url)
class Feed(Base):
"""
Feed object.
"""
__tablename__ = "leolo_feeds"
url = Column(String(300), primary_key=True) # feed url
last_modified = Column(String(150)) # last feed's modification date
last_check = Column(DateTime()) # last feed check
last_update = Column(DateTime()) # last feed update
last_entrylink = Column(String(500)) # last entry link
siteid = Column(Integer, ForeignKey("leolo_sites.id")) # site id owns this feed
def __init__(self, site, url):
self.url = url
self.last_modified = None
self.last_check = None
self.last_update = None
self.last_entrylink = None
self.updated = False
def clear_entries(self):
self.__entries = []
self.set_updated(False)
def count_entries(self):
try:
return len(self.__entries)
except:
return 0
def get_entries(self):
try:
self.__entries
except:
self.__entries = []
self.set_updated(False)
return self.__entries
def set_entries(self, entries):
try:
self.__entries
except:
self.__entries = []
if len(entries) > 0:
self.set_updated(True)
self.__entries += entries
entries = property(get_entries, set_entries)
def get_updated(self):
try:
self.__updated
except:
self.__updated = False
return self.__updated
def set_updated(self, updated):
try:
self.__updated
except:
pass
self.__updated = updated
updated = property(get_updated, set_updated)
def __str__(self):
return self.__unicode__()
def __unicode__(self):
return "<Feed('%s', last update=%s)>" % (self.url, self.last_update)
class Entry(object):
"""
Entry object.
Represents a site's entry (also known as blog post).
"""
def __init__(self, link, title, content, enclosures):
self.link = link # link to the original entry page
self.title = title # title
self.content = content # content
self.enclosures = enclosures # list of external links
# associated with this entry
self.enclosures_cp = enclosures
self.__links = [] # links inside content in which we'll be interested
def get_links(self):
"""
Returns a list of links.
"""
return self.__links
def set_link(self, links):
"""
Adds a given link to the 'links' list.
Given link must be a string.
"""
if isinstance(links, str) or isinstance(links, unicode):
# Add url if it's not repeated in list
if not links in self.__links:
self.__links.append(util.sanitize_url(links))
elif isinstance(links, list):
for link in links:
if not link in self.__links:
self.__links.append(util.sanitize_url(link))
links = property(get_links, set_link)
def rm_repeated(self):
"""
If this function finds 2 urls repeated in enclosures and links,
then removes the url in links.
"""
for url in self.enclosures:
if url in self.__links:
self.__links.remove(url)
def __str__(self):
return self.__unicode__()
def __unicode__(self):
if not self.title:
return "<Entry(No title)>"
else:
return "<Entry('%s')>" % (self.title)
# Create table in DB if it doesn't exist
metadata.create_all(engine)
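# Illustrative usage sketch (not part of the original module). It assumes a
# standard SQLAlchemy session built from the `engine` imported above; the feed
# URL, titles and links are placeholders.
#
#   from sqlalchemy.orm import sessionmaker
#   session = sessionmaker(bind=engine)()
#
#   site = Site("http://example.com/feed.xml", title=u"Example blog",
#               url="http://example.com")
#   session.add(site)
#   session.commit()
#
#   entry = Entry("http://example.com/post-1", u"First post",
#                 u"<p>content</p>", ["http://example.com/file.mp3"])
#   entry.links = "http://example.com/related"     # sanitized and appended
#   site.feed.entries = [entry]                    # marks the feed as updated
#   print(site.feed.updated, site.feed.count_entries())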
|
py | b4021ad7f114bdb0ee33985a3b371ee64745f06c | import numpy as np
import reward.utils as U
from reward.env.base_env import BaseEnv
from boltons.cacheutils import cachedproperty
# Soft dependency
try:
import gym
except ImportError:
_has_gym = False
else:
_has_gym = True
class GymEnv(BaseEnv):
"""
Creates and wraps a gym environment.
Parameters
----------
env_name: str
The Gym ID of the env. For a list of available envs check
`this <https://gym.openai.com/envs/>`_ page.
"""
def __init__(self, env_name):
if not _has_gym: raise ImportError("Could not import gym")
self.env_name = env_name
super().__init__()
@cachedproperty
def s_space(self): return GymEnv.get_space(self.env.observation_space)
@cachedproperty
def ac_space(self): return GymEnv.get_space(self.env.action_space)
def reset(self):
"""
Calls the reset method on the gym environment.
Returns
-------
state: numpy.ndarray
A numpy array with the state information.
"""
return self.env.reset()
def step(self, ac):
"""
Calls the step method on the gym environment.
Parameters
----------
action: int or float or numpy.ndarray
            The action to be executed in the environment; it should be an int for
            discrete environments and a float for continuous ones. It is also
            possible to execute multiple actions (if the environment supports it),
in this case it should be a numpy.ndarray.
Returns
-------
next_state: numpy.ndarray
A numpy array with the state information.
reward: float
The reward.
done: bool
Flag indicating the termination of the episode.
"""
# TODO: Squeezing may break some envs (e.g. Pendulum-v0)
ac = np.squeeze(ac)
if isinstance(self.ac_space, U.space.Discrete): ac = int(ac)
sn, r, d, info = self.env.step(ac)
return sn, r, d, info
def render(self): self.env.render()
def sample_random_ac(self): return self.env.action_space.sample()
def seed(self, value): self.env.seed(value)
def update_config(self, config):
"""
Updates a Config object to include information about the environment.
Parameters
----------
config: Config
Object used for storing configuration.
"""
super().update_config(config)
config.env.obj.update(dict(wrappers=self.wrappers))
def close(self): self.env.close()
# TODO: Not always the case that time-limit is the first wrapper
def remove_timestep_limit(self): self.env = self.env.env
def _create_env(self): return gym.make(self.env_name)
@staticmethod
def get_space(space):
"""
Gets the shape of the possible types of states in gym.
Parameters
----------
space: gym.spaces
Space object that describes the valid actions and observations
Returns
-------
dict
Dictionary containing the space shape and type
"""
if isinstance(space, gym.spaces.Box):
if space.dtype == np.float32:
return U.space.Continuous(
low=space.low, high=space.high, shape=space.shape
)
if isinstance(space, gym.spaces.Discrete):
            raise NotImplementedError
if isinstance(space, gym.spaces.MultiDiscrete):
            raise NotImplementedError
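# Minimal usage sketch (illustrative only; assumes gym is installed and the
# chosen env has continuous float32 Box spaces, since get_space only maps those):
#
#   env = GymEnv("MountainCarContinuous-v0")
#   s = env.reset()
#   for _ in range(10):
#       ac = env.sample_random_ac()
#       sn, r, d, info = env.step(ac)
#       if d:
#           s = env.reset()
#   env.close()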
|
py | b4021b2eca09efe5ee75dc83554f6fd6d0e833ba | from base.base_train_multi import BaseTrainMulti
from tqdm import tqdm
import numpy as np
from time import sleep
from time import time
from utils.evaluations import save_results,determine_normality_param,predict_anomaly
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.pipeline import Pipeline
from sklearn.externals.joblib import load
import os
class AutoencoderDenoiserTrainer(BaseTrainMulti):
def __init__(self, sess, model, data, config, logger):
super(AutoencoderDenoiserTrainer, self).__init__(sess, model, data, config, logger)
self.batch_size = self.config.data_loader.batch_size
self.noise_dim = self.config.trainer.noise_dim
self.img_dims = self.config.trainer.image_dims
        # Initialize the train Dataset Iterator
self.sess.run(self.data.iterator.initializer)
# Initialize the test Dataset Iterator
self.sess.run(self.data.test_iterator.initializer)
if self.config.data_loader.validation:
self.sess.run(self.data.valid_iterator.initializer)
self.best_valid_loss = 0
self.nb_without_improvements = 0
def train_epoch_ae(self):
# Attach the epoch loop to a variable
begin = time()
# Make the loop of the epoch iterations
loop = tqdm(range(self.config.data_loader.num_iter_per_epoch))
ae_losses = []
summaries = []
image = self.data.image
cur_epoch = self.model.cur_epoch_tensor.eval(self.sess)
for _ in loop:
loop.set_description("Epoch:{}".format(cur_epoch + 1))
loop.refresh() # to show immediately the update
sleep(0.01)
ae, sum_ae = self.train_step_ae(image, cur_epoch)
ae_losses.append(ae)
summaries.append(sum_ae)
self.logger.info("Epoch {} terminated".format(cur_epoch))
self.summarizer.add_tensorboard(step=cur_epoch, summaries=summaries)
# Check for reconstruction
if cur_epoch % self.config.log.frequency_test == 0:
image_eval = self.sess.run(image)
feed_dict = {self.model.image_input: image_eval, self.model.is_training_ae: False}
reconstruction = self.sess.run(self.model.summary_op_ae, feed_dict=feed_dict)
self.summarizer.add_tensorboard(step=cur_epoch, summaries=[reconstruction])
ae_m = np.mean(ae_losses)
self.logger.info(
"Epoch: {} | time = {} s | loss AE= {:4f} ".format(
cur_epoch, time() - begin, ae_m
)
)
self.model.save(self.sess)
def train_epoch_den(self):
# Attach the epoch loop to a variable
begin = time()
# Make the loop of the epoch iterations
loop = tqdm(range(self.config.data_loader.num_iter_per_epoch))
den_losses = []
summaries = []
image = self.data.image
cur_epoch = self.model.cur_epoch_tensor.eval(self.sess)
for _ in loop:
loop.set_description("Epoch:{}".format(cur_epoch + 1))
loop.refresh() # to show immediately the update
sleep(0.01)
den, sum_den = self.train_step_den(image, cur_epoch)
den_losses.append(den)
summaries.append(sum_den)
self.logger.info("Epoch {} terminated".format(cur_epoch))
self.summarizer.add_tensorboard(step=cur_epoch, summaries=summaries)
# Check for reconstruction
if cur_epoch % self.config.log.frequency_test == 0:
image_eval = self.sess.run(image)
feed_dict = {self.model.image_input: image_eval, self.model.is_training_ae: False}
reconstruction = self.sess.run(self.model.summary_op_den, feed_dict=feed_dict)
self.summarizer.add_tensorboard(step=cur_epoch, summaries=[reconstruction])
den_m = np.mean(den_losses)
self.logger.info(
"Epoch: {} | time = {} s | loss DEN= {:4f} ".format(
cur_epoch, time() - begin, den_m
)
)
self.model.save(self.sess)
def train_step_ae(self, image, cur_epoch):
image_eval = self.sess.run(image)
feed_dict = {
self.model.image_input: image_eval,
self.model.is_training_ae: True,
}
# Train Autoencoder
_, lae, sm_ae = self.sess.run(
[self.model.train_auto_op, self.model.auto_loss, self.model.summary_op_loss_ae],
feed_dict=feed_dict,
)
return lae, sm_ae
def train_step_den(self, image, cur_epoch):
image_eval = self.sess.run(image)
feed_dict = {
self.model.image_input: image_eval,
self.model.is_training_ae: False,
}
# Train Denoiser
_, lden, sm_den = self.sess.run(
[self.model.train_den_op, self.model.den_loss, self.model.summary_op_loss_den],
feed_dict=feed_dict,
)
return lden, sm_den
def test_epoch(self):
self.logger.warn("Testing evaluation...")
scores_rec = []
scores_den = []
scores_pipe = []
inference_time = []
true_labels = []
pipe_output = []
pipe_delta = []
file_writer = tf.summary.FileWriter(os.path.join(self.config.log.summary_dir, "test"))
# Create the scores
test_loop = tqdm(range(self.config.data_loader.num_iter_per_test))
pred_labels = []
scores_km = []
for cur_epoch in test_loop:
test_batch_begin = time()
test_batch, test_labels = self.sess.run([self.data.test_image, self.data.test_label])
test_loop.refresh() # to show immediately the update
sleep(0.01)
feed_dict = {self.model.image_input: test_batch, self.model.is_training_ae: False}
scores_rec += self.sess.run(self.model.rec_score, feed_dict=feed_dict).tolist()
scores_den += self.sess.run(self.model.den_score, feed_dict=feed_dict).tolist()
scores_pipe += self.sess.run(self.model.pipe_score, feed_dict=feed_dict).tolist()
# output_ema = self.sess.run(self.model.output_ema, feed_dict=feed_dict).tolist()
# pipe_delta_batch = self.sess.run(self.model.pipe_delta, feed_dict=feed_dict).tolist()
# for im_i in range(self.model.config.data_loader.batch_size):
# if(test_labels[im_i] == True):
# deltaim = np.reshape(pipe_delta_batch[im_i],[28,28])
# testim = np.reshape(test_batch[im_i],[28,28])
# output_im = np.reshape(output_ema[im_i],[28,28])
# figureim = np.reshape(np.concatenate([deltaim,testim,deltaim>0,output_im]),[1,112,28,1])
# file_writer.add_summary(self.sess.run(tf.summary.image("delta", figureim)))
inference_time.append(time() - test_batch_begin)
true_labels += test_labels.tolist()
# pred_labels.append(pred_labels_temp)
# scores_km += (scores_km_temp.tolist())
# np.save('pred_labels',pred_labels)
true_labels = np.asarray(true_labels)
inference_time = np.mean(inference_time)
self.logger.info("Testing: Mean inference time is {:4f}".format(inference_time))
scores_rec = np.asarray(scores_rec)
scores_den = np.asarray(scores_den)
scores_pipe = np.asarray(scores_pipe)
step = self.sess.run(self.model.global_step_tensor)
percentiles = np.asarray(self.config.trainer.percentiles)
save_results(
self.config.log.result_dir,
scores_rec,
true_labels,
self.config.model.name,
self.config.data_loader.dataset_name,
"scores_rec",
"paper",
self.config.trainer.label,
self.config.data_loader.random_seed,
self.logger,
step,
percentile=percentiles,
)
save_results(
self.config.log.result_dir,
scores_den,
true_labels,
self.config.model.name,
self.config.data_loader.dataset_name,
"scores_den",
"paper",
self.config.trainer.label,
self.config.data_loader.random_seed,
self.logger,
step,
percentile=percentiles,
)
save_results(
self.config.log.result_dir,
scores_pipe,
true_labels,
self.config.model.name,
self.config.data_loader.dataset_name,
"scores_pipe",
"paper",
self.config.trainer.label,
self.config.data_loader.random_seed,
self.logger,
step,
percentile=percentiles,
)
|
py | b4021b3ed590dbb8ed8e25dee6b1b95c298dec4b | from rigging import MODULE_NAME
from utils import build_test_suite
from unittest import main
def test_suite():
return build_test_suite(MODULE_NAME,[
'test_encoding',
'test_engine',
'test_mimetypes',
'test_graph',
'test_transforms',
# 'test_rest',
# 'test_pdf',
# 'test_python',
# 'test_lynx',
])
if __name__=='__main__':
main(defaultTest='test_suite')
|
py | b4021b7626263cb95e72ee9961d5246a3a3e4736 | # -*- coding: utf-8 -*-
'''
Created on 06/04/2015
@author: david
'''
import logging
from engine.motor import Motor, StepMotor, MotorDummy
from stabilization.pid import Pid
class Driver(object):
'''
Controls a motor set
'''
#Driver modes
MODE_NORMAL = 0
MODE_ROTATE = 1
#Thresholds for throttle ranges. For each range a different turning method will be used.
THROTTLE_RANGE_THRESHOLD_1 = 25.0
THROTTLE_RANGE_THRESHOLD_2 = 75.0
THROTTLE_RANGE_THRESHOLD_DIFF = THROTTLE_RANGE_THRESHOLD_2 - THROTTLE_RANGE_THRESHOLD_1
    #Direction divisors to set the wheels spinning at different speeds in order to turn the robot.
DIRECTION_DIV1 = 50.0
DIRECTION_DIV2 = 200.0
@staticmethod
def createForRobot(stepMotor = False):
'''
Creates a new motor driver for robot context
@return: The driver object
'''
driver = Driver()
if stepMotor:
driver.setMotors(StepMotor(1), StepMotor(0))
else:
driver.setMotors(Motor(1), Motor(0))
return driver
@staticmethod
def createForTesting():
'''
Creates a new motor driver for testing context
@return: The driver object
'''
driver = Driver()
driver.setMotors(MotorDummy(1), MotorDummy(0))
return driver
def __init__(self):
'''
Constructor
'''
self._leftMotor = None
self._rightMotor = None
self._throttle = 0.0
self._direction = 0.0
self._mode = Driver.MODE_NORMAL
def setMotors(self, leftMotor, rightMotor):
'''
Set motors to be driven
@param leftMotor: The left motor
@param rightMotor: The right motor
'''
self._leftMotor = leftMotor
self._rightMotor = rightMotor
def start(self):
'''
Starts the driver
'''
if self._leftMotor and self._rightMotor:
self._leftMotor.start()
self._rightMotor.start()
else:
raise Exception("Motors not assigned yet. Please, use setMotors() before start.")
def stop(self):
'''
Stop the motors
'''
self.setNeutral()
self._leftMotor.stop()
self._rightMotor.stop()
def setThrottle(self, throttle):
'''
Set the throttle.
@param throttle: Throttle range is [-100, 100], where negative values mean backwards and positive ones mean forwards.
'''
self.setMotionVector(throttle, self.getDirection())
def getThrottle(self):
'''
Get the throttle.
@return: Throttle range is [-100, 100], where negative values mean backwards and positive ones mean forwards.
'''
return self._throttle
def setDirection(self, direction):
'''
Set the direction.
@param direction: Direction range is [-100, 100], where negative values mean left and positive ones mean right.
'''
self.setMotionVector(self.getThrottle(), direction)
def getDirection(self):
'''
Get the direction.
@return: Direction range is [-100, 100], where negative values mean left and positive ones mean right.
'''
return self._direction
def setNeutral(self):
'''
Set the motion to neutral (stopped). Throttle and direction will be zero.
'''
self.setMotionVector(0.0, 0.0)
def setMotionVector(self, throttle, direction):
'''
Set the motion vector (both, throttle and direction) directly.
Actual effect depends on the current driving mode.
@param throttle: Throttle range is [-100, 100], where negative values mean backwards and positive ones mean forwards.
@param direction: Direction range is [-100, 100], where negative values mean left and positive ones mean right.
'''
self._throttle = throttle
self._direction = direction
logging.debug("motion vector=(t:{0}, d:{1})".format(self._throttle, self._direction))
if self._mode == Driver.MODE_NORMAL:
self._setMotionVectorOnNormalMode()
else: #Driver.MODE_ROTATE
self._setMotionVectorOnRotateMode()
def _setMotionVectorOnNormalMode(self):
'''
Set the motion vector on normal driving mode.
'''
if self._throttle != 0.0:
modThrottle = abs(self._throttle)
if modThrottle < Driver.THROTTLE_RANGE_THRESHOLD_1:
if self._direction >= 0.0:
leftThrottle = self._throttle + self._throttle * (self._direction/Driver.DIRECTION_DIV1)
rightThrottle = self._throttle
else:
leftThrottle = self._throttle
rightThrottle = self._throttle - self._throttle * (self._direction/Driver.DIRECTION_DIV1)
elif Driver.THROTTLE_RANGE_THRESHOLD_1 <= modThrottle < Driver.THROTTLE_RANGE_THRESHOLD_2:
if self._direction >= 0.0:
leftThrottle = self._throttle + self._throttle * (self._direction/Driver.DIRECTION_DIV1) \
* ((Driver.THROTTLE_RANGE_THRESHOLD_2 - modThrottle) / Driver.THROTTLE_RANGE_THRESHOLD_DIFF)
rightThrottle = self._throttle - self._throttle * (self._direction/Driver.DIRECTION_DIV2) \
* ((modThrottle - Driver.THROTTLE_RANGE_THRESHOLD_1) / Driver.THROTTLE_RANGE_THRESHOLD_DIFF)
else:
leftThrottle = self._throttle + self._throttle * (self._direction/Driver.DIRECTION_DIV2) \
* ((modThrottle - Driver.THROTTLE_RANGE_THRESHOLD_1) / Driver.THROTTLE_RANGE_THRESHOLD_DIFF)
rightThrottle = self._throttle - self._throttle * (self._direction/Driver.DIRECTION_DIV1) \
* ((Driver.THROTTLE_RANGE_THRESHOLD_2 - modThrottle) / Driver.THROTTLE_RANGE_THRESHOLD_DIFF)
else:
if self._direction >= 0.0:
leftThrottle = self._throttle
rightThrottle = self._throttle - self._throttle * (self._direction/Driver.DIRECTION_DIV2)
else:
leftThrottle = self._throttle + self._throttle * (self._direction/Driver.DIRECTION_DIV2)
rightThrottle = self._throttle
self._leftMotor.setThrottle(leftThrottle)
self._rightMotor.setThrottle(rightThrottle)
else:
self._leftMotor.setNeutralThrottle()
self._rightMotor.setNeutralThrottle()
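    # Worked examples of the mixing above, as (throttle, direction) -> (left, right):
    #   (20, 40): low range,  left  = 20 + 20*(40/50)               = 36.0, right = 20.0
    #   (60, 40): mid range,  left  = 60 + 60*(40/50)*((75-60)/50)  = 74.4,
    #                         right = 60 - 60*(40/200)*((60-25)/50) = 51.6
    #   (90, 40): high range, left  = 90.0, right = 90 - 90*(40/200) = 72.0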
def _setMotionVectorOnRotateMode(self):
'''
Set the motion vector on rotate driving mode.
'''
if self._direction != 0:
leftThrottle = self._direction
rightThrottle = -self._direction
self._leftMotor.setThrottle(leftThrottle)
self._rightMotor.setThrottle(rightThrottle)
else:
self._leftMotor.setNeutralThrottle()
self._rightMotor.setNeutralThrottle()
def setMode(self, mode):
'''
Set driver mode
@param mode: Driving mode. See Driver.MODE_*
'''
if self._mode != mode:
self.setNeutral()
self._mode = mode
def getMode(self):
'''
Get current driver mode
@return: Any of Driver.MODE_*
'''
return self._mode
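# Usage sketch for Driver (a minimal, illustrative sequence; the throttle and
# direction values are arbitrary examples):
#
#   driver = Driver.createForTesting()
#   driver.start()
#   driver.setMotionVector(50.0, 10.0)   # half throttle, slight turn
#   driver.setMode(Driver.MODE_ROTATE)
#   driver.setDirection(30.0)            # rotate in place
#   driver.stop()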
class StabilizedDriver(Driver):
'''
Controls the driver stabilizing the direction
'''
PID_PERIOD = 0.1 #seconds
MAX_ANG_SPEED = 10.0 #degrees / second
@staticmethod
def createForRobot(imu):
'''
Creates a new motor driver for robot context
@param imu: IMU/MPU in order to know the device's attitude.
The sensor will be initialized by the returned object's start-method
@return: The driver object
'''
driver = StabilizedDriver(imu)
driver.setMotors(Motor(1), Motor(0))
return driver
@staticmethod
def createForTesting(imu):
'''
Creates a new motor driver for testing context
@param imu: IMU/MPU in order to know the device's attitude.
The sensor will be initialized by the returned object's start-method
@return: The driver object
'''
driver = StabilizedDriver(imu)
driver.setMotors(MotorDummy(1), MotorDummy(0))
return driver
def __init__(self, sensor):
'''
Constructor
@param sensor: IMU/MPU in order to know the device's attitude.
The sensor will be initialized by this object's start-method.
'''
super().__init__()
self._sensor = sensor
self._directionTarget = 0.0
self._stabilizerPid = Pid(StabilizedDriver.PID_PERIOD, 1, self._readCurrentValues, self._setPidOutput, "PID_{0}".format(type(self).__name__))
self._stabilizerPid.setProportionalConstants([1.0])
self._stabilizerPid.setIntegralConstants([0.0])
def _readCurrentValues(self):
return [self._sensor.readAngSpeedZ()]
    def _setPidOutput(self, pidOutput):
        throttle = super().getThrottle()
        direction = -pidOutput[0]
super().setMotionVector(throttle, direction)
def setMotionVector(self, throttle, direction):
'''
Set the motion vector, that is throttle and direction.
Actual effect depends on the current driving mode.
        The direction sets the angular speed target for turning.
@param throttle: Throttle range is [-100, 100], where negative values mean backwards and positive ones mean forwards.
@param direction: Direction range is [-100, 100], where negative values mean left and positive ones mean right.
'''
if super().getMode() == Driver.MODE_NORMAL:
super().setMotionVector(throttle, super().getDirection())
self._directionTarget = -direction if throttle > 0.0 else direction
self._stabilizerPid.setTargets([self._directionTarget * StabilizedDriver.MAX_ANG_SPEED / 100.0])
if throttle == 0.0 and self._stabilizerPid.isRunning():
self._stabilizerPid.stop()
elif throttle != 0.0 and not self._stabilizerPid.isRunning():
self._stabilizerPid.start()
else:
super().setMotionVector(throttle, direction)
def getDirection(self):
'''
Get the direction.
@return: Direction range is [-100, 100], where negative values mean left and positive ones mean right.
'''
return self._directionTarget
def setMode(self, mode):
'''
Set driver mode
@param mode: Driving mode. See Driver.MODE_*
'''
super().setMode(mode)
if super().getMode() == Driver.MODE_NORMAL and super().getThrottle() != 0.0 \
and not self._stabilizerPid.isRunning():
self._stabilizerPid.start()
else:
self._stabilizerPid.stop()
def start(self):
'''
Starts the driver
'''
self._sensor.start()
super().start()
def stop(self):
'''
Stops the driver
'''
if self._stabilizerPid.isRunning():
self._stabilizerPid.stop()
super().stop()
self._sensor.stop()
def setProportionalPidConstant(self, kp):
'''
Sets the proportional constant (KP) for the stabilization.
@param kp: The proportional constant
'''
self._stabilizerPid.setProportionalConstants([kp])
def getProportionalPidConstant(self):
'''
Gets the proportional constant (KP) for the stabilization.
@return: The proportional constant
'''
return self._stabilizerPid.getProportionalConstants()[0]
def setIntegralPidConstant(self, ki):
'''
Sets the integral constant (KI) for the stabilization.
@param ki: The integral constant
'''
        self._stabilizerPid.setIntegralConstants([ki])
def getIntegralPidConstant(self):
'''
Gets the integral constant (KI) for the stabilization.
@return: The integral constant
'''
return self._stabilizerPid.getIntegralConstants()[0]
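# Usage sketch for StabilizedDriver (illustrative only; SomeImu stands in for
# whatever IMU/MPU class the project provides and is an assumption here):
#
#   imu = SomeImu()
#   driver = StabilizedDriver.createForTesting(imu)
#   driver.start()
#   driver.setMotionVector(40.0, 0.0)        # drive straight; the PID steers the
#                                            # measured yaw rate toward the target
#   driver.setProportionalPidConstant(1.2)   # example tuning value
#   driver.stop()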
|
py | b4021ba4d48cb8c13d7c7d9b367d83320a7d947a | # The MIT License (MIT)
#
# Copyright (c) 2021 Alexander Kirillov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`yozh`
====================================================
This is a CircuitPython library for Yozh robot.
* Author(s): Alexander Kirillov
* Version: 2.0
"""
import gc
from micropython import const  # const() is used by the register definitions below
import board
from adafruit_bus_device.i2c_device import I2CDevice
import time
import simpleio
from digitalio import DigitalInOut, Direction, Pull
from adafruit_vl53l0x import VL53L0X
# now display-related things
import displayio
import adafruit_displayio_ssd1306
import terminalio
# from adafruit_bitmap_font import bitmap_font
from adafruit_display_text.bitmap_label import Label
from adafruit_display_text import wrap_text_to_lines
YOZH_DEFAULT_I2C_ADDR =const(0x11)
# buttons
YOZH_BUTTON_A = DigitalInOut(board.D13)
YOZH_BUTTON_B = DigitalInOut(board.D12)
# buzzer
YOZH_BUZZER = board.D5
# distance sensors XSHUT pins
YOZH_XSHUT_L = DigitalInOut(board.D7) #XSHUT1=I1 - pin D7
YOZH_XSHUT_R = DigitalInOut(board.D25) #XSHUT2=I2 - pin D25
# Registers
# R/W registers
YOZH_REG_MAX_SPEED =const(0)
YOZH_REG_PID_KP =const(2)
YOZH_REG_PID_TI =const(4)
YOZH_REG_PID_TD =const(6)
YOZH_REG_PID_ILIM =const(8)
YOZH_REG_MOTOR_CONFIG =const(10)
YOZH_REG_MOTOR_MODE =const(11)
YOZH_REG_POWER_L =const(12)
YOZH_REG_POWER_R =const(14)
YOZH_REG_DRIVE_DISTANCE =const(16)
YOZH_REG_TURN_ANGLE =const(20)
YOZH_REG_DRIVE_SPEED =const(22)
YOZH_REG_ENC_RESET =const(24)
YOZH_REG_IMU_INIT =const(25)
YOZH_REG_NEOPIXEL_L =const(26)
YOZH_REG_NEOPIXEL_R =const(29)
YOZH_REG_NEOPIXEL_BRIGHTNESS =const(32)
YOZH_REG_LINEARRAY_INIT =const(33)
YOZH_REG_SERVO1 =const(34)
YOZH_REG_SERVO2 =const(36)
#Read-only registers
YOZH_REG_FW_VERSION =const(40)
YOZH_REG_WHO_AM_I =const(42)
YOZH_REG_IMU_STATUS =const(43)
YOZH_REG_ENCODER_L =const(44)
YOZH_REG_ENCODER_R =const(48)
YOZH_REG_SPEED_L =const(52)
YOZH_REG_SPEED_R =const(54)
YOZH_REG_LINEARRAY_RAW =const(56)
YOZH_REG_VSENSE =const(72)
YOZH_REG_ACCEL =const(74)
YOZH_REG_GYRO =const(80)
YOZH_REG_MAG =const(86)
YOZH_REG_YAW =const(92)
YOZH_REG_PITCH =const(94)
YOZH_REG_ROLL =const(96)
YOZH_REG_QUAT =const(100)
YOZH_REG_DRIVE_STATUS =const(116)
# imu constants
gRes = 500.0 / 32768.0 # gyro resolution, in (deg/s)/LSB
aRes = 2.0 / 32768.0 # accelerometer resolution, in g/LSB
class Yozh:
def __init__(self, i2c=board.I2C(), oled=0x3C, address=YOZH_DEFAULT_I2C_ADDR, distance_sensors=True):
self._device = I2CDevice(i2c, address)
self._out_buffer = bytearray(16)
self._in_buffer = bytearray(16)
self.button_A=YOZH_BUTTON_A
self.button_B=YOZH_BUTTON_B
self.encoder_L = 0
self.encoder_R = 0
self.speed_L = 0
self.speed_R = 0
self.ax = 0 #acceleration
self.ay = 0
self.az = 0
self.gx = 0 #gyro
self.gy = 0
self.gz = 0
self._calibrate_W = 70 #reasonable defaults for black and white sensor readings
self._calibrate_B = 950
self._threshold = 500 # black/white threshold
time.sleep(0.2)
with self._device:
result = bytearray(1)
self._device.write(bytes([YOZH_REG_WHO_AM_I]))
self._device.readinto(result)
who_am_i = result[0]
if who_am_i != YOZH_DEFAULT_I2C_ADDR:
print(who_am_i);
raise RuntimeError('Could not find Yozh Bot, is it connected and powered? ')
else:
print("Yozh bot initialized")
# now initialize the display and buttons
if oled is not None:
displayio.release_displays()
self._display_bus = displayio.I2CDisplay(i2c, device_address=oled)
self.display = adafruit_displayio_ssd1306.SSD1306(self._display_bus, width=128, height=64)
self.splash=displayio.Group()
self.display.show(self.splash)
self._fonts = {}
self._textboxes = []
# Draw a label
self.add_textbox(text_position =(10,20), text_scale=2, text="Yozh")
self.button_A.direction = Direction.INPUT
self.button_A.pull = Pull.UP
self.button_B.direction = Direction.INPUT
self.button_B.pull = Pull.UP
if distance_sensors:
YOZH_XSHUT_L.switch_to_output(value=False) #turn off left
YOZH_XSHUT_R.switch_to_output(value=True) #turn on right
# configure i2c address of right sensor
self.distance_R = VL53L0X(i2c)
self.distance_R.set_address(0x30)
            # now, turn on the left one as well; it will use the default address of 0x29
YOZH_XSHUT_L.value = True
self.distance_L = VL53L0X(i2c)
#give names to reflectance sensors
self.A1=0
self.A2=1
self.A3=2
self.A4=3
self.A5=4
self.A6=5
self.A7=6
self.A8=7
# various constants
self.CM_TO_TICKS = 150
self.DEG_TO_TICKS = 14
# basic configuration of PID
self.configure_PID(maxspeed=4200)
self._pid = False
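    # Usage sketch: typical construction (arguments shown are the defaults; pass
    # distance_sensors=False if the VL53L0X boards are not fitted):
    #
    #   bot = Yozh()
    #   print(bot.fw_version(), bot.battery())
    #   bot.set_leds([0, 0, 64])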
######## Firmware version
def fw_version(self):
"""Returns firmware version as a string"""
minor = self._read_8(YOZH_REG_FW_VERSION)
major = self._read_8(YOZH_REG_FW_VERSION + 1)
version="{}.{}"
return(version.format(major,minor))
######## BATTERY LEVEL
def battery(self):
"""Returns battery level, in volts"""
raw = self._read_16(YOZH_REG_VSENSE)
        voltage = raw*6.6/1023.0 # reference voltage = 3.3V; taking into account the voltage divider,
        # a raw reading of 1023 corresponds to 6.6V
return(round(voltage,2))
########## BUTTONS ########################################
def wait_for(self,pin):
while (pin.value):
pass
def is_pressed(self, pin):
return(not pin.value)
def choose_button(self):
while (self.button_A.value and self.button_B.value):
pass
if (not self.button_A.value):
# Button A was pressed
return "A"
else:
return "B"
########## MOTORS ########################################
def set_motors(self, power_left, power_right):
"""Sets the power for motors. Each value ranges from -100..100."""
if power_left>100:
power_left=100
elif power_left<-100:
power_left=-100
if power_right>100:
power_right=100
elif power_right<-100:
power_right=-100
self._write_16_array(YOZH_REG_POWER_L,[(int)(power_left*5), (int)(power_right*5)])
def stop_motors(self):
"""Stops both motors."""
self._write_16_array(YOZH_REG_POWER_L,[0,0])
def get_encoders(self):
"""
Gets and saves values of the two encoders
"""
self.encoder_L = self._read_32(YOZH_REG_ENCODER_L)
self.encoder_R = self._read_32(YOZH_REG_ENCODER_R)
def reset_encoders(self):
self._write_8(YOZH_REG_ENC_RESET, 0x01)
self.encoder_L=0
self.encoder_R=0
self.speed_L=0
self.speed_R=0
def get_speeds(self):
"""
Gets and saves speeds (ticks/s) of the two motors
"""
self.speed_L = self._read_16(YOZH_REG_SPEED_L)
self.speed_R = self._read_16(YOZH_REG_SPEED_R)
########## Motor/PID config ########################################
def configure_PID(self, maxspeed, Kp = None, Ti = None, Td = None, Ilim = None ):
"""
Configures PID.
Maxspeed is motor free rotation speed, in ticks/s.
The rest is documented in yozh.rtfd.org
"""
if Kp is None:
Kp = 0.8/maxspeed
Ti = 0.3
Td = 0.03
Ilim = 1000
data = [round(maxspeed), round(Kp*10000000), round(Ti*1000), round (Td*1000), round(Ilim)]
self._write_16_array(YOZH_REG_MAX_SPEED, data)
def PID_on(self):
self._write_8(YOZH_REG_MOTOR_MODE, 0x02)
self._pid = True
def PID_off(self):
self._write_8(YOZH_REG_MOTOR_MODE, 0x00)
self._pid = False
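    # Usage sketch: tuning the wheel-speed PID (numbers are illustrative; the
    # defaults derived from maxspeed inside configure_PID are a reasonable start):
    #
    #   bot.configure_PID(maxspeed=4200)                                   # derived defaults
    #   bot.configure_PID(maxspeed=4200, Kp=2e-4, Ti=0.3, Td=0.03, Ilim=1000)
    #   bot.PID_on()             # closed-loop speed control
    #   bot.set_motors(50, 50)
    #   bot.PID_off()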
########## DRIVING ########################################
def go_forward(self, distance, speed=50):
self.reset_encoders()
old_pid_mode = self._pid
self.PID_on()
self.set_motors(speed, speed)
target = self.CM_TO_TICKS * distance # travel for given number of cm
while (self.encoder_L+self.encoder_R<target):
self.get_encoders()
self.stop_motors()
# restore old pid setting
if not old_pid_mode:
self.PID_off()
def go_backward(self, distance, speed=50):
self.reset_encoders()
old_pid_mode = self._pid
self.set_motors(-speed, -speed)
target = -self.CM_TO_TICKS * distance # travel for given number of cm
while (self.encoder_L+self.encoder_R>target):
self.get_encoders()
self.stop_motors()
# restore old pid setting
if not old_pid_mode:
self.PID_off()
def turn(self, angle, speed=50):
self.reset_encoders()
target = self.DEG_TO_TICKS * angle # turn by given number of degrees
if angle>0:
self.set_motors(speed, -speed)
while (self.encoder_L-self.encoder_R<target):
self.get_encoders()
#print(self.encoder_L, self.encoder_R)
else:
self.set_motors(-speed, speed)
while (self.encoder_L-self.encoder_R>target):
self.get_encoders()
self.stop_motors()
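    # Usage sketch: simple dead-reckoning moves with the encoder-based helpers above
    # (distances in cm, angles in degrees, speeds in percent; values are examples):
    #
    #   bot.go_forward(20)        # 20 cm forward at the default 50% speed
    #   bot.turn(90, speed=40)    # positive angle: left wheel forward, right wheel back
    #   bot.go_backward(10)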
########## SERVOS ########################################
def set_servo1(self, pos):
"""
Sets servo 1 to given position. Position ranges from 0...1
"""
self._write_16(YOZH_REG_SERVO1, (int) (500+pos*2000))
def set_servo2(self, pos):
"""
Sets servo 2 to given position. Position ranges from 0...1
"""
self._write_16(YOZH_REG_SERVO2, (int) (500+pos*2000))
########## BUZZER ########################################
def buzz(self, freq, dur=0.5):
simpleio.tone(YOZH_BUZZER, freq, duration=dur)
########## IMU ########################################
def IMU_start(self):
self._write_8(YOZH_REG_IMU_INIT, 1)
def IMU_calibrate(self):
self._write_8(YOZH_REG_IMU_INIT, 2)
time.sleep(1.0)
while (self._read_8(YOZH_REG_IMU_STATUS)==2):
pass
def IMU_stop(self):
self._write_8(YOZH_REG_IMU_INIT, 0)
def IMU_status(self):
return(self._read_8(YOZH_REG_IMU_STATUS))
def IMU_get_accel(self):
accel=[0,0,0]
self._read_16_array(YOZH_REG_ACCEL, accel)
self.ax=accel[0]*aRes
self.ay=accel[1]*aRes
self.az=accel[2]*aRes
def IMU_get_gyro(self):
gyro=[0,0,0]
self._read_16_array(YOZH_REG_GYRO, gyro)
self.gx=gyro[0]*gRes
self.gy=gyro[1]*gRes
self.gz=gyro[2]*gRes
def IMU_yaw(self):
return(self._read_16(YOZH_REG_YAW)*0.1)
def IMU_pitch(self):
return(self._read_16(YOZH_REG_PITCH)*0.1)
def IMU_roll(self):
return(self._read_16(YOZH_REG_ROLL)*0.1)
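    # Usage sketch: reading the IMU (values come back in g, deg/s and degrees):
    #
    #   bot.IMU_start()
    #   bot.IMU_calibrate()       # blocks until the firmware reports calibration done
    #   bot.IMU_get_accel()       # updates bot.ax, bot.ay, bot.az
    #   bot.IMU_get_gyro()        # updates bot.gx, bot.gy, bot.gz
    #   heading = bot.IMU_yaw()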
########## LEDS ########################################
def set_led_L(self, color):
"""
Sets color of left LED.
Color should be list of 3 values, R, G, B, each ranging 0...255
e.g. color = [0,0,255]
"""
self._write_8_array(YOZH_REG_NEOPIXEL_L, color)
def set_led_R(self, color):
"""
Sets color of right LED.
Color should be list of 3 values, R, G, B, each ranging 0...255
e.g. color = [0,0,255]
"""
self._write_8_array(YOZH_REG_NEOPIXEL_R, color)
def set_leds(self, color_l, color_r = None):
"""
Sets color of both LEDs.
Each color should be list of 3 values, R, G, B, each ranging 0...255
e.g. color = [0,0,255]
"""
if color_r is None:
color_r=color_l
self._write_8_array(YOZH_REG_NEOPIXEL_L, color_l+color_r)
def set_led_brightness(self, value):
"""
Sets LED brightness
"""
self._write_8(YOZH_REG_NEOPIXEL_BRIGHTNESS, value)
########## REFL. ARRAY ########################################
def linearray_on(self):
"""
Turns the bottom line array of reflectance sensors ON
"""
self._write_8(YOZH_REG_LINEARRAY_INIT, 1)
def linearray_off(self):
"""
Turns the bottom line array of reflectance sensors OFF
"""
self._write_8(YOZH_REG_LINEARRAY_INIT, 0)
def linearray_raw(self,i):
"""
Returns the raw reading of i-th of reflectance sensor, i=0...7
"""
return self._read_16(YOZH_REG_LINEARRAY_RAW+2*i)
def linearray_cal(self,i):
"""
Returns the scaled reading of i-th of reflectance sensor, i=0...7
Results are scaled to be between 0...100
White is 0, black is 100
"""
raw = self._read_16(YOZH_REG_LINEARRAY_RAW+2*i)
if (raw<self._calibrate_W):
return(0.0)
elif (raw>self._calibrate_B):
return(100.0)
else:
return 100.0*(raw-self._calibrate_W)/(self._calibrate_B - self._calibrate_W)
def calibrate(self):
self.linearray_on()
min=1023
max=0
for i in range (8):
x=self.linearray_raw(i)
if (x<min):
min = x
elif (x>max):
max = x
self._calibrate_B = max
self._calibrate_W = min
self._threshold = 0.5*(max+min)
print(max, min)
print("Calibration complete")
print("The two values above should be about 900 and 100.")
def sensor_on_white(self,i):
"""
Is reflectance sensor i (i=0...7) on white?
"""
raw = self._read_16(YOZH_REG_LINEARRAY_RAW+2*i)
return(raw<self._threshold)
def sensor_on_black(self,i):
"""
Is reflectance sensor i (i=0...7) on black?
"""
raw = self._read_16(YOZH_REG_LINEARRAY_RAW+2*i)
return(raw>=self._threshold)
def line_position_white(self):
"""
        returns position of white line under the bot.
"""
upper_bound=self._calibrate_B*0.8+self._calibrate_W*0.2
lower_bound=self._calibrate_B*0.3+self._calibrate_W*0.7
#print(lower_bound, upper_bound)
spread=upper_bound-lower_bound
raw_values = [0]*8
position=0
right=0.0
left=0.0
self._read_16_array(YOZH_REG_LINEARRAY_RAW, raw_values)
i=0
# count sensors on the right of the white line
while (i<8) and (raw_values[i]>lower_bound):
if (raw_values[i]>upper_bound):
right+=1.0
else:
right+=1.0*(raw_values[i]-lower_bound)/spread
i +=1
#print(right, end=' ')
        # now count sensors on the left of the white line
i=7
while (i>=0) and (raw_values[i]>lower_bound):
if (raw_values[i]>upper_bound):
left+=1.0
else:
left+=1.0*(raw_values[i]-lower_bound)/spread
i -=1
#print(left)
return(left-right)
def line_position_black(self):
"""
returns position of black line under the bot.
"""
upper_bound=self._calibrate_B*0.7+self._calibrate_W*0.3
lower_bound=self._calibrate_B*0.2+self._calibrate_W*0.8
#print(lower_bound, upper_bound)
spread=upper_bound-lower_bound
raw_values = [0]*8
position=0
right=0.0
left=0.0
self._read_16_array(YOZH_REG_LINEARRAY_RAW, raw_values)
i=0
while (i<8) and (raw_values[i]<upper_bound):
if (raw_values[i]<lower_bound):
right+=1.0
else:
right+=1.0*(upper_bound-raw_values[i])/spread
i +=1
#print(right, end=' ')
i=7
while (i>=0) and (raw_values[i]<upper_bound):
if (raw_values[i]<lower_bound):
left+=1.0
else:
left+=1.0*(upper_bound-raw_values[i])/spread
i -=1
#print(left)
return(left-right)
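    # Usage sketch: a minimal proportional line follower built on the helpers above.
    # The base speed of 40 and gain of 15 are illustrative, untuned values; flip the
    # sign of the gain if the bot steers away from the line on your sensor layout.
    #
    #   bot.linearray_on()
    #   bot.calibrate()                        # with some sensors over black, some over white
    #   while not bot.is_pressed(bot.button_A):
    #       error = bot.line_position_black()
    #       bot.set_motors(40 + 15 * error, 40 - 15 * error)
    #   bot.stop_motors()
    #   bot.linearray_off()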
########## DISPLAY ########################################
def clear_display(self):
self._textboxes=[]
N=len(self.splash)
for i in range(N):
self.splash.pop()
gc.collect()
def _load_font(self, font):
"""
Load and cache a font if not previously loaded
Return the key of the cached font
:param font: Either terminalio.FONT or the path to the bdf font file
"""
if font is terminalio.FONT:
if "terminal" not in self._fonts:
self._fonts["terminal"] = terminalio.FONT
return "terminal"
        if font not in self._fonts:
            # Imported lazily so boards without the adafruit_bitmap_font library can
            # still use the built-in terminalio.FONT path above.
            from adafruit_bitmap_font import bitmap_font
            self._fonts[font] = bitmap_font.load_font(font)
return font
@staticmethod
def wrap_nicely(string, max_chars):
"""A helper that will return a list of lines with word-break wrapping.
:param str string: The text to be wrapped.
:param int max_chars: The maximum number of characters on a line before wrapping.
"""
return wrap_text_to_lines(string, max_chars)
def add_textbox(
self,
text_position=(0, 0),
text_font=terminalio.FONT,
text_wrap=0,
text_maxlen=0,
text_scale=1,
line_spacing=1.15,
text_anchor_point=(0, 0),
text=None,
):
"""
Add text labels with settings
:param str text_font: The path to your font file for your data text display.
:param text_position: The position of your extracted text on the display in an (x, y) tuple.
Can be a list of tuples for when there's a list of json_paths, for
example.
:param text_wrap: When non-zero, the maximum number of characters on each line before text
is wrapped. (for long text data chunks). Defaults to 0, no wrapping.
:param text_maxlen: The max length of the text. If non-zero, it will be truncated to this
length. Defaults to 0.
:param int text_scale: The factor to scale the default size of the text by
:param float line_spacing: The factor to space the lines apart
:param (float,float) text_anchor_point: Values between 0 and 1 to indicate where the text
position is relative to the label
:param str text: If this is provided, it will set the initial text of the label.
"""
if not self.display:
return(-1)
if not text_wrap:
text_wrap = 0
if not text_maxlen:
text_maxlen = 0
if not isinstance(text_scale, (int, float)) or text_scale < 1:
text_scale = 1
if not isinstance(text_anchor_point, (tuple, list)):
text_anchor_point = (0, 0.5)
if not 0 <= text_anchor_point[0] <= 1 or not 0 <= text_anchor_point[1] <= 1:
raise ValueError("Text anchor point values should be between 0 and 1.")
text_scale = round(text_scale)
gc.collect()
text_field = {
"label": None,
"font": self._load_font(text_font),
"position": text_position,
"wrap": text_wrap,
"maxlen": text_maxlen,
"scale": text_scale,
"line_spacing": line_spacing,
"anchor_point": text_anchor_point,
}
self._textboxes.append(text_field)
text_index = len(self._textboxes) - 1
if text is not None:
self.set_text(text, text_index)
return text_index
def set_text(self, val, index=0):
"""Display text, with indexing into our list of text boxes.
:param str val: The text to be displayed
:param index: Defaults to 0.
"""
if not self.display:
return
# Make sure at least a single label exists
if not self._textboxes:
self.add_textbox()
string = str(val)
if self._textboxes[index]["maxlen"] and len(string) > self._textboxes[index]["maxlen"]:
# too long! shorten it
if len(string) >= 3:
string = string[: self._textboxes[index]["maxlen"] - 3] + "..."
else:
string = string[: self._textboxes[index]["maxlen"]]
index_in_splash = None
if len(string) > 0 and self._textboxes[index]["wrap"]:
lines = self.wrap_nicely(string, self._textboxes[index]["wrap"])
string = "\n".join(lines)
if self._textboxes[index]["label"] is not None:
index_in_splash = self.splash.index(self._textboxes[index]["label"])
if len(string) > 0:
if self._textboxes[index]["label"] is None:
self._textboxes[index]["label"] = Label(
self._fonts[self._textboxes[index]["font"]],
text=string,
scale=self._textboxes[index]["scale"],
)
if index_in_splash is not None:
self.splash[index_in_splash] = self._textboxes[index]["label"]
else:
self.splash.append(self._textboxes[index]["label"])
else:
self._textboxes[index]["label"].text = string
self._textboxes[index]["label"].anchor_point = self._textboxes[index]["anchor_point"]
self._textboxes[index]["label"].anchored_position = self._textboxes[index]["position"]
self._textboxes[index]["label"].line_spacing = self._textboxes[index]["line_spacing"]
elif index_in_splash is not None:
self._textboxes[index]["label"] = None
# Remove the label from splash
if index_in_splash is not None and self._textboxes[index]["label"] is None:
del self.splash[index_in_splash]
gc.collect()
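    # Usage sketch: putting text on the OLED with the two helpers above
    # (positions, scales and the battery read-out are just examples):
    #
    #   bot.clear_display()
    #   title = bot.add_textbox(text_position=(0, 10), text_scale=2, text="Yozh")
    #   status = bot.add_textbox(text_position=(0, 40), text_wrap=20)
    #   bot.set_text("battery: {} V".format(bot.battery()), status)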
########## I2C UTILITY ########################################
def _write_8(self, address, data):
# Write 1 byte of data to the specified register address.
with self._device:
self._device.write(bytes([address & 0xFF,
data]))
def _write_8_array(self, address, data):
# write an array of bytes to specified register address
self._out_buffer[0] = address & 0xFF
l=len(data)
for i in range(l):
self._out_buffer[i+1]=data[i]& 0xFF
with self._device:
self._device.write(self._out_buffer,end=l+1)
def _write_16(self, address, data):
# Write a 16-bit little endian value to the specified register
# address.
with self._device:
self._device.write(bytes([address & 0xFF,
data & 0xFF,
(data >> 8) & 0xFF]))
def _write_16_array(self, address, data):
# write an array of littel endian 16-bit values to specified register address
self._out_buffer[0] = address & 0xFF
l=len(data)
for i in range(l):
self._out_buffer[2*i+1]=data[i] & 0xFF
self._out_buffer[2*i+2]=(data[i]>>8) & 0xFF
with self._device:
self._device.write(self._out_buffer,end=2*l+1)
def _read_8(self, address):
# Read and return a byte from the specified register address.
with self._device:
result = bytearray(1)
self._device.write(bytes([address & 0xFF]))
self._device.readinto(result)
#self._device.write_then_readinto(bytes([address & 0xFF]),result)
return result[0]
def _read_16(self, address):
# Read and return a 16-bit signed little endian value from the
# specified register address.
with self._device:
self._device.write(bytes([address & 0xFF]))
self._device.readinto(self._in_buffer, end = 2)
raw = (self._in_buffer[1] << 8) | self._in_buffer[0]
if (raw & (1<<15)): # sign bit is set
return (raw - (1<<16))
else:
return raw
def _read_16_array(self, address, result_array):
        # Read and save into result_array a sequence of 16-bit little endian
        # values starting from the specified register address.
# FIXME: signed
count=len(result_array)
with self._device:
self._device.write(bytes([address & 0xFF]))
self._device.readinto(self._in_buffer, end = 2*count)
#self._device.write_then_readinto(bytes([address & 0xFF]),self._in_buffer,in_end = 2*count )
for i in range(count):
raw=self._in_buffer[2*i] |(self._in_buffer[2*i+1]<<8)
if (raw & (1<<15)): # sign bit is set
result_array[i] = (raw - (1<<16))
else:
result_array[i] = raw
def _read_32(self, address):
# Read and return a 32-bit signed little endian value from the
# specified register address.
with self._device:
self._device.write(bytes([address & 0xFF]))
self._device.readinto(self._in_buffer, end = 4)
#self._device.write_then_readinto(bytes([address & 0xFF]),self._in_buffer, in_end = 4)
raw = (self._in_buffer[3] << 24) | (self._in_buffer[2] << 16) | (self._in_buffer[1] << 8) | self._in_buffer[0]
if (raw & (1<<31)): # sign bit is set
return (raw - (1<<32))
else:
return raw
|
py | b4021bb5387e58c2eb9bdb13691860abab878262 | from xarray import Dataset, DataArray, open_dataset, merge
from xarray.testing import assert_equal
import pytest
import numpy as np
from xbout.load import open_boutdataset
from xbout.geometries import register_geometry, REGISTERED_GEOMETRIES
@pytest.fixture
def create_example_grid_file(tmp_path_factory):
"""
Mocks up a set of BOUT-like netCDF files, and return the temporary test
directory containing them.
Deletes the temporary directory once that test is done.
"""
# Create grid dataset
arr = np.arange(6).reshape(2, 3)
grid = DataArray(data=arr, name="arr", dims=["x", "y"]).to_dataset()
grid["dy"] = DataArray(np.ones((2, 3)), dims=["x", "y"])
grid = grid.set_coords(["dy"])
# Create temporary directory
save_dir = tmp_path_factory.mktemp("griddata")
# Save
filepath = save_dir.joinpath("grid.nc")
grid.to_netcdf(filepath, engine="netcdf4")
return filepath
class TestOpenGrid:
def test_open_grid(self, create_example_grid_file):
example_grid = create_example_grid_file
with pytest.warns(UserWarning):
result = open_boutdataset(datapath=example_grid)
result = result.drop_vars(["x", "y"])
assert_equal(result, open_dataset(example_grid))
result.close()
def test_open_grid_extra_dims(self, create_example_grid_file, tmp_path_factory):
example_grid = open_dataset(create_example_grid_file)
new_var = DataArray(name="new", data=[[1, 2], [8, 9]], dims=["x", "w"])
dodgy_grid_directory = tmp_path_factory.mktemp("dodgy_grid")
dodgy_grid_path = dodgy_grid_directory.joinpath("dodgy_grid.nc")
merge([example_grid, new_var]).to_netcdf(dodgy_grid_path, engine="netcdf4")
with pytest.warns(
UserWarning, match="drop all variables containing " "the dimensions 'w'"
):
result = open_boutdataset(datapath=dodgy_grid_path)
result = result.drop_vars(["x", "y"])
assert_equal(result, example_grid)
result.close()
def test_open_grid_apply_geometry(self, create_example_grid_file):
@register_geometry(name="Schwarzschild")
def add_schwarzschild_coords(ds, coordinates=None):
ds["event_horizon"] = 4.0
ds["event_horizon"].attrs = ds.attrs.copy()
return ds
example_grid = create_example_grid_file
        result = open_boutdataset(
datapath=example_grid, geometry="Schwarzschild"
)
assert_equal(result["event_horizon"], DataArray(4.0))
# clean up
del REGISTERED_GEOMETRIES["Schwarzschild"]
result.close()
def test_open_grid_chunks(self, create_example_grid_file):
example_grid = create_example_grid_file
with pytest.warns(UserWarning):
result = open_boutdataset(datapath=example_grid, chunks={"x": 4, "y": 5})
result = result.drop_vars(["x", "y"])
assert_equal(result, open_dataset(example_grid))
result.close()
def test_open_grid_chunks_not_in_grid(self, create_example_grid_file):
example_grid = create_example_grid_file
with pytest.warns(UserWarning):
result = open_boutdataset(
datapath=example_grid, chunks={"anonexistantdimension": 5}
)
result = result.drop_vars(["x", "y"])
assert_equal(result, open_dataset(example_grid))
result.close()
|
py | b4021c14be4940a744f3e3c4553b5de354e60bdb | # Generated by Django 2.1.2 on 2018-12-02 04:42
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('name', models.CharField(blank=True, max_length=255, verbose_name='Name of User')),
('qr', models.ImageField(blank=True, null=True, upload_to='')),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='BeerBearCustomer',
fields=[
('user_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'BeerBearCustomer',
'verbose_name_plural': 'BeerBearCustomers',
},
bases=('users.user',),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='BeershopOwner',
fields=[
('user_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'BeershopOwner',
'verbose_name_plural': 'BeershopOwners',
},
bases=('users.user',),
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.AddField(
model_name='user',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AddField(
model_name='user',
name='user_permissions',
field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
),
]
|
py | b4021c221f059aad080ca9d1580171019654cc3b | """.. Ignore pydocstyle D400.
===================
Resolwe Test Runner
===================
"""
import asyncio
import contextlib
import logging
import os
import re
import shutil
import subprocess
import sys
from unittest import mock
import yaml
from channels.db import database_sync_to_async
from django.conf import settings
from django.core.management.base import CommandError
from django.test import override_settings
from django.test.runner import DiscoverRunner, ParallelTestSuite, RemoteTestRunner
from django.utils.crypto import get_random_string
# Make sure we already have the patched FLOW_* available here; otherwise
# resolwe.test.testcases.TransactionTestCase will override them with the module above,
# negating anything we do here with Django's override_settings.
import resolwe.test.testcases.setting_overrides as resolwe_settings
from resolwe.flow.finders import get_finders
from resolwe.flow.managers import manager, state
from resolwe.flow.managers.listener import ExecutorListener
from resolwe.test.utils import generate_process_tag
logger = logging.getLogger(__name__)
SPAWN_PROCESS_REGEX = re.compile(
r'run\s+\{.*?["\']process["\']\s*:\s*["\'](.+?)["\'].*?\}'
)
TESTING_CONTEXT = {
"is_testing": False,
}
class TestingContext:
"""Context manager which maintains current testing status."""
def __enter__(self):
"""Enter testing context."""
TESTING_CONTEXT["is_testing"] = True
def __exit__(self, *args, **kwargs):
"""Exit testing context."""
TESTING_CONTEXT["is_testing"] = False
# Propagate exceptions.
return False
class AtScopeExit:
"""Utility class for calling a function once a context exits."""
def __init__(self, call, *args, **kwargs):
"""Construct a context manager and save arguments.
:param call: The callable to call on exit.
:param args: Positional arguments for the callable.
:param kwargs: Keyword arguments for the callable.
"""
self.call = call
self.args = args
self.kwargs = kwargs
def __enter__(self):
"""Enter the ``with`` context."""
return self
def __exit__(self, *args, **kwargs):
"""Exit the context and call the saved callable."""
self.call(*self.args, **self.kwargs)
return False
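# Usage sketch for AtScopeExit (cleanup_fn and its argument are illustrative):
#
#   with AtScopeExit(cleanup_fn, "argument"):
#       do_work()   # cleanup_fn("argument") runs when the block exits, even on error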
def _manager_setup():
"""Execute setup operations common to serial and parallel testing.
This mostly means state cleanup, such as resetting database
connections and clearing the shared state.
"""
if TESTING_CONTEXT.get("manager_reset", False):
return
TESTING_CONTEXT["manager_reset"] = True
state.update_constants()
manager.reset()
def _sequence_paths(paths):
"""Extend the last components of the given paths with a number.
The method finds the lowest number such that all given paths, when
extended by it, are unique and can be created. The paths are then
also created.
:param paths: The list of paths to be extended and created.
:return: The list of created paths.
"""
seq = 0
while True:
# Note for parallel execution: infinite zigzagging ladders are
# not possible, because the directories are always created in
# the same order. The problem would be if process A succeeded
# in creating data/test_1, but process B would beat it to
# upload/test_1 (so that both would roll back and continue
# with _2, etc.). For B to succeed in creating upload/test_1,
# it must have already succeeded in creating data/test_1,
# meaning A could not possibly have succeeded with data/test_1.
seq += 1
created = []
for base_path in paths:
path = os.path.join(base_path, "test_{}".format(seq))
try:
os.makedirs(path)
created.append(path)
except OSError:
break
if len(created) == len(paths):
return created
# If they're not equal, we failed and need to roll back;
# errors are entirely irrelevant here, removal is purely
# best effort.
for path in created:
try:
os.rmdir(path)
except Exception:
pass
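# Example: given paths ["/srv/data", "/srv/upload"] (illustrative), successive calls
# create and return ["/srv/data/test_1", "/srv/upload/test_1"], then ".../test_2",
# and so on, skipping any suffix for which one of the directories already exists.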
def _create_test_dirs():
"""Create all the testing directories."""
if "test_paths" in TESTING_CONTEXT:
return TESTING_CONTEXT["test_paths"]
items = ["DATA_DIR", "UPLOAD_DIR", "RUNTIME_DIR"]
paths = _sequence_paths([resolwe_settings.FLOW_EXECUTOR_SETTINGS[i] for i in items])
for item, path in zip(items, paths):
resolwe_settings.FLOW_EXECUTOR_SETTINGS[item] = path
TESTING_CONTEXT["test_paths"] = paths
return paths
def _prepare_settings():
"""Prepare and apply settings overrides needed for testing."""
# Override container name prefix setting.
resolwe_settings.FLOW_EXECUTOR_SETTINGS[
"CONTAINER_NAME_PREFIX"
] = "{}_{}_{}".format(
resolwe_settings.FLOW_EXECUTOR_SETTINGS.get("CONTAINER_NAME_PREFIX", "resolwe"),
# NOTE: This is necessary to avoid container name clashes when tests are run from
# different Resolwe code bases on the same system (e.g. on a CI server).
get_random_string(length=6),
os.path.basename(resolwe_settings.FLOW_EXECUTOR_SETTINGS["DATA_DIR"]),
)
return override_settings(
CELERY_ALWAYS_EAGER=True,
FLOW_EXECUTOR=resolwe_settings.FLOW_EXECUTOR_SETTINGS,
FLOW_MANAGER=resolwe_settings.FLOW_MANAGER_SETTINGS,
)
def _custom_worker_init(django_init_worker):
"""Wrap the original worker init to also start the manager."""
def _init_worker(*args, **kwargs):
"""Initialize a :class:`multiprocessing.Pool` worker.
Call the Django's ``ParallelTestSuite.init_worker`` and then
also start the manager infrastructure.
"""
result = django_init_worker(*args, **kwargs)
# Further patch channel names and the like with our current pid,
# so that parallel managers and executors don't clash on the
# same channels and directories.
resolwe_settings.FLOW_MANAGER_SETTINGS[
"REDIS_PREFIX"
] += "-parallel-pid{}".format(os.getpid())
return result
return _init_worker
def _run_in_event_loop(coro, *args, **kwargs):
"""Run a coroutine in a runloop call.
This is needed as the top level call into Resolwe Manager-using
tests. An event loop is started so that it can be used within the
call tree.
:param coro: The coroutine to run with an underlying event loop. All
other arguments given to this function are forwarded to it.
"""
asyncio.get_event_loop().close()
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
task = asyncio.ensure_future(coro(*args, **kwargs), loop=loop)
loop.run_until_complete(task)
loop.close()
return task.result()
async def _run_on_infrastructure(meth, *args, **kwargs):
"""Start the Manager infrastructure and call the given callable.
The method given is run through a serializing wrapper, so that
Django database accesses are correct.
:param meth: The callable to run on the infrastructure. All other
arguments are forwarded to it.
"""
with TestingContext():
_create_test_dirs()
with _prepare_settings():
await database_sync_to_async(_manager_setup)()
with AtScopeExit(manager.state.destroy_channels):
redis_params = getattr(settings, "FLOW_MANAGER", {}).get(
"REDIS_CONNECTION", {}
)
listener = ExecutorListener(redis_params=redis_params)
await listener.clear_queue()
async with listener:
try:
with override_settings(FLOW_MANAGER_SYNC_AUTO_CALLS=True):
result = await database_sync_to_async(meth)(*args, **kwargs)
return result
finally:
listener.terminate()
def _run_manager(meth, *args, **kwargs):
"""Start the Manager properly and nest the given callable in it.
:param meth: The callable to start the environment for; all other
arguments are forwarded to it.
"""
return _run_in_event_loop(_run_on_infrastructure, meth, *args, **kwargs)
class CustomRemoteRunner(RemoteTestRunner):
"""Standard Django remote runner with a custom run method."""
def run(self, *args, **kwargs):
"""Run the superclass method with an underlying event loop."""
# NOTE: An instance of this class is constructed for _each_ batch
# of tests, not just once per pool worker, so some care needs to
# be taken to avoid incremental corruption, such as in
# _create_test_dirs().
return _run_manager(super().run, *args, **kwargs)
class CustomParallelTestSuite(ParallelTestSuite):
"""Standard parallel suite with a custom worker initializer."""
init_worker = _custom_worker_init(ParallelTestSuite.init_worker)
runner_class = CustomRemoteRunner
class ResolweRunner(DiscoverRunner):
"""Resolwe test runner."""
parallel_test_suite = CustomParallelTestSuite
def __init__(self, *args, **kwargs):
"""Initialize test runner."""
self.only_changes_to = kwargs.pop("only_changes_to", None)
self.changes_file_types = kwargs.pop("changes_file_types", None)
# Handle implication first, meanings get inverted later.
self.keep_data = False
if "no_mock_purge" in kwargs:
self.keep_data = True
# mock_purge used to be the default, so keep it that way;
# this means the command line option does the opposite
# (disables it), so the boolean must be inverted here.
self.mock_purge = not kwargs.pop("no_mock_purge", False)
self.keep_data = kwargs.pop("keep_data", self.keep_data)
super().__init__(*args, **kwargs)
@classmethod
def add_arguments(cls, parser):
"""Add command-line arguments.
:param parser: Argument parser instance
"""
super().add_arguments(parser)
parser.add_argument(
"--only-changes-to",
dest="only_changes_to",
help="Only test changes against given Git commit reference",
)
parser.add_argument(
"--changes-file-types",
dest="changes_file_types",
help="File which describes what kind of changes are available",
)
parser.add_argument(
"--keep-data",
dest="keep_data",
action="store_true",
help="Prevent test cases from cleaning up after execution",
)
parser.add_argument(
"--no-mock-purge",
dest="no_mock_purge",
action="store_true",
help="Do not mock purging functions (implies --keep-data)",
)
def build_suite(self, *args, **kwargs):
"""Build test suite."""
suite = super().build_suite(*args, **kwargs)
# Build suite first constructs the parallel suite and then may reduce self.parallel,
# while keeping suite.processes unchanged. We need to propagate the change here to
# avoid spawning more processes than there are databases.
suite.processes = self.parallel
# Augment all test cases with manager state validation logic.
def validate_manager_state(case, teardown):
"""Decorate test case with manager state validation."""
def wrapper(*args, **kwargs):
"""Validate manager state on teardown."""
if manager.sync_counter.value != 0:
case.fail(
"Test has outstanding manager processes. Ensure that all processes have "
"completed or that you have reset the state manually in case you have "
"bypassed the regular manager flow in any way.\n"
"\n"
"Synchronization count: {value} (should be 0)\n"
"".format(value=manager.sync_counter.value,)
)
teardown(*args, **kwargs)
return wrapper
if isinstance(suite, self.parallel_test_suite):
# NOTE: validate_manager_state function cannot be pickled, so it
# cannot be used in parallel tests.
pass
else:
for case in suite:
case.tearDown = validate_manager_state(case, case.tearDown)
return suite
def run_suite(self, suite, **kwargs):
"""Run the test suite with manager workers in the background."""
# Due to the way the app modules are imported, there's no way to
# statically override settings with TEST overrides before e.g.
# resolwe.flow.managers.manager is loaded somewhere in the code
# (the most problematic files are signals.py and
# resolwe.test.testcases.process); the only realistic mechanism
# is to override later and call some sort of commit method in
# the manager.
keep_data_override = override_settings(FLOW_MANAGER_KEEP_DATA=self.keep_data)
keep_data_override.__enter__()
if self.keep_data and self.mock_purge:
purge_mock_os = mock.patch("resolwe.flow.utils.purge.os", wraps=os).start()
purge_mock_os.remove = mock.MagicMock()
purge_mock_shutil = mock.patch(
"resolwe.flow.utils.purge.shutil", wraps=shutil
).start()
purge_mock_shutil.rmtree = mock.MagicMock()
if self.parallel > 1:
return super().run_suite(suite, **kwargs)
return _run_manager(super().run_suite, suite, **kwargs)
def run_tests(self, test_labels, **kwargs):
"""Run tests.
:param test_labels: Labels of tests to run
"""
if self.only_changes_to:
# Check if there are changed files. We need to be able to switch between branches so we
# can correctly detect changes.
repo_status = self._git("status", "--porcelain", "--untracked-files=no")
if repo_status:
print(
"ERROR: Git repository is not clean. Running tests with --only-changes-to",
file=sys.stderr,
)
print(
" requires that the current Git repository is clean.",
file=sys.stderr,
)
return False
print(
"Detecting changed files between {} and HEAD.".format(
self.only_changes_to
)
)
changed_files = self._git(
"diff", "--name-only", self.only_changes_to, "HEAD"
)
changed_files = changed_files.strip().split("\n")
changed_files = [file for file in changed_files if file.strip()]
top_level_path = self._git("rev-parse", "--show-toplevel")
top_level_path = top_level_path.strip()
# Process changed files to discover what they are.
changed_files, tags, tests, full_suite = self.process_changed_files(
changed_files, top_level_path
)
print("Changed files:")
for filename, file_type in changed_files:
print(" {} ({})".format(filename, file_type))
if not changed_files:
print(" none")
print(
"No files have been changed, assuming target is HEAD, running full suite."
)
elif full_suite:
print(
"Non-test code or unknown files have been modified, running full test suite."
)
else:
# Run specific tests/tags.
print("Running with following partial tags: {}".format(", ".join(tags)))
print(
"Running with following partial tests: {}".format(", ".join(tests))
)
failed_tests = 0
# First run with specific tags. Since run_tests may modify self.parallel, we need to store
# it here and restore it later if we also run with specific test labels.
parallel = self.parallel
if tags:
self.tags = tags
failed_tests += super().run_tests(test_labels, **kwargs)
# Then run with specific test labels.
if tests:
self.parallel = parallel
self.tags = set()
failed_tests += super().run_tests(tests, **kwargs)
return failed_tests
return super().run_tests(test_labels, **kwargs)
def _git(self, *args):
"""Helper to run Git command."""
try:
return subprocess.check_output(["git"] + list(args)).decode("utf8").strip()
except subprocess.CalledProcessError:
raise CommandError("Git command failed.")
@contextlib.contextmanager
def git_branch(self, branch):
"""Temporarily switch to a different Git branch."""
current_branch = self._git("rev-parse", "--abbrev-ref", "HEAD")
if current_branch == "HEAD":
# Detached HEAD state, we need to get the actual commit.
current_branch = self._git("rev-parse", "HEAD")
if current_branch != branch:
self._git("checkout", branch)
try:
yield
finally:
if current_branch != branch:
self._git("checkout", current_branch)
def process_changed_files(self, changed_files, top_level_path):
"""Process changed files based on specified patterns.
:param changed_files: A list of changed file pats, relative to top-level path
:param top_level_path: Absolute path to top-level project directory
:return: Tuple (changed_files, tags, tests, full_suite)
"""
result = []
processes = []
tests = []
full_suite = False
types = []
if self.changes_file_types:
# Parse file types metadata.
try:
with open(self.changes_file_types, "r") as definition_file:
types = yaml.load(definition_file, Loader=yaml.FullLoader)
except (OSError, ValueError):
raise CommandError("Failed loading or parsing file types metadata.")
else:
print(
"WARNING: Treating all files as unknown because --changes-file-types option not specified.",
file=sys.stderr,
)
for filename in changed_files:
# Match file type.
file_type = "unknown"
file_type_name = "unknown"
for definition in types:
if re.search(definition["match"], filename):
file_type = definition["type"]
file_type_name = definition.get("name", file_type)
break
result.append((filename, file_type_name))
if file_type in ("unknown", "force_run"):
full_suite = True
elif file_type == "ignore":
# Ignore
pass
elif file_type == "process":
# Resolve process tag.
processes.append(os.path.join(top_level_path, filename))
elif file_type == "test":
# Generate test name.
tests.append(re.sub(r"\.py$", "", filename).replace(os.path.sep, "."))
else:
raise CommandError("Unsupported file type: {}".format(file_type))
# Resolve tags.
tags = self.resolve_process_tags(processes)
return result, tags, tests, full_suite
def find_schemas(self, schema_path):
"""Find process schemas.
:param schema_path: Path where to look for process schemas
:return: Found schemas
"""
schema_matches = []
for root, _, files in os.walk(schema_path):
for schema_file in [os.path.join(root, fn) for fn in files]:
if not schema_file.lower().endswith((".yml", ".yaml")):
continue
with open(schema_file) as fn:
schemas = yaml.load(fn, Loader=yaml.FullLoader)
if not schemas:
print(
"WARNING: Could not read YAML file {}".format(schema_file),
file=sys.stderr,
)
continue
for schema in schemas:
schema_matches.append(schema)
return schema_matches
def find_dependencies(self, schemas):
"""Compute process dependencies.
:param schemas: A list of all discovered process schemas
:return: Process dependency dictionary
"""
dependencies = {}
for schema in schemas:
slug = schema["slug"]
run = schema.get("run", {})
program = run.get("program", None)
language = run.get("language", None)
if language == "workflow":
for step in program:
dependencies.setdefault(step["run"], set()).add(slug)
elif language == "bash":
# Process re-spawn instructions to discover dependencies.
matches = SPAWN_PROCESS_REGEX.findall(program)
if matches:
for match in matches:
dependencies.setdefault(match, set()).add(slug)
return dependencies
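    # Example of the resulting mapping (slugs are illustrative): if workflow
    # "wf-align" has a step running "align-bwa" and bash process "upload-fastq"
    # re-spawns "align-bwa", then dependencies == {"align-bwa": {"wf-align", "upload-fastq"}}.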
def resolve_process_tags(self, files):
"""Resolve process tags.
:param files: List of changed process files
:return: Test tags that need to be run
"""
processes_paths = []
for finder in get_finders():
processes_paths.extend(finder.find_processes())
process_schemas = []
for proc_path in processes_paths:
process_schemas.extend(self.find_schemas(proc_path))
# Switch to source branch and get all the schemas from there as well, since some schemas
# might have been removed.
with self.git_branch(self.only_changes_to):
for proc_path in processes_paths:
process_schemas.extend(self.find_schemas(proc_path))
dependencies = self.find_dependencies(process_schemas)
processes = set()
def load_process_slugs(filename):
"""Add all process slugs from specified file."""
with open(filename, "r") as process_file:
data = yaml.load(process_file, Loader=yaml.FullLoader)
for process in data:
# Add all process slugs.
processes.add(process["slug"])
for filename in files:
try:
load_process_slugs(filename)
except FileNotFoundError:
# File was removed, so we will handle it below when we check the original branch.
pass
# Switch to source branch and check modified files there as well.
with self.git_branch(self.only_changes_to):
for filename in files:
try:
load_process_slugs(filename)
except FileNotFoundError:
# File was added, so it has already been handled.
pass
# Add all dependencies.
dep_processes = set()
while processes:
process = processes.pop()
if process in dep_processes:
continue
dep_processes.add(process)
processes.update(dependencies.get(process, set()))
tags = set()
for process in dep_processes:
tags.add(generate_process_tag(process))
return tags
def is_testing():
"""Return current testing status."""
return TESTING_CONTEXT["is_testing"]
|
py | b4021df631d8b37efa42aa8b32c73b6de36ffa8c | """
Masked arrays add-ons.
A collection of utilities for `numpy.ma`.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $
"""
from __future__ import division, absolute_import, print_function
__all__ = [
'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d',
'atleast_3d', 'average', 'clump_masked', 'clump_unmasked',
'column_stack', 'compress_cols', 'compress_nd', 'compress_rowcols',
'compress_rows', 'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot',
'dstack', 'ediff1d', 'flatnotmasked_contiguous', 'flatnotmasked_edges',
'hsplit', 'hstack', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols',
'mask_rows', 'masked_all', 'masked_all_like', 'median', 'mr_',
'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack',
'setdiff1d', 'setxor1d', 'unique', 'union1d', 'vander', 'vstack',
]
import itertools
import warnings
from . import core as ma
from .core import (
MaskedArray, MAError, add, array, asarray, concatenate, filled,
getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or,
nomask, ones, sort, zeros, getdata
)
import numpy as np
from numpy import ndarray, array as nxarray
import numpy.core.umath as umath
from numpy.lib.index_tricks import AxisConcatenator
def issequence(seq):
"""
Is seq a sequence (ndarray, list or tuple)?
"""
if isinstance(seq, (ndarray, tuple, list)):
return True
return False
def count_masked(arr, axis=None):
"""
Count the number of masked elements along the given axis.
Parameters
----------
arr : array_like
An array with (possibly) masked elements.
axis : int, optional
Axis along which to count. If None (default), a flattened
version of the array is used.
Returns
-------
count : int, ndarray
The total number of masked elements (axis=None) or the number
of masked elements along each slice of the given axis.
See Also
--------
MaskedArray.count : Count non-masked elements.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(9).reshape((3,3))
>>> a = ma.array(a)
>>> a[1, 0] = ma.masked
>>> a[1, 2] = ma.masked
>>> a[2, 1] = ma.masked
>>> a
masked_array(data =
[[0 1 2]
[-- 4 --]
[6 -- 8]],
mask =
[[False False False]
[ True False True]
[False True False]],
fill_value=999999)
>>> ma.count_masked(a)
3
When the `axis` keyword is used an array is returned.
>>> ma.count_masked(a, axis=0)
array([1, 1, 1])
>>> ma.count_masked(a, axis=1)
array([0, 2, 1])
"""
m = getmaskarray(arr)
return m.sum(axis)
def masked_all(shape, dtype=float):
"""
Empty masked array with all elements masked.
Return an empty masked array of the given shape and dtype, where all the
data are masked.
Parameters
----------
shape : tuple
Shape of the required MaskedArray.
dtype : dtype, optional
Data type of the output.
Returns
-------
a : MaskedArray
A masked array with all data masked.
See Also
--------
masked_all_like : Empty masked array modelled on an existing array.
Examples
--------
>>> import numpy.ma as ma
>>> ma.masked_all((3, 3))
masked_array(data =
[[-- -- --]
[-- -- --]
[-- -- --]],
mask =
[[ True True True]
[ True True True]
[ True True True]],
fill_value=1e+20)
The `dtype` parameter defines the underlying data type.
>>> a = ma.masked_all((3, 3))
>>> a.dtype
dtype('float64')
>>> a = ma.masked_all((3, 3), dtype=np.int32)
>>> a.dtype
dtype('int32')
"""
a = masked_array(np.empty(shape, dtype),
mask=np.ones(shape, make_mask_descr(dtype)))
return a
def masked_all_like(arr):
"""
Empty masked array with the properties of an existing array.
Return an empty masked array of the same shape and dtype as
the array `arr`, where all the data are masked.
Parameters
----------
arr : ndarray
An array describing the shape and dtype of the required MaskedArray.
Returns
-------
a : MaskedArray
A masked array with all data masked.
Raises
------
AttributeError
If `arr` doesn't have a shape attribute (i.e. not an ndarray)
See Also
--------
masked_all : Empty masked array with all elements masked.
Examples
--------
>>> import numpy.ma as ma
>>> arr = np.zeros((2, 3), dtype=np.float32)
>>> arr
array([[ 0., 0., 0.],
[ 0., 0., 0.]], dtype=float32)
>>> ma.masked_all_like(arr)
masked_array(data =
[[-- -- --]
[-- -- --]],
mask =
[[ True True True]
[ True True True]],
fill_value=1e+20)
The dtype of the masked array matches the dtype of `arr`.
>>> arr.dtype
dtype('float32')
>>> ma.masked_all_like(arr).dtype
dtype('float32')
"""
a = np.empty_like(arr).view(MaskedArray)
a._mask = np.ones(a.shape, dtype=make_mask_descr(a.dtype))
return a
#####--------------------------------------------------------------------------
#---- --- Standard functions ---
#####--------------------------------------------------------------------------
class _fromnxfunction:
"""
Defines a wrapper to adapt NumPy functions to masked arrays.
An instance of `_fromnxfunction` can be called with the same parameters
as the wrapped NumPy function. The docstring of `newfunc` is adapted from
the wrapped function as well, see `getdoc`.
Parameters
----------
funcname : str
The name of the function to be adapted. The function should be
in the NumPy namespace (i.e. ``np.funcname``).
"""
def __init__(self, funcname):
self.__name__ = funcname
self.__doc__ = self.getdoc()
def getdoc(self):
"""
Retrieve the docstring and signature from the function.
The ``__doc__`` attribute of the function is used as the docstring for
the new masked array version of the function. A note on application
of the function to the mask is appended.
.. warning::
If the function docstring already contained a Notes section, the
new docstring will have two Notes sections instead of appending a note
to the existing section.
Parameters
----------
None
"""
npfunc = getattr(np, self.__name__, None)
doc = getattr(npfunc, '__doc__', None)
if doc:
sig = self.__name__ + ma.get_object_signature(npfunc)
locdoc = "Notes\n-----\nThe function is applied to both the _data"\
" and the _mask, if any."
return '\n'.join((sig, doc, locdoc))
return
def __call__(self, *args, **params):
func = getattr(np, self.__name__)
if len(args) == 1:
x = args[0]
if isinstance(x, ndarray):
_d = func(x.__array__(), **params)
_m = func(getmaskarray(x), **params)
return masked_array(_d, mask=_m)
elif isinstance(x, tuple) or isinstance(x, list):
_d = func(tuple([np.asarray(a) for a in x]), **params)
_m = func(tuple([getmaskarray(a) for a in x]), **params)
return masked_array(_d, mask=_m)
else:
arrays = []
args = list(args)
while len(args) > 0 and issequence(args[0]):
arrays.append(args.pop(0))
res = []
for x in arrays:
_d = func(np.asarray(x), *args, **params)
_m = func(getmaskarray(x), *args, **params)
res.append(masked_array(_d, mask=_m))
return res
atleast_1d = _fromnxfunction('atleast_1d')
atleast_2d = _fromnxfunction('atleast_2d')
atleast_3d = _fromnxfunction('atleast_3d')
#atleast_1d = np.atleast_1d
#atleast_2d = np.atleast_2d
#atleast_3d = np.atleast_3d
vstack = row_stack = _fromnxfunction('vstack')
hstack = _fromnxfunction('hstack')
column_stack = _fromnxfunction('column_stack')
dstack = _fromnxfunction('dstack')
hsplit = _fromnxfunction('hsplit')
diagflat = _fromnxfunction('diagflat')
#####--------------------------------------------------------------------------
#----
#####--------------------------------------------------------------------------
def flatten_inplace(seq):
"""Flatten a sequence in place."""
k = 0
while (k != len(seq)):
while hasattr(seq[k], '__iter__'):
seq[k:(k + 1)] = seq[k]
k += 1
return seq
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
"""
(This docstring should be overwritten)
"""
arr = array(arr, copy=False, subok=True)
nd = arr.ndim
if axis < 0:
axis += nd
if (axis >= nd):
raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
% (axis, nd))
ind = [0] * (nd - 1)
i = np.zeros(nd, 'O')
indlist = list(range(nd))
indlist.remove(axis)
i[axis] = slice(None, None)
outshape = np.asarray(arr.shape).take(indlist)
i.put(indlist, ind)
j = i.copy()
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
# if res is a number, then we have a smaller output array
asscalar = np.isscalar(res)
if not asscalar:
try:
len(res)
except TypeError:
asscalar = True
# Note: we shouldn't set the dtype of the output from the first result
# so we force the type to object, and build a list of dtypes. We'll
# just take the largest, to avoid some downcasting
dtypes = []
if asscalar:
dtypes.append(np.asarray(res).dtype)
outarr = zeros(outshape, object)
outarr[tuple(ind)] = res
Ntot = np.product(outshape)
k = 1
while k < Ntot:
# increment the index
ind[-1] += 1
n = -1
while (ind[n] >= outshape[n]) and (n > (1 - nd)):
ind[n - 1] += 1
ind[n] = 0
n -= 1
i.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
outarr[tuple(ind)] = res
dtypes.append(asarray(res).dtype)
k += 1
else:
res = array(res, copy=False, subok=True)
j = i.copy()
j[axis] = ([slice(None, None)] * res.ndim)
j.put(indlist, ind)
Ntot = np.product(outshape)
holdshape = outshape
outshape = list(arr.shape)
outshape[axis] = res.shape
dtypes.append(asarray(res).dtype)
outshape = flatten_inplace(outshape)
outarr = zeros(outshape, object)
outarr[tuple(flatten_inplace(j.tolist()))] = res
k = 1
while k < Ntot:
# increment the index
ind[-1] += 1
n = -1
while (ind[n] >= holdshape[n]) and (n > (1 - nd)):
ind[n - 1] += 1
ind[n] = 0
n -= 1
i.put(indlist, ind)
j.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
outarr[tuple(flatten_inplace(j.tolist()))] = res
dtypes.append(asarray(res).dtype)
k += 1
max_dtypes = np.dtype(np.asarray(dtypes).max())
if not hasattr(arr, '_mask'):
result = np.asarray(outarr, dtype=max_dtypes)
else:
result = asarray(outarr, dtype=max_dtypes)
result.fill_value = ma.default_fill_value(result)
return result
apply_along_axis.__doc__ = np.apply_along_axis.__doc__
def apply_over_axes(func, a, axes):
"""
(This docstring will be overwritten)
"""
val = asarray(a)
N = a.ndim
if array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = ma.expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError("function is not returning "
"an array of the correct shape")
return val
if apply_over_axes.__doc__ is not None:
apply_over_axes.__doc__ = np.apply_over_axes.__doc__[
:np.apply_over_axes.__doc__.find('Notes')].rstrip() + \
"""
Examples
--------
>>> a = ma.arange(24).reshape(2,3,4)
>>> a[:,0,1] = ma.masked
>>> a[:,1,:] = ma.masked
>>> print a
[[[0 -- 2 3]
[-- -- -- --]
[8 9 10 11]]
[[12 -- 14 15]
[-- -- -- --]
[20 21 22 23]]]
>>> print ma.apply_over_axes(ma.sum, a, [0,2])
[[[46]
[--]
[124]]]
Tuple axis arguments to ufuncs are equivalent:
>>> print ma.sum(a, axis=(0,2)).reshape((1,-1,1))
[[[46]
[--]
[124]]]
"""
def average(a, axis=None, weights=None, returned=False):
"""
Return the weighted average of array over the given axis.
Parameters
----------
a : array_like
Data to be averaged.
Masked entries are not taken into account in the computation.
axis : int, optional
Axis along which the average is computed. The default is to compute
the average of the flattened array.
weights : array_like, optional
The importance that each element has in the computation of the average.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If ``weights=None``, then all data in `a` are assumed to have a
weight equal to one. If `weights` is complex, the imaginary parts
are ignored.
returned : bool, optional
Flag indicating whether a tuple ``(result, sum of weights)``
should be returned as output (True), or just the result (False).
Default is False.
Returns
-------
average, [sum_of_weights] : (tuple of) scalar or MaskedArray
The average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `np.float64`
if `a` is of integer type and floats smaller than `float64`, or the
input data-type, otherwise. If returned, `sum_of_weights` is always
`float64`.
Examples
--------
>>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True])
>>> np.ma.average(a, weights=[3, 1, 0, 0])
1.25
>>> x = np.ma.arange(6.).reshape(3, 2)
>>> print x
[[ 0. 1.]
[ 2. 3.]
[ 4. 5.]]
>>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3],
... returned=True)
>>> print avg
[2.66666666667 3.66666666667]
"""
a = asarray(a)
mask = a.mask
ash = a.shape
if ash == ():
ash = (1,)
if axis is None:
if mask is nomask:
if weights is None:
n = a.sum(axis=None)
d = float(a.size)
else:
w = filled(weights, 0.0).ravel()
n = umath.add.reduce(a._data.ravel() * w)
d = umath.add.reduce(w)
del w
else:
if weights is None:
n = a.filled(0).sum(axis=None)
d = float(umath.add.reduce((~mask).ravel()))
else:
w = array(filled(weights, 0.0), float, mask=mask).ravel()
n = add.reduce(a.ravel() * w)
d = add.reduce(w)
del w
else:
if mask is nomask:
if weights is None:
d = ash[axis] * 1.0
n = add.reduce(a._data, axis)
else:
w = filled(weights, 0.0)
wsh = w.shape
if wsh == ():
wsh = (1,)
if wsh == ash:
w = np.array(w, float, copy=0)
n = add.reduce(a * w, axis)
d = add.reduce(w, axis)
del w
elif wsh == (ash[axis],):
r = [None] * len(ash)
r[axis] = slice(None, None, 1)
w = eval("w[" + repr(tuple(r)) + "] * ones(ash, float)")
n = add.reduce(a * w, axis)
d = add.reduce(w, axis, dtype=float)
del w, r
else:
raise ValueError('average: weights wrong shape.')
else:
if weights is None:
n = add.reduce(a, axis)
d = umath.add.reduce((~mask), axis=axis, dtype=float)
else:
w = filled(weights, 0.0)
wsh = w.shape
if wsh == ():
wsh = (1,)
if wsh == ash:
w = array(w, dtype=float, mask=mask, copy=0)
n = add.reduce(a * w, axis)
d = add.reduce(w, axis, dtype=float)
elif wsh == (ash[axis],):
r = [None] * len(ash)
r[axis] = slice(None, None, 1)
w = eval("w[" + repr(tuple(r)) +
"] * masked_array(ones(ash, float), mask)")
n = add.reduce(a * w, axis)
d = add.reduce(w, axis, dtype=float)
else:
raise ValueError('average: weights wrong shape.')
del w
if n is masked or d is masked:
return masked
result = n / d
del n
if isinstance(result, MaskedArray):
if ((axis is None) or (axis == 0 and a.ndim == 1)) and \
(result.mask is nomask):
result = result._data
if returned:
if not isinstance(d, MaskedArray):
d = masked_array(d)
if isinstance(d, ndarray) and (not d.shape == result.shape):
d = ones(result.shape, dtype=float) * d
if returned:
return result, d
else:
return result
def median(a, axis=None, out=None, overwrite_input=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int, optional
Axis along which the medians are computed. The default (None) is
to compute the median along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. Note that, if `overwrite_input` is True, and the input
is not already an `ndarray`, an error will be raised.
Returns
-------
median : ndarray
A new array holding the result is returned unless out is
specified, in which case a reference to out is returned.
Return data-type is `float64` for integers and floats smaller than
`float64`, or the input data-type, otherwise.
See Also
--------
mean
Notes
-----
Given a vector ``V`` with ``N`` non masked values, the median of ``V``
is the middle value of a sorted copy of ``V`` (``Vs``) - i.e.
``Vs[(N-1)/2]``, when ``N`` is odd, or ``{Vs[N/2 - 1] + Vs[N/2]}/2``
when ``N`` is even.
Examples
--------
>>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4)
>>> np.ma.median(x)
1.5
>>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4)
>>> np.ma.median(x)
2.5
>>> np.ma.median(x, axis=-1, overwrite_input=True)
masked_array(data = [ 2. 5.],
mask = False,
fill_value = 1e+20)
"""
if not hasattr(a, 'mask') or np.count_nonzero(a.mask) == 0:
return masked_array(np.median(getdata(a, subok=True), axis=axis,
out=out, overwrite_input=overwrite_input), copy=False)
if overwrite_input:
if axis is None:
asorted = a.ravel()
asorted.sort()
else:
a.sort(axis=axis)
asorted = a
else:
asorted = sort(a, axis=axis)
if axis is None:
axis = 0
elif axis < 0:
axis += a.ndim
counts = asorted.shape[axis] - (asorted.mask).sum(axis=axis)
h = counts // 2
# create indexing mesh grid for all but reduced axis
axes_grid = [np.arange(x) for i, x in enumerate(asorted.shape)
if i != axis]
ind = np.meshgrid(*axes_grid, sparse=True, indexing='ij')
# insert indices of low and high median
ind.insert(axis, h - 1)
low = asorted[ind]
ind[axis] = h
high = asorted[ind]
# duplicate high if odd number of elements so mean does nothing
odd = counts % 2 == 1
if asorted.ndim == 1:
if odd:
low = high
else:
low[odd] = high[odd]
if np.issubdtype(asorted.dtype, np.inexact):
# avoid inf / x = masked
s = np.ma.sum([low, high], axis=0, out=out)
np.true_divide(s.data, 2., casting='unsafe', out=s.data)
else:
s = np.ma.mean([low, high], axis=0, out=out)
return s
def compress_nd(x, axis=None):
"""Supress slices from multiple dimensions which contain masked values.
Parameters
----------
x : array_like, MaskedArray
The array to operate on. If not a MaskedArray instance (or if no array
elements are masked), `x` is interpreted as a MaskedArray with `mask`
set to `nomask`.
axis : tuple of ints or int, optional
Which dimensions to suppress slices from can be configured with this
parameter.
- If axis is a tuple of ints, those are the axes to suppress slices from.
- If axis is an int, then that is the only axis to suppress slices from.
- If axis is None, all axes are selected.
Returns
-------
compress_array : ndarray
The compressed array.
"""
x = asarray(x)
m = getmask(x)
# Set axis to tuple of ints
if isinstance(axis, int):
axis = (axis,)
elif axis is None:
axis = tuple(range(x.ndim))
elif not isinstance(axis, tuple):
raise ValueError('Invalid type for axis argument')
# Check axis input
axis = [ax + x.ndim if ax < 0 else ax for ax in axis]
if not all(0 <= ax < x.ndim for ax in axis):
raise ValueError("'axis' entry is out of bounds")
if len(axis) != len(set(axis)):
raise ValueError("duplicate value in 'axis'")
# Nothing is masked: return x
if m is nomask or not m.any():
return x._data
# All is masked: return empty
if m.all():
return nxarray([])
# Filter elements through boolean indexing
data = x._data
for ax in axis:
axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim)))
data = data[(slice(None),)*ax + (~m.any(axis=axes),)]
return data
def compress_rowcols(x, axis=None):
"""
Suppress the rows and/or columns of a 2-D array that contain
masked values.
The suppression behavior is selected with the `axis` parameter.
- If axis is None, both rows and columns are suppressed.
- If axis is 0, only rows are suppressed.
- If axis is 1 or -1, only columns are suppressed.
Parameters
----------
x : array_like, MaskedArray
The array to operate on. If not a MaskedArray instance (or if no array
elements are masked), `x` is interpreted as a MaskedArray with
`mask` set to `nomask`. Must be a 2D array.
axis : int, optional
Axis along which to perform the operation. Default is None.
Returns
-------
compressed_array : ndarray
The compressed array.
Examples
--------
>>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> x
masked_array(data =
[[-- 1 2]
[-- 4 5]
[6 7 8]],
mask =
[[ True False False]
[ True False False]
[False False False]],
fill_value = 999999)
>>> np.ma.compress_rowcols(x)
array([[7, 8]])
>>> np.ma.compress_rowcols(x, 0)
array([[6, 7, 8]])
>>> np.ma.compress_rowcols(x, 1)
array([[1, 2],
[4, 5],
[7, 8]])
"""
if asarray(x).ndim != 2:
raise NotImplementedError("compress_rowcols works for 2D arrays only.")
return compress_nd(x, axis=axis)
def compress_rows(a):
"""
Suppress whole rows of a 2-D array that contain masked values.
This is equivalent to ``np.ma.compress_rowcols(a, 0)``, see
`extras.compress_rowcols` for details.
See Also
--------
extras.compress_rowcols
"""
a = asarray(a)
if a.ndim != 2:
raise NotImplementedError("compress_rows works for 2D arrays only.")
return compress_rowcols(a, 0)
def compress_cols(a):
"""
Suppress whole columns of a 2-D array that contain masked values.
This is equivalent to ``np.ma.compress_rowcols(a, 1)``, see
`extras.compress_rowcols` for details.
See Also
--------
extras.compress_rowcols
"""
a = asarray(a)
if a.ndim != 2:
raise NotImplementedError("compress_cols works for 2D arrays only.")
return compress_rowcols(a, 1)
def mask_rowcols(a, axis=None):
"""
Mask rows and/or columns of a 2D array that contain masked values.
Mask whole rows and/or columns of a 2D array that contain
masked values. The masking behavior is selected using the
`axis` parameter.
- If `axis` is None, rows *and* columns are masked.
- If `axis` is 0, only rows are masked.
- If `axis` is 1 or -1, only columns are masked.
Parameters
----------
a : array_like, MaskedArray
The array to mask. If not a MaskedArray instance (or if no array
elements are masked), the result is a MaskedArray with `mask` set
to `nomask` (False). Must be a 2D array.
axis : int, optional
Axis along which to perform the operation. If None, applies to a
flattened version of the array.
Returns
-------
a : MaskedArray
A modified version of the input array, masked depending on the value
of the `axis` parameter.
Raises
------
NotImplementedError
If input array `a` is not 2D.
See Also
--------
mask_rows : Mask rows of a 2D array that contain masked values.
mask_cols : Mask cols of a 2D array that contain masked values.
masked_where : Mask where a condition is met.
Notes
-----
The input array's mask is modified by this function.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.zeros((3, 3), dtype=np.int)
>>> a[1, 1] = 1
>>> a
array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
>>> a = ma.masked_equal(a, 1)
>>> a
masked_array(data =
[[0 0 0]
[0 -- 0]
[0 0 0]],
mask =
[[False False False]
[False True False]
[False False False]],
fill_value=999999)
>>> ma.mask_rowcols(a)
masked_array(data =
[[0 -- 0]
[-- -- --]
[0 -- 0]],
mask =
[[False True False]
[ True True True]
[False True False]],
fill_value=999999)
"""
a = array(a, subok=False)
if a.ndim != 2:
raise NotImplementedError("mask_rowcols works for 2D arrays only.")
m = getmask(a)
# Nothing is masked: return a
if m is nomask or not m.any():
return a
maskedval = m.nonzero()
a._mask = a._mask.copy()
if not axis:
a[np.unique(maskedval[0])] = masked
if axis in [None, 1, -1]:
a[:, np.unique(maskedval[1])] = masked
return a
def mask_rows(a, axis=None):
"""
Mask rows of a 2D array that contain masked values.
This function is a shortcut to ``mask_rowcols`` with `axis` equal to 0.
See Also
--------
mask_rowcols : Mask rows and/or columns of a 2D array.
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.zeros((3, 3), dtype=np.int)
>>> a[1, 1] = 1
>>> a
array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
>>> a = ma.masked_equal(a, 1)
>>> a
masked_array(data =
[[0 0 0]
[0 -- 0]
[0 0 0]],
mask =
[[False False False]
[False True False]
[False False False]],
fill_value=999999)
>>> ma.mask_rows(a)
masked_array(data =
[[0 0 0]
[-- -- --]
[0 0 0]],
mask =
[[False False False]
[ True True True]
[False False False]],
fill_value=999999)
"""
return mask_rowcols(a, 0)
def mask_cols(a, axis=None):
"""
Mask columns of a 2D array that contain masked values.
This function is a shortcut to ``mask_rowcols`` with `axis` equal to 1.
See Also
--------
mask_rowcols : Mask rows and/or columns of a 2D array.
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.zeros((3, 3), dtype=np.int)
>>> a[1, 1] = 1
>>> a
array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
>>> a = ma.masked_equal(a, 1)
>>> a
masked_array(data =
[[0 0 0]
[0 -- 0]
[0 0 0]],
mask =
[[False False False]
[False True False]
[False False False]],
fill_value=999999)
>>> ma.mask_cols(a)
masked_array(data =
[[0 -- 0]
[0 -- 0]
[0 -- 0]],
mask =
[[False True False]
[False True False]
[False True False]],
fill_value=999999)
"""
return mask_rowcols(a, 1)
def dot(a, b, strict=False):
"""
Return the dot product of two arrays.
.. note::
Works only with 2-D arrays at the moment.
This function is the equivalent of `numpy.dot` that takes masked values
into account, see `numpy.dot` for details.
Parameters
----------
a, b : ndarray
Inputs arrays.
strict : bool, optional
Whether masked data are propagated (True) or set to 0 (False) for the
computation. Default is False.
Propagating the mask means that if a masked value appears in a row or
column, the whole row or column is considered masked.
See Also
--------
numpy.dot : Equivalent function for ndarrays.
Examples
--------
>>> a = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]])
>>> b = ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]])
>>> np.ma.dot(a, b)
masked_array(data =
[[21 26]
[45 64]],
mask =
[[False False]
[False False]],
fill_value = 999999)
>>> np.ma.dot(a, b, strict=True)
masked_array(data =
[[-- --]
[-- 64]],
mask =
[[ True True]
[ True False]],
fill_value = 999999)
"""
#!!!: Works only with 2D arrays. There should be a way to get it to run with higher dimension
if strict and (a.ndim == 2) and (b.ndim == 2):
a = mask_rows(a)
b = mask_cols(b)
return a.dot(b)
#####--------------------------------------------------------------------------
#---- --- arraysetops ---
#####--------------------------------------------------------------------------
def ediff1d(arr, to_end=None, to_begin=None):
"""
Compute the differences between consecutive elements of an array.
This function is the equivalent of `numpy.ediff1d` that takes masked
values into account, see `numpy.ediff1d` for details.
See Also
--------
numpy.ediff1d : Equivalent function for ndarrays.
"""
arr = ma.asanyarray(arr).flat
ed = arr[1:] - arr[:-1]
arrays = [ed]
#
if to_begin is not None:
arrays.insert(0, to_begin)
if to_end is not None:
arrays.append(to_end)
#
if len(arrays) != 1:
# We'll save ourselves a copy of a potentially large array in the common
# case where neither to_begin nor to_end was given.
ed = hstack(arrays)
#
return ed
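# Illustrative sketch, not part of the original module: unlike np.ediff1d,
# the masked version above propagates the mask, e.g.
#   x = masked_array([1, 2, 4, 7], mask=[0, 1, 0, 0])
#   ediff1d(x)
# gives three differences, of which the two involving the masked entry are
# themselves masked, i.e. [--, --, 3].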
def unique(ar1, return_index=False, return_inverse=False):
"""
Finds the unique elements of an array.
Masked values are considered the same element (masked). The output array
is always a masked array. See `numpy.unique` for more details.
See Also
--------
numpy.unique : Equivalent function for ndarrays.
"""
output = np.unique(ar1,
return_index=return_index,
return_inverse=return_inverse)
if isinstance(output, tuple):
output = list(output)
output[0] = output[0].view(MaskedArray)
output = tuple(output)
else:
output = output.view(MaskedArray)
return output
def intersect1d(ar1, ar2, assume_unique=False):
"""
Returns the unique elements common to both arrays.
Masked values are considered equal one to the other.
The output is always a masked array.
See `numpy.intersect1d` for more details.
See Also
--------
numpy.intersect1d : Equivalent function for ndarrays.
Examples
--------
>>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
>>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
>>> intersect1d(x, y)
masked_array(data = [1 3 --],
mask = [False False True],
fill_value = 999999)
"""
if assume_unique:
aux = ma.concatenate((ar1, ar2))
else:
# Might be faster than unique( intersect1d( ar1, ar2 ) )?
aux = ma.concatenate((unique(ar1), unique(ar2)))
aux.sort()
return aux[:-1][aux[1:] == aux[:-1]]
def setxor1d(ar1, ar2, assume_unique=False):
"""
Set exclusive-or of 1-D arrays with unique elements.
The output is always a masked array. See `numpy.setxor1d` for more details.
See Also
--------
numpy.setxor1d : Equivalent function for ndarrays.
"""
if not assume_unique:
ar1 = unique(ar1)
ar2 = unique(ar2)
aux = ma.concatenate((ar1, ar2))
if aux.size == 0:
return aux
aux.sort()
auxf = aux.filled()
# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0
flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True]))
# flag2 = ediff1d( flag ) == 0
flag2 = (flag[1:] == flag[:-1])
return aux[flag2]
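# Illustrative sketch, not part of the original module: setxor1d mirrors
# numpy.setxor1d on the unmasked data, e.g.
#   setxor1d(masked_array([1, 2, 3, 2, 4]), masked_array([2, 3, 5, 7, 5]))
# returns the values present in exactly one of the inputs, [1, 4, 5, 7].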
def in1d(ar1, ar2, assume_unique=False, invert=False):
"""
Test whether each element of an array is also present in a second
array.
The output is always a masked array. See `numpy.in1d` for more details.
See Also
--------
numpy.in1d : Equivalent function for ndarrays.
Notes
-----
.. versionadded:: 1.4.0
"""
if not assume_unique:
ar1, rev_idx = unique(ar1, return_inverse=True)
ar2 = unique(ar2)
ar = ma.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = ma.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
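# Illustrative sketch, not part of the original module: with no masked
# entries, in1d matches numpy.in1d, e.g.
#   in1d(masked_array([0, 1, 2, 5, 0]), [0, 2])
# yields [True, False, True, False, True].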
def union1d(ar1, ar2):
"""
Union of two arrays.
The output is always a masked array. See `numpy.union1d` for more details.
See also
--------
numpy.union1d : Equivalent function for ndarrays.
"""
return unique(ma.concatenate((ar1, ar2)))
def setdiff1d(ar1, ar2, assume_unique=False):
"""
Set difference of 1D arrays with unique elements.
The output is always a masked array. See `numpy.setdiff1d` for more
details.
See Also
--------
numpy.setdiff1d : Equivalent function for ndarrays.
Examples
--------
>>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1])
>>> np.ma.setdiff1d(x, [1, 2])
masked_array(data = [3 --],
mask = [False True],
fill_value = 999999)
"""
if assume_unique:
ar1 = ma.asarray(ar1).ravel()
else:
ar1 = unique(ar1)
ar2 = unique(ar2)
return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
###############################################################################
# Covariance #
###############################################################################
def _covhelper(x, y=None, rowvar=True, allow_masked=True):
"""
Private function for the computation of covariance and correlation
coefficients.
"""
x = ma.array(x, ndmin=2, copy=True, dtype=float)
xmask = ma.getmaskarray(x)
# Quick exit if we can't process masked data
if not allow_masked and xmask.any():
raise ValueError("Cannot process masked data.")
#
if x.shape[0] == 1:
rowvar = True
# Make sure that rowvar is either 0 or 1
rowvar = int(bool(rowvar))
axis = 1 - rowvar
if rowvar:
tup = (slice(None), None)
else:
tup = (None, slice(None))
#
if y is None:
xnotmask = np.logical_not(xmask).astype(int)
else:
y = array(y, copy=False, ndmin=2, dtype=float)
ymask = ma.getmaskarray(y)
if not allow_masked and ymask.any():
raise ValueError("Cannot process masked data.")
if xmask.any() or ymask.any():
if y.shape == x.shape:
# Define some common mask
common_mask = np.logical_or(xmask, ymask)
if common_mask is not nomask:
x.unshare_mask()
y.unshare_mask()
xmask = x._mask = y._mask = ymask = common_mask
x = ma.concatenate((x, y), axis)
xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int)
x -= x.mean(axis=rowvar)[tup]
return (x, xnotmask, rowvar)
def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None):
"""
Estimate the covariance matrix.
Except for the handling of missing data this function does the same as
`numpy.cov`. For more details and examples, see `numpy.cov`.
By default, masked values are recognized as such. If `x` and `y` have the
same shape, a common mask is allocated: if ``x[i,j]`` is masked, then
``y[i,j]`` will also be masked.
Setting `allow_masked` to False will raise an exception if values are
missing in either of the input arrays.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
form as `x`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : bool, optional
Default normalization (False) is by ``(N-1)``, where ``N`` is the
number of observations given (unbiased estimate). If `bias` is True,
then normalization is by ``N``. This keyword can be overridden by
the keyword ``ddof`` in numpy versions >= 1.5.
allow_masked : bool, optional
If True, masked values are propagated pair-wise: if a value is masked
in `x`, the corresponding value is masked in `y`.
If False, raises a `ValueError` exception when some values are missing.
ddof : {None, int}, optional
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
.. versionadded:: 1.5
Raises
------
ValueError
Raised if some values are missing and `allow_masked` is False.
See Also
--------
numpy.cov
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError("ddof must be an integer")
# Set up ddof
if ddof is None:
if bias:
ddof = 0
else:
ddof = 1
(x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
if not rowvar:
fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof
result = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
else:
fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof
result = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
return result
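# Illustrative sketch, not part of the original module: with no masked values
# cov reduces to numpy.cov, e.g.
#   x = masked_array([[0., 1., 2.], [3., 4., 5.]])
#   cov(x)            # same result as np.cov(x.data)
# whereas masked entries are discounted pair-wise through `xnotmask`, so the
# normalization (and hence the result) generally differs from running np.cov
# on the filled data.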
def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True,
ddof=np._NoValue):
"""
Return Pearson product-moment correlation coefficients.
Except for the handling of missing data this function does the same as
`numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
allow_masked : bool, optional
If True, masked values are propagated pair-wise: if a value is masked
in `x`, the corresponding value is masked in `y`.
If False, raises an exception. Because `bias` is deprecated, this
argument needs to be treated as keyword only to avoid a warning.
ddof : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
See Also
--------
numpy.corrcoef : Equivalent function in top-level NumPy module.
cov : Estimate the covariance matrix.
Notes
-----
This function accepts but discards arguments `bias` and `ddof`. This is
for backwards compatibility with previous versions of this function. These
arguments had no effect on the return values of the function and can be
safely ignored in this and previous versions of numpy.
"""
msg = 'bias and ddof have no effect and are deprecated'
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
warnings.warn(msg, DeprecationWarning)
# Get the data
(x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
# Compute the covariance matrix
if not rowvar:
fact = np.dot(xnotmask.T, xnotmask) * 1.
c = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
else:
fact = np.dot(xnotmask, xnotmask.T) * 1.
c = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
# Check whether we have a scalar
try:
diag = ma.diagonal(c)
except ValueError:
return 1
#
if xnotmask.all():
_denom = ma.sqrt(ma.multiply.outer(diag, diag))
else:
_denom = diagflat(diag)
n = x.shape[1 - rowvar]
if rowvar:
for i in range(n - 1):
for j in range(i + 1, n):
_x = mask_cols(vstack((x[i], x[j]))).var(axis=1)
_denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
else:
for i in range(n - 1):
for j in range(i + 1, n):
_x = mask_cols(
vstack((x[:, i], x[:, j]))).var(axis=1)
_denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
return c / _denom
#####--------------------------------------------------------------------------
#---- --- Concatenation helpers ---
#####--------------------------------------------------------------------------
class MAxisConcatenator(AxisConcatenator):
"""
Translate slice objects to concatenation along an axis.
For documentation on usage, see `mr_class`.
See Also
--------
mr_class
"""
def __init__(self, axis=0):
AxisConcatenator.__init__(self, axis, matrix=False)
def __getitem__(self, key):
if isinstance(key, str):
raise MAError("Unavailable for masked array.")
if not isinstance(key, tuple):
key = (key,)
objs = []
scalars = []
final_dtypedescr = None
for k in range(len(key)):
scalar = False
if isinstance(key[k], slice):
step = key[k].step
start = key[k].start
stop = key[k].stop
if start is None:
start = 0
if step is None:
step = 1
if isinstance(step, complex):
size = int(abs(step))
newobj = np.linspace(start, stop, num=size)
else:
newobj = np.arange(start, stop, step)
elif isinstance(key[k], str):
if (key[k] in 'rc'):
self.matrix = True
self.col = (key[k] == 'c')
continue
try:
self.axis = int(key[k])
continue
except (ValueError, TypeError):
raise ValueError("Unknown special directive")
elif type(key[k]) in np.ScalarType:
newobj = asarray([key[k]])
scalars.append(k)
scalar = True
else:
newobj = key[k]
objs.append(newobj)
if isinstance(newobj, ndarray) and not scalar:
if final_dtypedescr is None:
final_dtypedescr = newobj.dtype
elif newobj.dtype > final_dtypedescr:
final_dtypedescr = newobj.dtype
if final_dtypedescr is not None:
for k in scalars:
objs[k] = objs[k].astype(final_dtypedescr)
res = concatenate(tuple(objs), axis=self.axis)
return self._retval(res)
class mr_class(MAxisConcatenator):
"""
Translate slice objects to concatenation along the first axis.
This is the masked array version of `lib.index_tricks.RClass`.
See Also
--------
lib.index_tricks.RClass
Examples
--------
>>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])]
array([1, 2, 3, 0, 0, 4, 5, 6])
"""
def __init__(self):
MAxisConcatenator.__init__(self, 0)
mr_ = mr_class()
#####--------------------------------------------------------------------------
#---- Find unmasked data ---
#####--------------------------------------------------------------------------
def flatnotmasked_edges(a):
"""
Find the indices of the first and last unmasked values.
Expects a 1-D `MaskedArray`, returns None if all values are masked.
Parameters
----------
a : array_like
Input 1-D `MaskedArray`
Returns
-------
edges : ndarray or None
The indices of first and last non-masked value in the array.
Returns None if all values are masked.
See Also
--------
flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges,
clump_masked, clump_unmasked
Notes
-----
Only accepts 1-D arrays.
Examples
--------
>>> a = np.ma.arange(10)
>>> flatnotmasked_edges(a)
array([0, 9])
>>> mask = (a < 3) | (a > 8) | (a == 5)
>>> a[mask] = np.ma.masked
>>> np.array(a[~a.mask])
array([3, 4, 6, 7, 8])
>>> flatnotmasked_edges(a)
array([3, 8])
>>> a[:] = np.ma.masked
>>> print flatnotmasked_edges(a)
None
"""
m = getmask(a)
if m is nomask or not np.any(m):
return np.array([0, a.size - 1])
unmasked = np.flatnonzero(~m)
if len(unmasked) > 0:
return unmasked[[0, -1]]
else:
return None
def notmasked_edges(a, axis=None):
"""
Find the indices of the first and last unmasked values along an axis.
If all values are masked, return None. Otherwise, return a list
of two tuples, corresponding to the indices of the first and last
unmasked values respectively.
Parameters
----------
a : array_like
The input array.
axis : int, optional
Axis along which to perform the operation.
If None (default), applies to a flattened version of the array.
Returns
-------
edges : ndarray or list
An array of start and end indexes if there are any masked data in
the array. If there are no masked data in the array, `edges` is a
list of the first and last index.
See Also
--------
flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous,
clump_masked, clump_unmasked
Examples
--------
>>> a = np.arange(9).reshape((3, 3))
>>> m = np.zeros_like(a)
>>> m[1:, 1:] = 1
>>> am = np.ma.array(a, mask=m)
>>> np.array(am[~am.mask])
array([0, 1, 2, 3, 6])
>>> np.ma.notmasked_edges(am)
array([0, 6])
"""
a = asarray(a)
if axis is None or a.ndim == 1:
return flatnotmasked_edges(a)
m = getmaskarray(a)
idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim))
return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]),
tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ]
def flatnotmasked_contiguous(a):
"""
Find contiguous unmasked data in a masked array.
Parameters
----------
a : ndarray
The input array.
Returns
-------
slice_list : list
A sorted sequence of slices (start index, end index).
See Also
--------
flatnotmasked_edges, notmasked_contiguous, notmasked_edges,
clump_masked, clump_unmasked
Notes
-----
Only accepts 2-D arrays at most.
Examples
--------
>>> a = np.ma.arange(10)
>>> np.ma.flatnotmasked_contiguous(a)
slice(0, 10, None)
>>> mask = (a < 3) | (a > 8) | (a == 5)
>>> a[mask] = np.ma.masked
>>> np.array(a[~a.mask])
array([3, 4, 6, 7, 8])
>>> np.ma.flatnotmasked_contiguous(a)
[slice(3, 5, None), slice(6, 9, None)]
>>> a[:] = np.ma.masked
>>> print np.ma.flatnotmasked_contiguous(a)
None
"""
m = getmask(a)
if m is nomask:
return slice(0, a.size, None)
i = 0
result = []
for (k, g) in itertools.groupby(m.ravel()):
n = len(list(g))
if not k:
result.append(slice(i, i + n))
i += n
return result or None
def notmasked_contiguous(a, axis=None):
"""
Find contiguous unmasked data in a masked array along the given axis.
Parameters
----------
a : array_like
The input array.
axis : int, optional
Axis along which to perform the operation.
If None (default), applies to a flattened version of the array.
Returns
-------
endpoints : list
A list of slices (start and end indexes) of unmasked indexes
in the array.
See Also
--------
flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges,
clump_masked, clump_unmasked
Notes
-----
Only accepts 2-D arrays at most.
Examples
--------
>>> a = np.arange(9).reshape((3, 3))
>>> mask = np.zeros_like(a)
>>> mask[1:, 1:] = 1
>>> ma = np.ma.array(a, mask=mask)
>>> np.array(ma[~ma.mask])
array([0, 1, 2, 3, 6])
>>> np.ma.notmasked_contiguous(ma)
[slice(0, 4, None), slice(6, 7, None)]
"""
a = asarray(a)
nd = a.ndim
if nd > 2:
raise NotImplementedError("Currently limited to atmost 2D array.")
if axis is None or nd == 1:
return flatnotmasked_contiguous(a)
#
result = []
#
other = (axis + 1) % 2
idx = [0, 0]
idx[axis] = slice(None, None)
#
for i in range(a.shape[other]):
idx[other] = i
result.append(flatnotmasked_contiguous(a[idx]) or None)
return result
def _ezclump(mask):
"""
Finds the clumps (groups of data with the same values) for a 1D bool array.
Returns a series of slices.
"""
if mask.ndim > 1:
mask = mask.ravel()
idx = (mask[1:] ^ mask[:-1]).nonzero()
idx = idx[0] + 1
if mask[0]:
if len(idx) == 0:
return [slice(0, mask.size)]
r = [slice(0, idx[0])]
r.extend((slice(left, right)
for left, right in zip(idx[1:-1:2], idx[2::2])))
else:
if len(idx) == 0:
return []
r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])]
if mask[-1]:
r.append(slice(idx[-1], mask.size))
return r
def clump_unmasked(a):
"""
Return list of slices corresponding to the unmasked clumps of a 1-D array.
(A "clump" is defined as a contiguous region of the array).
Parameters
----------
a : ndarray
A one-dimensional masked array.
Returns
-------
slices : list of slice
The list of slices, one for each continuous region of unmasked
elements in `a`.
Notes
-----
.. versionadded:: 1.4.0
See Also
--------
flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges,
notmasked_contiguous, clump_masked
Examples
--------
>>> a = np.ma.masked_array(np.arange(10))
>>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
>>> np.ma.clump_unmasked(a)
[slice(3, 6, None), slice(7, 8, None)]
"""
mask = getattr(a, '_mask', nomask)
if mask is nomask:
return [slice(0, a.size)]
return _ezclump(~mask)
def clump_masked(a):
"""
Returns a list of slices corresponding to the masked clumps of a 1-D array.
(A "clump" is defined as a contiguous region of the array).
Parameters
----------
a : ndarray
A one-dimensional masked array.
Returns
-------
slices : list of slice
The list of slices, one for each continuous region of masked elements
in `a`.
Notes
-----
.. versionadded:: 1.4.0
See Also
--------
flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges,
notmasked_contiguous, clump_unmasked
Examples
--------
>>> a = np.ma.masked_array(np.arange(10))
>>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
>>> np.ma.clump_masked(a)
[slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)]
"""
mask = ma.getmask(a)
if mask is nomask:
return []
return _ezclump(mask)
###############################################################################
# Polynomial fit #
###############################################################################
def vander(x, n=None):
"""
Masked values in the input array result in rows of zeros.
"""
_vander = np.vander(x, n)
m = getmask(x)
if m is not nomask:
_vander[m] = 0
return _vander
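# Illustrative sketch, not part of the original module: for
#   x = masked_array([1, 2, 3], mask=[0, 1, 0])
# vander(x) equals np.vander([1, 2, 3]) except that the second row (the
# masked entry) is replaced by zeros.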
vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__)
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Any masked values in x is propagated in y, and vice-versa.
"""
x = asarray(x)
y = asarray(y)
m = getmask(x)
if y.ndim == 1:
m = mask_or(m, getmask(y))
elif y.ndim == 2:
my = getmask(mask_rows(y))
if my is not nomask:
m = mask_or(m, my[:, 0])
else:
raise TypeError("Expected a 1D or 2D array for y!")
if w is not None:
w = asarray(w)
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
m = mask_or(m, getmask(w))
if m is not nomask:
not_m = ~m
if w is not None:
w = w[not_m]
return np.polyfit(x[not_m], y[not_m], deg, rcond, full, w, cov)
else:
return np.polyfit(x, y, deg, rcond, full, w, cov)
polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__)
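if __name__ == '__main__':
    # Illustrative usage sketch, not part of the original numpy module: it
    # merely exercises a few helpers defined above, mirroring their docstring
    # examples.
    _a = masked_array(np.arange(10), mask=[1, 1, 1, 0, 0, 0, 1, 0, 1, 1])
    print(clump_unmasked(_a))    # [slice(3, 6, None), slice(7, 8, None)]
    print(clump_masked(_a))      # [slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)]
    _b = masked_array(np.arange(9).reshape(3, 3),
                      mask=[[1, 0, 0], [1, 0, 0], [0, 0, 0]])
    print(compress_rowcols(_b))  # [[7 8]]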
|
py | b4021e28f7556dd26c66f95820980dff0803f2a5 | ###########################
## This program defines functions that utilize the google and yahoo geocoder APIs to
## convert addresses into coordinates
## Written by: PeterM Crosta
## 2/16/2009
#
#INPUT FILE MUST BE CSV ADDRESSES: "STREET, CITY, STATE, ZIPCODE"
## WARNING: THIS CODE PROBABLY DOES NOT WORK ANYMORE GIVEN THAT THE APIs HAVE CHANGED.
## POSTING HERE JUST TO KEEP TRACK OF VARIOUS PROGRAMS WRITTEN OVER THE YEARS
##########################
#import various modules
import sys, string
import urllib
from urllib import *
from xml.dom import minidom
import xml
import time
def google(fname, oname, zipo=0):
"""Use google to geocode. Need input and output csv files.
Input must be CSV address. zipo=0 means dont run in zip code only mode"""
#Open csv file for reading and one for writing
f = open(fname)
o = open(oname, 'w')
#This is my API. You should get your own.
api = ""
#Loop over the lines in the input file
for line in f:
#rest a bit so geocoder is not flooded with requests
time.sleep(0.5)
#make some cosmetic changes to the line that is read in
lines = line.replace('\t', '+')
lines = lines.replace('\n', '')
lines = lines.replace(' ', '+')
lines = lines.replace('#', '+')
#If running in zipcode only mode, just use the zipcode as input. Otherwise, use entire address
if zipo == 1:
code = lines.split(',')[3]
#Add leading zero to zips that begin with "0"
if len(code) == 4:
code = "0"+code
#Get xml
site = urllib.urlopen("http://maps.google.com/maps/geo?q="+code+"&output=xml&key="+api)
else:
#Get xml
site = urllib.urlopen("http://maps.google.com/maps/geo?q="+lines+"&output=xml&key="+api)
#This creates a string of the xml
x = site.read()
#assume only one response unless otherwise
multi = 1
try:
#create xml document object; very error prone
kml = xml.dom.minidom.parseString(x)
#If more than one response, count how many
if len(kml.getElementsByTagName('AddressDetails')) > 1:
multi = len(kml.getElementsByTagName('AddressDetails'))
#pull out accuracy information.
#more info http://code.google.com/apis/maps/documentation/reference.html#GGeoAddressAccuracy
acc = kml.getElementsByTagName('AddressDetails')[0].attributes["Accuracy"].value
#pull out lat and long
(lng, lat) = kml.getElementsByTagName("Point")[0].getElementsByTagName("coordinates")[0].firstChild.nodeValue.split(",")[0:2]
#extract rest of matched address depending on mode and other factors
if zipo==0:
(place, city, statezip, cntry) = kml.getElementsByTagName("Placemark")[0].getElementsByTagName("address")[0].firstChild.nodeValue.split(',')[0:4]
else:
place = " "
if len(kml.getElementsByTagName("Placemark")[0].getElementsByTagName("address")[0].firstChild.nodeValue.split(',')) == 3:
(city, statezip, cntry) = kml.getElementsByTagName("Placemark")[0].getElementsByTagName("address")[0].firstChild.nodeValue.split(',')[0:3]
else:
(statezip, cntry) = kml.getElementsByTagName("Placemark")[0].getElementsByTagName("address")[0].firstChild.nodeValue.split(',')[0:2]
city = " "
#cleaning
state = statezip.strip().split(' ')[0]
zippy = statezip.strip().split(' ')[1]
v=acc, ",", str(multi), ",", lat, ",", lng, ",", place, ",", city, ",", state, ",", zippy, "\n"
#write output to file
o.writelines(v)
except:
#or write error to file
o.write("Error\n")
#close files
f.close()
o.close()
def yahoo(fname, oname):
"""Use Yahoo to geocode. Need input and outfiles as arguments.
Input file must be CSV in order: street, city, state, zip"""
#Open csv file for reading and one for writing
f = open(fname)
o = open(oname, 'w')
#This is my API. You should get your own.
api = ""
#Loop over the lines in the input file
for line in f:
#Rest a little so the geocoder isn't overloaded
time.sleep(0.5)
#make some cosmetic changes to the line that is read in
lines = line.replace('\t', '+')
lines = lines.replace('\n', '')
nocomma = lines.split(',')
street, city, state, zipcode = nocomma[0].strip(), nocomma[1].strip(), nocomma[2].strip(), nocomma[3].strip()
street = street.replace(' ', '+')
city = city.replace(' ', '+')
try:
#Get xml
site = urllib.urlopen("http://local.yahooapis.com/MapsService/V1/geocode?appid="+api+"&street="+street+"&city="+city+"&state="+state+"&zip="+zipcode)
#Create string of xml
x = site.read()
try:
#create xml document object; very error prone
kml = xml.dom.minidom.parseString(x)
#assume one response unless otherwise
multi = 1
if len(kml.getElementsByTagName('Error')) == 0:
if len(kml.getElementsByTagName('Result')) > 1:
multi = len(kml.getElementsByTagName('Result'))
#Extract address pieces from xml
acc = kml.getElementsByTagName('Result')[0].attributes["precision"].value
lat = kml.getElementsByTagName("Result")[0].getElementsByTagName("Latitude")[0].firstChild.nodeValue
lng = kml.getElementsByTagName("Result")[0].getElementsByTagName("Longitude")[0].firstChild.nodeValue
place = kml.getElementsByTagName("Result")[0].getElementsByTagName("Address")[0].firstChild.nodeValue
cityy = kml.getElementsByTagName("Result")[0].getElementsByTagName("City")[0].firstChild.nodeValue
statey = kml.getElementsByTagName("Result")[0].getElementsByTagName("State")[0].firstChild.nodeValue
zipy = kml.getElementsByTagName("Result")[0].getElementsByTagName("Zip")[0].firstChild.nodeValue
v = acc, ",", str(multi), ",", lat, ",", lng, ",", place, ",", cityy, ",", statey, ",", zipy, "\n"
#write output to file
o.writelines(v)
else:
#or write error to file
o.write("Error\n")
except:
#or write error to file
o.write("Error\n")
except:
#or write error to file
o.write("Error\n")
#close files
f.close()
o.close()
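# Illustrative usage sketch, not part of the original script; the file names
# below are hypothetical and, as the header warns, the Google/Yahoo geocoder
# APIs this code targets have long since been retired:
#   google("addresses.csv", "google_out.csv")         # full-address mode
#   google("addresses.csv", "google_zip_out.csv", 1)  # zip-code-only mode
#   yahoo("addresses.csv", "yahoo_out.csv")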
|
py | b4021e4f861fa79a5d0755f1b5773771575c2ecf | '''
A binary search Tree
'''
from __future__ import print_function
class Node:
def __init__(self, label, parent):
self.label = label
self.left = None
self.right = None
#Added in order to delete a node easier
self.parent = parent
def getLabel(self):
return self.label
def setLabel(self, label):
self.label = label
def getLeft(self):
return self.left
def setLeft(self, left):
self.left = left
def getRight(self):
return self.right
def setRight(self, right):
self.right = right
def getParent(self):
return self.parent
def setParent(self, parent):
self.parent = parent
class BinarySearchTree:
def __init__(self):
self.root = None
def insert(self, label):
# Create a new Node
new_node = Node(label, None)
# If Tree is empty
if self.empty():
self.root = new_node
else:
#If Tree is not empty
curr_node = self.root
#While we don't get to a leaf
while curr_node is not None:
#We keep reference of the parent node
parent_node = curr_node
#If node label is less than current node
if new_node.getLabel() < curr_node.getLabel():
#We go left
curr_node = curr_node.getLeft()
else:
#Else we go right
curr_node = curr_node.getRight()
#We insert the new node in a leaf
if new_node.getLabel() < parent_node.getLabel():
parent_node.setLeft(new_node)
else:
parent_node.setRight(new_node)
#Set parent to the new node
new_node.setParent(parent_node)
def delete(self, label):
if (not self.empty()):
#Look for the node with that label
node = self.getNode(label)
#If the node exists
if(node is not None):
#If it has no children
if(node.getLeft() is None and node.getRight() is None):
self.__reassignNodes(node, None)
node = None
#Has only right children
elif(node.getLeft() is None and node.getRight() is not None):
self.__reassignNodes(node, node.getRight())
#Has only left children
elif(node.getLeft() is not None and node.getRight() is None):
self.__reassignNodes(node, node.getLeft())
#Has two children
else:
#Gets the max value of the left branch
tmpNode = self.getMax(node.getLeft())
#Deletes the tmpNode
self.delete(tmpNode.getLabel())
#Assigns the value to the node to delete and keeps tree structure
node.setLabel(tmpNode.getLabel())
def getNode(self, label):
curr_node = None
#If the tree is not empty
if(not self.empty()):
#Get tree root
curr_node = self.getRoot()
#While we don't find the node we look for
#Short-circuit evaluation here avoids an AttributeError on None
while curr_node is not None and curr_node.getLabel() != label:
#If node label is less than current node
if label < curr_node.getLabel():
#We go left
curr_node = curr_node.getLeft()
else:
#Else we go right
curr_node = curr_node.getRight()
return curr_node
def getMax(self, root = None):
if(root is not None):
curr_node = root
else:
#We go deep on the right branch
curr_node = self.getRoot()
if(not self.empty()):
while(curr_node.getRight() is not None):
curr_node = curr_node.getRight()
return curr_node
def getMin(self, root = None):
if(root is not None):
curr_node = root
else:
#We go deep on the left branch
curr_node = self.getRoot()
if(not self.empty()):
while(curr_node.getLeft() is not None):
curr_node = curr_node.getLeft()
return curr_node
def empty(self):
if self.root is None:
return True
return False
def __InOrderTraversal(self, curr_node):
nodeList = []
if curr_node is not None:
nodeList.insert(0, curr_node)
nodeList = nodeList + self.__InOrderTraversal(curr_node.getLeft())
nodeList = nodeList + self.__InOrderTraversal(curr_node.getRight())
return nodeList
def getRoot(self):
return self.root
def __isRightChildren(self, node):
if(node == node.getParent().getRight()):
return True
return False
def __reassignNodes(self, node, newChildren):
if(newChildren is not None):
newChildren.setParent(node.getParent())
if(node.getParent() is not None):
#If it is the Right Children
if(self.__isRightChildren(node)):
node.getParent().setRight(newChildren)
else:
#Else it is the left children
node.getParent().setLeft(newChildren)
#This function traverses the tree. By default it returns an
#in-order traversal list. You can pass a function to traverse
#the tree as needed by client code
def traversalTree(self, traversalFunction = None, root = None):
if(traversalFunction is None):
#Returns a list of nodes in preOrder by default
return self.__InOrderTraversal(self.root)
else:
#Returns a list of nodes in the order that the users wants to
return traversalFunction(self.root)
    #Returns a string of all the node labels in the list
    #In Order Traversal
    def __str__(self):
        nodes = self.__InOrderTraversal(self.root)
        output = ""
        for node in nodes:
            output = output + " " + node.getLabel().__str__()
        return output
def InPreOrder(curr_node):
nodeList = []
if curr_node is not None:
nodeList = nodeList + InPreOrder(curr_node.getLeft())
nodeList.insert(0, curr_node.getLabel())
nodeList = nodeList + InPreOrder(curr_node.getRight())
return nodeList
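#A minimal sketch (not part of the original module): traversalTree accepts
#any function shaped like InPreOrder, so a post-order variant can be passed
#in exactly the same way. The name InPostOrder is illustrative only.
def InPostOrder(curr_node):
    nodeList = []
    if curr_node is not None:
        nodeList = InPostOrder(curr_node.getLeft())
        nodeList = nodeList + InPostOrder(curr_node.getRight())
        nodeList.append(curr_node.getLabel())
    return nodeList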
def testBinarySearchTree():
'''
Example
8
/ \
3 10
/ \ \
1 6 14
/ \ /
4 7 13
'''
    '''
    Example After Deletion
          7
         /
        1
         \
          4
    '''
t = BinarySearchTree()
t.insert(8)
t.insert(3)
t.insert(6)
t.insert(1)
t.insert(10)
t.insert(14)
t.insert(13)
t.insert(4)
t.insert(7)
#Prints all the elements of the list in order traversal
print(t.__str__())
if(t.getNode(6) is not None):
print("The label 6 exists")
else:
print("The label 6 doesn't exist")
if(t.getNode(-1) is not None):
print("The label -1 exists")
else:
print("The label -1 doesn't exist")
if(not t.empty()):
print(("Max Value: ", t.getMax().getLabel()))
print(("Min Value: ", t.getMin().getLabel()))
t.delete(13)
t.delete(10)
t.delete(8)
t.delete(3)
t.delete(6)
t.delete(14)
    #Gets all the elements of the tree in pre order
    #and prints them
    nodeLabels = t.traversalTree(InPreOrder, t.root)
    for label in nodeLabels:
        print(label)
if __name__ == "__main__":
testBinarySearchTree()
|
py | b4021ff4da653111ac4879a8d36c91e8873a5639 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from cow.plugins.sqlalchemy_plugin import SQLAlchemyPlugin
from cow.plugins.redis_plugin import RedisPlugin
from preggy import expect
from mock import patch
import holmes.server
from tests.unit.base import ApiTestCase
class ApiServerTestCase(ApiTestCase):
def test_healthcheck(self):
response = self.fetch('/healthcheck')
expect(response.code).to_equal(200)
expect(response.body).to_be_like('WORKING')
def test_server_handlers(self):
srv = holmes.server.HolmesApiServer()
handlers = srv.get_handlers()
expect(handlers).not_to_be_null()
expect(handlers).to_length(33)
def test_server_plugins(self):
srv = holmes.server.HolmesApiServer()
plugins = srv.get_plugins()
expect(plugins).to_length(2)
expect(plugins[0]).to_equal(SQLAlchemyPlugin)
expect(plugins[1]).to_equal(RedisPlugin)
@patch('holmes.server.HolmesApiServer')
def test_server_main_function(self, server_mock):
holmes.server.main()
expect(server_mock.run.called).to_be_true()
|
py | b4022045637a4d263e697a2138e7142dba79f0a9 | # Copyright 2021 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Core modules for AI Platform Pipeline Components."""
import os
from google.cloud import aiplatform as aiplatform_sdk
from google_cloud_pipeline_components.aiplatform import utils
try:
from kfp.v2.components import load_component_from_file
except ImportError:
from kfp.components import load_component_from_file
__all__ = [
'ImageDatasetCreateOp',
'TabularDatasetCreateOp',
'TextDatasetCreateOp',
'VideoDatasetCreateOp',
'ImageDatasetExportDataOp',
'TabularDatasetExportDataOp',
'TextDatasetExportDataOp',
'VideoDatasetExportDataOp',
'ImageDatasetImportDataOp',
'TextDatasetImportDataOp',
'VideoDatasetImportDataOp',
'CustomContainerTrainingJobRunOp',
'CustomPythonPackageTrainingJobRunOp',
'AutoMLImageTrainingJobRunOp',
'AutoMLTextTrainingJobRunOp',
'AutoMLTabularTrainingJobRunOp',
'AutoMLVideoTrainingJobRunOp',
'ModelDeployOp',
'ModelBatchPredictOp',
'ModelExportOp',
'ModelUploadOp',
'EndpointCreateOp',
'EndpointDeleteOp',
'TimeSeriesDatasetCreateOp',
'TimeSeriesDatasetExportDataOp',
'AutoMLForecastingTrainingJobRunOp',
]
TimeSeriesDatasetCreateOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__),
'dataset/create_time_series_dataset/component.yaml'))
ImageDatasetCreateOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__),
'dataset/create_image_dataset/component.yaml'))
TabularDatasetCreateOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__),
'dataset/create_tabular_dataset/component.yaml'))
TextDatasetCreateOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__),
'dataset/create_text_dataset/component.yaml'))
VideoDatasetCreateOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__),
'dataset/create_video_dataset/component.yaml'))
ImageDatasetExportDataOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__),
'dataset/export_image_dataset/component.yaml'))
TabularDatasetExportDataOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__),
'dataset/export_tabular_dataset/component.yaml'))
TimeSeriesDatasetExportDataOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__),
'dataset/export_time_series_dataset/component.yaml'))
TextDatasetExportDataOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__),
'dataset/export_text_dataset/component.yaml'))
VideoDatasetExportDataOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__),
'dataset/export_video_dataset/component.yaml'))
ImageDatasetImportDataOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__),
'dataset/import_image_dataset/component.yaml'))
TextDatasetImportDataOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__),
'dataset/import_text_dataset/component.yaml'))
VideoDatasetImportDataOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__),
'dataset/import_video_dataset/component.yaml'))
CustomContainerTrainingJobRunOp = utils.convert_method_to_component(
aiplatform_sdk.CustomContainerTrainingJob,
aiplatform_sdk.CustomContainerTrainingJob.run,
)
CustomPythonPackageTrainingJobRunOp = utils.convert_method_to_component(
aiplatform_sdk.CustomPythonPackageTrainingJob,
aiplatform_sdk.CustomPythonPackageTrainingJob.run,
)
AutoMLImageTrainingJobRunOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__),
'automl_training_job/automl_image_training_job/component.yaml'))
AutoMLTextTrainingJobRunOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__),
'automl_training_job/automl_text_training_job/component.yaml'))
AutoMLTabularTrainingJobRunOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__),
'automl_training_job/automl_tabular_training_job/component.yaml'))
AutoMLForecastingTrainingJobRunOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__),
'automl_training_job/automl_forecasting_training_job/component.yaml'))
AutoMLVideoTrainingJobRunOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__),
'automl_training_job/automl_video_training_job/component.yaml'))
ModelExportOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__), 'model/export_model/component.yaml'))
ModelDeployOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__), 'endpoint/deploy_model/component.yaml'))
ModelBatchPredictOp = load_component_from_file(
os.path.join(os.path.dirname(__file__), 'batch_predict_job/component.yaml'))
ModelUploadOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__), 'model/upload_model/component.yaml'))
EndpointCreateOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__), 'endpoint/create_endpoint/component.yaml'))
EndpointDeleteOp = load_component_from_file(
os.path.join(
os.path.dirname(__file__), 'endpoint/delete_endpoint/component.yaml'))
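#A hedged usage sketch (kept as comments so the module's import-time behavior
#is unchanged): the ops above are meant to be composed inside a KFP v2
#pipeline. The project id, display name and exact parameter names below are
#assumptions, not verified signatures.
#
#    from kfp.v2 import dsl, compiler
#    from google_cloud_pipeline_components import aiplatform as gcc_aip
#
#    @dsl.pipeline(name='endpoint-create-example')
#    def pipeline(project: str = 'my-project'):
#        endpoint_op = gcc_aip.EndpointCreateOp(
#            project=project,
#            display_name='example-endpoint')
#
#    compiler.Compiler().compile(
#        pipeline_func=pipeline, package_path='pipeline.json')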
|
py | b40220d564850514abf4faf3a5d9473a4b4be23c | import os
from os import path as op
import subprocess
import sys
import venv
from .. import get_config, logger
COMMANDS = {'env': 'Virtualenv management'}
def _find_or_create_venv(envpath):
rootdir = get_config('ROOTDIR')
if not envpath.startswith('/'):
envpath = op.join(rootdir, envpath)
if not op.isdir(envpath):
logger.info('Virtual env not found, creating.')
venv.create(envpath, with_pip=True)
req_file = op.join(rootdir, 'requirements.txt')
if op.isfile(req_file):
subprocess.check_call([op.join(envpath, 'bin', 'python'), '-m', 'pip', 'install', '-r', req_file])
return envpath
def _add_path(var, *args):
    paths = list(args) + (var.split(':') if var else [])
existing = set()
res = []
for path in paths:
if path not in existing:
res.append(path)
existing.add(path)
return ':'.join(res)
def ensure_venv(envpath):
current_env = os.environ.get('VIRTUAL_ENV', '')
if not current_env or current_env != envpath:
logger.info('Not in venv, entering.')
envpath = _find_or_create_venv(envpath)
interpreter = op.join(envpath, 'bin', 'python')
if interpreter == sys.executable:
raise RuntimeError(f'Already using interpreter: {interpreter}')
env = os.environ.copy()
env['PATH'] = _add_path(env.get('PATH'), op.join(envpath, 'bin'))
env['PYTHONPATH'] = _add_path(env.get('PYTHONPATH'), get_config('ROOTDIR'))
env['VIRTUAL_ENV'] = envpath
env.pop('PYTHONHOME', None)
logger.debug('Calling: %s with args: %r', interpreter, sys.argv)
os.execle(interpreter, 'python', *sys.argv, env)
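#A hedged sketch (comments only, not part of the original module): a host
#entry point could call ensure_venv() before importing project dependencies,
#so the process re-execs itself under <venv>/bin/python. The 'VENV.PATH'
#config key and the 'mytool' module layout are assumptions.
#
#    from mytool import get_config
#    from mytool.commands import env
#
#    env.ensure_venv(get_config('VENV.PATH', '.venv'))
#    # from here on, the interpreter and imports come from the managed venv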
def shell(*args):
rootdir = get_config('ROOTDIR')
initscript = f'''. /etc/bash.bashrc;. ~/.bashrc
[[ -f {rootdir}/.bashrc.venv ]] && . {rootdir}/.bashrc.venv || PS1="[venv] $PS1"
export PATH={os.environ["PATH"]}\n'''
rsync, wsync = os.pipe()
    #pipe2(0) is used instead of pipe() so the read end stays inheritable
    #across exec and bash can read the rcfile through /dev/fd/<rfd>
    rfd, wfd = os.pipe2(0)
if os.fork():
os.close(wfd)
os.close(wsync)
os.read(rsync, 1)
os.execlp('bash', 'bash', '--rcfile', f'/dev/fd/{rfd}', *args)
else:
os.close(rfd)
os.close(rsync)
with os.fdopen(wfd, 'w') as wfile:
wfile.write(initscript)
os.write(wsync, b'\n')
os.close(wsync)
def subcmd_freeze(req_filename, requirements, req_lines):
for line in subprocess.check_output(['pip', 'freeze']).decode('utf-8').split('\n'):
name, _, version = line.strip().partition('==')
lower = name.lower()
if lower in requirements:
logger.debug('Found requirement: %s==%s', name, version)
requirements[lower] = (version, name)
with open(req_filename, 'w') as req_file:
for line, name, version in req_lines:
if name:
lower = name.lower()
version, orig = requirements.get(lower, (None, None))
if version is not None:
req_file.write(f'{orig}=={version}\n' if version else f'{orig}\n')
else:
req_file.write(f'{name}\n')
else:
req_file.write(line)
def subcmd_add(req_filename, requirements, req_lines, pkg):
name = pkg.partition('==')[0]
added = True
idx = 0
    for idx, (line, lname, lversion) in enumerate(req_lines):
        if name.lower() <= lname.lower():
            if name.lower() == lname.lower():
                logger.info('Package already present in requirements, upgrading')
                prev_entry = lname if not lversion else f'{lname}=={lversion}'
                req_lines[idx] = (line.replace(prev_entry, pkg), lname, lversion)
                added = False
            break
    else:
        #No existing entry sorts after the new package, so append it at the end
        idx = len(req_lines)
subprocess.check_call(['python', '-m', 'pip', 'install', '--upgrade', pkg])
with open(req_filename, 'w') as req_file:
if idx > 0:
req_file.write(''.join(line for line, _, _ in req_lines[:idx]))
if added:
req_file.write(f'{pkg}\n')
req_file.write(''.join(line for line, _, _ in req_lines[idx:]))
def subcmd_rm(req_filename, requirements, req_lines, pkg):
if pkg.lower() not in requirements:
logger.warning('Package not found: %s', pkg)
else:
subprocess.check_call(['python', '-m', 'pip', 'uninstall', pkg])
with open(req_filename, 'w') as req_file:
req_file.write(''.join(
line
for line, name, _ in req_lines
if name.lower() != pkg.lower()))
def cmd_env(args):
rootdir = get_config('ROOTDIR')
req_filename = op.join(rootdir, get_config('VENV.REQUIREMENTS', 'requirements.txt'))
req_lines = []
requirements = {}
try:
with open(req_filename) as req_file:
for line in req_file:
record = line.strip().partition('#')[0]
name, _, version = record.partition('==')
req_lines.append((line, name, version))
if name:
requirements[name.lower()] = (version, name)
except IOError as exc:
logger.warning('Cannot read requirements file: %s', exc)
command = args.command or 'shell'
if command == 'shell':
shell(*(getattr(args, 'arg', ())))
elif command == 'run':
try:
subprocess.check_call(args.arg)
except subprocess.CalledProcessError as exc:
logger.error('Called process returned %s', exc.returncode)
elif command == 'update':
os.execlp('python', 'python', '-m', 'pip', 'install', '-r', req_filename)
elif command == 'list':
print("\n".join(
            f'{orig}=={version}' if version else orig
for name, (version, orig) in requirements.items()) if requirements else "(requirements empty)")
elif command == 'freeze':
subcmd_freeze(req_filename, requirements, req_lines)
elif command == 'add':
subcmd_add(req_filename, requirements, req_lines, args.pkg.strip())
elif command == 'rm':
subcmd_rm(req_filename, requirements, req_lines, args.pkg.strip())
def setup_parser(cmd, parser):
parser_sub = parser.add_subparsers(dest='command', help='Env command')
parser_shell = parser_sub.add_parser('shell', help='Enter shell in virtualenv (default)')
parser_shell.add_argument('arg', nargs='*')
parser_run = parser_sub.add_parser('run', help='Run a command in virtualenv')
parser_run.add_argument('arg', nargs='+')
parser_update = parser_sub.add_parser('update', help='Update virtualenv')
parser_add = parser_sub.add_parser('add', help='Add a package')
parser_add.add_argument('pkg')
parser_rm = parser_sub.add_parser('rm', help='Remove a package')
parser_rm.add_argument('pkg')
parser_list = parser_sub.add_parser('list', help='List packages')
parser_freeze = parser_sub.add_parser('freeze', help='Freeze existing requirements')
parser.set_defaults(command='shell', call=cmd_env)
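#A hedged sketch (comments only): one way a parent CLI could wire this plugin
#in, assuming an argparse-based dispatcher that reads COMMANDS and calls
#setup_parser() per command. Names other than COMMANDS, setup_parser and
#args.call are assumptions.
#
#    import argparse
#    from mytool.commands import env
#
#    parser = argparse.ArgumentParser(prog='mytool')
#    subparsers = parser.add_subparsers(dest='cmd')
#    for cmd, help_text in env.COMMANDS.items():
#        env.setup_parser(cmd, subparsers.add_parser(cmd, help=help_text))
#    args = parser.parse_args()
#    args.call(args)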
|