filename | text
---|---|
the-stack_0_19266 | # -*- coding: utf-8 -*-
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
j_hor = []
j_ver = []
j_bro = []
j_non = []
for i in range(0, 20):
hor = pd.read_csv('lowtest_hor/hor_%s.csv' % i)
ver = pd.read_csv('lowtest_ver/ver_%s.csv' % i)
bro = pd.read_csv('lowtest_brownout/brownout_%s.csv' % i)
non = pd.read_csv('lowtest_raw/jmeter_rawdata_%s.csv' % i)
hor = hor['success'].astype('int')
ver = ver['success'].astype('int')
bro = bro['success'].astype('int')
non = non['success'].astype('int')
hor = np.mean(hor)
ver = np.mean(ver)
bro = np.mean(bro)
non = np.mean(non)
j_hor.append(hor)
j_ver.append(ver)
j_bro.append(bro)
j_non.append(non)
hor_save = pd.DataFrame(j_hor)
ver_save = pd.DataFrame(j_ver)
bro_save = pd.DataFrame(j_bro)
hor_save.to_csv('csv/success_hor.csv', header=False)
ver_save.to_csv('csv/success_ver.csv', header=False)
bro_save.to_csv('csv/success_bro.csv', header=False)
plt.xlabel('round')
plt.ylabel('success')
plt.plot(j_hor, label='hor', color='red')
plt.plot(j_ver, label='ver', color='blue')
plt.plot(j_bro, label='brownout', color='green')
# plt.plot(j_non, label='raw', color='coral')
plt.legend()
plt.savefig('img/success_compare.png')
plt.show()
|
the-stack_0_19267 | from ex115 import fmt, dados
while True:
opc = fmt.menuOpc()
if opc == 1:
dados.reader()
elif opc == 2:
fmt.dataManagement()
elif opc == 3:
print('Obrigado e até a próxima!')
break
else:
print('Digite uma opção válida.')
|
the-stack_0_19268 | import numpy as np
import jax.numpy as jnp
from jax.numpy import interp
from jax import jit, partial, random, vmap
from tqdm import tqdm
import warnings
warnings.filterwarnings("ignore")
np.printoptions(precision=2)
'''
Constants
'''
# time line, starts at 20 ends at 80
T_min = 0
T_max = 60
T_R = 45
# discounting factor
beta = 1/(1+0.02)
# utility function parameter
gamma = 2.0
# relative importance of housing consumption and non durable consumption
alpha = 0.7
# parameter used to calculate the housing consumption
kappa = 0.3
# uB associated parameter
B = 2
# social welfare after the unemployment
welfare = 20
# tax rate before and after retirement
tau_L = 0.2
tau_R = 0.1
# number of states S
nS = 8
# number of states e
nE = 2
# housing state
nO = 2
# experience state
nZ = 2
'''
Economic state calibration
'''
# probability of survival
Pa = jnp.array(np.load("constant/prob.npy"))
# deterministic income
detEarning = jnp.array(np.load("constant/detEarningHigh.npy"))
############################################################################################################ high skill feature
# fix the deterministic income
detEarning = jnp.concatenate([detEarning[:46], detEarning[46:]-25])
# stock transaction fee
Kc = 0.02
# stock participation cost
c_k = 20
############################################################################################################ high skill feature
# Define transition matrix of economical states S
Ps = np.genfromtxt('constant/Ps.csv',delimiter=',')
Ps = jnp.array(Ps)
# The possible GDP growth, stock return, bond return
gkfe = np.genfromtxt('constant/gkfe.csv',delimiter=',')
gkfe = jnp.array(gkfe)
# GDP growth depending on current S state
gGDP = gkfe[:,0]/100
# risk free interest rate depending on current S state
r_b = gkfe[:,1]/100
# stock return depending on current S state
r_k = gkfe[:,2]/100
# unemployment rate depending on current S state
Pe = gkfe[:,7:]/100
Pe = Pe[:,::-1]
'''
calculate stationary distribution to prepare for simulation
'''
# calculate the stationary distribution of econ state and employment state
S_distribution = jnp.ones(nS)/nS
for _ in range(100):
S_distribution = jnp.matmul(S_distribution, Ps)
#P(0,1)
P01 = jnp.dot(Pe[:,0],S_distribution)
#P(1,0)
P10 = jnp.dot(Pe[:,1],S_distribution)
jnp.array([[1-P01, P01],[P10, 1-P10]])
E_distribution = jnp.ones(2)/2
for _ in range(100):
E_distribution = jnp.matmul(E_distribution, jnp.array([[1-P01, P01],[P10, 1-P10]]))
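# (Added note) The two loops above are a simple power iteration: repeatedly right-multiplying a
# distribution by the transition matrix converges to the stationary distribution, i.e. the row
# vector pi satisfying pi = pi @ Ps. The same idea is reused for the 2-state employment chain.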
'''
401k related constants
'''
# 401k amount growth rate
r_bar = 0.02
# income fraction goes into 401k
yi = 0.04
Pa = Pa[:T_max]
Nt = [np.sum(Pa[t:]) for t in range(T_min,T_max)]
# factor used to calculate the withdraw amount
Dn = [(r_bar*(1+r_bar)**N)/((1+r_bar)**N - 1) for N in Nt]
Dn[-1] = 1
Dn = jnp.array(Dn)
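# (Added note) Dn[t] is the standard annuity payout factor r*(1+r)**N / ((1+r)**N - 1):
# withdrawing balance*Dn each period exhausts the balance over the remaining N periods.
# Quick sanity check with assumed values r_bar = 0.02 and N = 2:
#   0.02*1.02**2 / (1.02**2 - 1) ≈ 0.515, i.e. two roughly equal installments.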
# cash accumulated before retirement
nEarning = yi*E_distribution[1]*(1+jnp.dot(S_distribution,gGDP))*detEarning[:45]
n_balance = np.zeros(T_R)
for t in range(T_R):
nMultiplier = jnp.array([(1+r_bar)**(t-i) for i in range(t)])
n_balance[t] = (nEarning[:t] * nMultiplier).sum()
# cash payouts after retirement
n_payout = []
amount = n_balance[-1]
for t in range(45, 60):
n_payout.append(amount*Dn[t])
amount = amount - amount*Dn[t]
n_balance = jnp.append(n_balance,amount)
n_payout = jnp.array(n_payout)
'''
housing related constants
'''
# variable associated with housing and mortgage
# age limit of buying a house
ageLimit = 30
mortgageLength = 30
# mortgage rate
rh = 0.045
# housing unit
H = 1000
# max rent unit
Rl = 500
# housing price constant
pt = 2*250/1000
# 30k rent 1000 sf
pr = 2*10/1000 * 2
# constant cost
c_h = 5
c_s = H*pt*0.4
# Dm is used to update the mortgage payment
Dm = [(1+rh) - rh*(1+rh)**(T_max - t)/((1+rh)**(T_max-t)-1) for t in range(T_min, T_max)]
Dm[-1] = 0
Dm = jnp.array(Dm)
# 30 year mortgage
Ms = []
M = H*pt*0.8
m = M*(1+rh) - Dm[30]*M
for i in range(30, T_max):
Ms.append(M)
M = M*(1+rh) - m
Ms.append(0)
Ms = jnp.array(Ms)
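# (Added note) m above is the fixed annual mortgage payment implied by Dm over the 30-year term,
# and Ms[i] tracks the remaining principal: each year the balance accrues interest (times 1+rh)
# and is reduced by the payment m, reaching roughly zero at the end of the term.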
'''
Discretize the state space
Discretize the action space
'''
# actions discretization (hp, cp, kp)
numGrid = 20
As = np.array(np.meshgrid(np.linspace(0.001,0.999,numGrid), np.linspace(0,1,numGrid), [0,1])).T.reshape(-1,3)
As = jnp.array(As)
# wealth discretization
wealthLevel = 300
polynomialDegree = 3
ws = jnp.linspace(0, np.power(wealthLevel,1/polynomialDegree), numGrid)**polynomialDegree
# age at which the house was last bought; the value only counts when o = 1.
aBuy = np.array(range(ageLimit))
# dimensions of the state
dim = (ws.size, aBuy.size, nS, nE, nO, nZ)
dimSize = len(dim)
xgrid = np.array([[w,ab,s,e,o,z] for w in ws
for ab in aBuy
for s in range(nS)
for e in range(nE)
for o in range(nO)
for z in range(nZ)]).reshape(dim + (dimSize,))
Xs = xgrid.reshape((np.prod(dim),dimSize))
Xs = jnp.array(Xs)
Vgrid = np.zeros(dim + (T_max,))
# start of function definitions
nX = Xs.shape[0]
nA = As.shape[0]
'''
Functions Definitions
'''
#Define the earning function, which applies to both employment statuses and all 8 econ states
@partial(jit, static_argnums=(0,))
def y(t, x):
'''
x = [w,ab,s,e,o,z]
x = [0,1, 2,3,4,5]
'''
if t < T_R:
return detEarning[t] * (1+gGDP[jnp.array(x[2], dtype = jnp.int8)]) * x[3] + (1-x[3]) * welfare
else:
return detEarning[-1]
#Earnings after tax, adjusted for transfers into and out of the 401k account
@partial(jit, static_argnums=(0,))
def yAT(t,x):
yt = y(t, x)
if t < T_R:
# yi portion of the income will be put into the 401k if employed
return (1-tau_L)*(yt * (1-yi))*x[3] + (1-x[3])*yt
else:
# t >= T_R, n/discounting amount will be withdrawn from the 401k
return (1-tau_R)*yt + n_payout[t-T_R]
#Define the utility function
@jit
def u(c):
return jnp.nan_to_num(x = (jnp.power(c, 1-gamma) - 1)/(1 - gamma), nan = -jnp.inf)
#Define the bequeath function, which is a function of bequeath wealth
@jit
def uB(tb):
return B*u(tb)
#Reward function depends on the housing and non-housing consumption
@jit
def R(a):
'''
Input:
a = [c,b,k,h,action]
a = [0,1,2,3,4]
'''
c = a[:,0]
b = a[:,1]
k = a[:,2]
h = a[:,3]
C = jnp.power(c, alpha) * jnp.power(h, 1-alpha)
return u(C) + (-1/((c > 0) * (b >= 0) * (k >= 0) * (h > 0)) + 1)
# pc*qc / (ph*qh) = alpha/(1-alpha)
@partial(jit, static_argnums=(0,))
def feasibleActions(t, x):
'''
x = [w,ab,s,e,o,z]
x = [0,1, 2,3,4,5]
a = [c,b,k,h,action]
a = [0,1,2,3,4]
'''
# owner
sell = As[:,2]
ab = jnp.array(x[1], dtype = jnp.int8)
# last term is the tax deduction of the interest portion of mortgage payment
payment = ((t-ab) > 0)*((t-ab) <= mortgageLength)*(((t<=T_R)*tau_L + (t>T_R)*tau_R)*Ms[t-ab]*rh - m)
# this is the fire-sale term: as long as we can afford the payment, do not sell
sell = (yAT(t,x) + x[0] + payment > 0)*jnp.zeros(nA) + (yAT(t,x) + x[0] + payment <= 0)*jnp.ones(nA)
budget1 = yAT(t,x) + x[0] + (1-sell)*payment + sell*(H*pt - Ms[t-ab] - c_s)
h = jnp.ones(nA)*H*(1+kappa)*(1-sell) + sell*jnp.clip(budget1*As[:,0]*(1-alpha)/pr, a_max = Rl)
c = budget1*As[:,0]*(1-sell) + sell*(budget1*As[:,0] - h*pr)
budget2 = budget1*(1-As[:,0])
k = budget2*As[:,1]
k = k - (1-x[5])*(k>0)*c_k
k = k*(1-Kc)
b = budget2*(1-As[:,1])
owner_action = jnp.column_stack((c,b,k,h,sell))
# renter
buy = As[:,2]*(t < ageLimit)
budget1 = yAT(t,x) + x[0] - buy*(H*pt*0.2 + c_h)
h = jnp.clip(budget1*As[:,0]*(1-alpha)/pr, a_max = Rl)*(1-buy) + buy*jnp.ones(nA)*H*(1+kappa)
c = (budget1*As[:,0] - h*pr)*(1-buy) + buy*budget1*As[:,0]
budget2 = budget1*(1-As[:,0])
k = budget2*As[:,1]
k = k - (1-x[5])*(k>0)*c_k
k = k*(1-Kc)
b = budget2*(1-As[:,1])
renter_action = jnp.column_stack((c,b,k,h,buy))
actions = x[4]*owner_action + (1-x[4])*renter_action
return actions
@partial(jit, static_argnums=(0,))
def transition(t,a,x):
'''
Input:
x = [w,ab,s,e,o,z]
x = [0,1, 2,3,4,5]
a = [c,b,k,h,action]
a = [0,1,2,3,4]
Output:
w_next
ab_next
s_next
e_next
o_next
z_next
prob_next
'''
nA = a.shape[0]
s = jnp.array(x[2], dtype = jnp.int8)
e = jnp.array(x[3], dtype = jnp.int8)
# actions taken
b = a[:,1]
k = a[:,2]
action = a[:,4]
w_next = ((1+r_b[s])*b + jnp.outer(k,(1+r_k)).T).T.flatten().repeat(nE)
ab_next = (1-x[4])*(t*(action == 1)).repeat(nS*nE) + x[4]*(x[1]*jnp.ones(w_next.size))
s_next = jnp.tile(jnp.arange(nS),nA).repeat(nE)
e_next = jnp.column_stack((e.repeat(nA*nS),(1-e).repeat(nA*nS))).flatten()
z_next = x[5]*jnp.ones(w_next.size) + ((1-x[5]) * (k > 0)).repeat(nS*nE)
# job status changing probability and econ state transition probability
pe = Pe[s, e]
ps = jnp.tile(Ps[s], nA)
prob_next = jnp.column_stack(((1-pe)*ps,pe*ps)).flatten()
# owner
o_next_own = (x[4] - action).repeat(nS*nE)
# renter
o_next_rent = action.repeat(nS*nE)
o_next = x[4] * o_next_own + (1-x[4]) * o_next_rent
return jnp.column_stack((w_next,ab_next,s_next,e_next,o_next,z_next,prob_next))
# used to calculate dot product
@jit
def dotProduct(p_next, uBTB):
return (p_next*uBTB).reshape((p_next.shape[0]//(nS*nE), (nS*nE))).sum(axis = 1)
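# (Added note) p_next and uBTB are flat arrays of length nA*nS*nE; the element-wise product is
# regrouped into one block of nS*nE successor states per action and summed, so the result is
# one expected value per candidate action.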
# define approximation of fit
@jit
def fit(v, xpp):
value = vmap(partial(jnp.interp,xp = ws))(x = xpp[:,0], fp = v[:,jnp.array(xpp[:,1], dtype = int),
jnp.array(xpp[:,2], dtype = int),
jnp.array(xpp[:,3], dtype = int),
jnp.array(xpp[:,4], dtype = int),
jnp.array(xpp[:,5], dtype = int)].T)
return jnp.nan_to_num(x = value, nan = -jnp.inf)
@partial(jit, static_argnums=(0,))
def V(t,V_next,x):
'''
x = [w,ab,s,e,o,z]
x = [0,1, 2,3,4,5]
xp:
w_next 0
ab_next 1
s_next 2
e_next 3
o_next 4
z_next 5
prob_next 6
'''
actions = feasibleActions(t,x)
xp = transition(t,actions,x)
# bequeath utility, wealth level, the retirement account; the heir sells the house at a cost of 25k
TB = xp[:,0] + n_balance[t] + xp[:,4]*(H*pt-Ms[jnp.array(t-xp[:,1], dtype = jnp.int8)]*(1+rh) - 25)
bequeathU = uB(TB)
if t == T_max-1:
Q = R(actions) + beta * dotProduct(xp[:,6], bequeathU)
else:
Q = R(actions) + beta * dotProduct(xp[:,6], Pa[t]*fit(V_next, xp) + (1-Pa[t])*bequeathU)
Q = Q + (-jnp.inf)*(x[1] >= t)
v = Q.max()
return v
@partial(jit, static_argnums=(0,))
def V_solve(t,V_next,x):
'''
x = [w,ab,s,e,o,z]
x = [0,1, 2,3,4,5]
xp:
w_next 0
ab_next 1
s_next 2
e_next 3
o_next 4
z_next 5
prob_next 6
'''
actions = feasibleActions(t,x)
xp = transition(t,actions,x)
# bequeath utility, wealth level, the retirement account; the heir sells the house at a cost of 25k
TB = xp[:,0] + n_balance[t] + xp[:,4]*(H*pt-Ms[jnp.array(t-xp[:,1], dtype = jnp.int8)]*(1+rh) - 25)
bequeathU = uB(TB)
if t == T_max-1:
Q = R(actions) + beta * dotProduct(xp[:,6], bequeathU)
else:
Q = R(actions) + beta * dotProduct(xp[:,6], Pa[t]*fit(V_next, xp) + (1-Pa[t])*bequeathU)
Q = Q + (-jnp.inf)*(x[1] >= t)
v = Q.max()
cbkha = actions[Q.argmax()]
return v, cbkha
###################################solving the model##################################################
import os.path
if os.path.exists("richHigh.npy"):
print("Model Solved! ")
else:
for t in tqdm(range(T_max-1,T_min-1, -1)):
if t == T_max-1:
v = vmap(partial(V,t,Vgrid[:,:,:,:,:,:,t]))(Xs)
else:
v = vmap(partial(V,t,Vgrid[:,:,:,:,:,:,t+1]))(Xs)
Vgrid[:,:,:,:,:,:,t] = v.reshape(dim)
np.save("richHigh",Vgrid)
|
the-stack_0_19269 | import html
import os
import re
from typing import Iterable
from docutils import nodes
from docutils.parsers.rst import directives
from jinja2 import Template
from sphinxcontrib.plantuml import (
generate_name, # Need for plantuml filename calculation
)
from sphinxcontrib.needs.diagrams_common import calculate_link, create_legend
from sphinxcontrib.needs.filter_common import (
FilterBase,
filter_single_need,
process_filters,
)
from sphinxcontrib.needs.logging import get_logger
logger = get_logger(__name__)
class Needflow(nodes.General, nodes.Element):
pass
class NeedflowDirective(FilterBase):
"""
Directive to get flow charts.
"""
optional_arguments = 1
final_argument_whitespace = True
option_spec = {
"show_legend": directives.flag,
"show_filters": directives.flag,
"show_link_names": directives.flag,
"link_types": directives.unchanged_required,
"config": directives.unchanged_required,
"scale": directives.unchanged_required,
"highlight": directives.unchanged_required,
"align": directives.unchanged_required,
"debug": directives.flag,
}
# Update the options_spec with values defined in the FilterBase class
option_spec.update(FilterBase.base_option_spec)
def run(self):
env = self.state.document.settings.env
if not hasattr(env, "need_all_needflows"):
env.need_all_needflows = {}
# be sure, global var is available. If not, create it
if not hasattr(env, "needs_all_needs"):
env.needs_all_needs = {}
id = env.new_serialno("needflow")
targetid = f"needflow-{env.docname}-{id}"
targetnode = nodes.target("", "", ids=[targetid])
all_link_types = ",".join(x["option"] for x in env.config.needs_extra_links)
link_types = list(split_link_types(self.options.get("link_types", all_link_types)))
config_names = self.options.get("config", None)
configs = []
if config_names:
for config_name in config_names.split(","):
config_name = config_name.strip()
if config_name and config_name in env.config.needs_flow_configs:
configs.append(env.config.needs_flow_configs[config_name])
scale = self.options.get("scale", "100").replace("%", "")
if not scale.isdigit():
raise Exception(f'Needflow scale value must be a number. "{scale}" found')
if int(scale) < 1 or int(scale) > 300:
raise Exception(f'Needflow scale value must be between 1 and 300. "{scale}" found')
highlight = self.options.get("highlight", "")
caption = None
if self.arguments:
caption = self.arguments[0]
# Add the need and all needed information
env.need_all_needflows[targetid] = {
"docname": env.docname,
"lineno": self.lineno,
"target_node": targetnode,
"caption": caption,
"show_filters": "show_filters" in self.options,
"show_legend": "show_legend" in self.options,
"show_link_names": "show_link_names" in self.options,
"debug": "debug" in self.options,
"config_names": config_names,
"config": "\n".join(configs),
"scale": scale,
"highlight": highlight,
"align": self.options.get("align", None),
"link_types": link_types,
"env": env,
}
env.need_all_needflows[targetid].update(self.collect_filter_attributes())
return [targetnode] + [Needflow("")]
def split_link_types(link_types: str) -> Iterable[str]:
def is_valid(link_type) -> bool:
if len(link_type) == 0 or link_type.isspace():
logger.warning("Scruffy link_type definition found in needflow." "Defined link_type contains spaces only.")
return False
return True
return filter(
is_valid,
(x.strip() for x in re.split(";|,", link_types)),
)
def make_entity_name(name):
"""Creates a valid PlantUML entity name from the given value."""
invalid_chars = "-=!#$%^&*[](){}/~'`<>:;"
for char in invalid_chars:
name = name.replace(char, "_")
return name
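# Example (illustrative input): make_entity_name("REQ-001[1]") returns "REQ_001_1_",
# since '-', '[' and ']' are all in the invalid-character list above.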
def process_needflow(app, doctree, fromdocname):
# Replace all needflow nodes with a list of the collected needs.
# Augment each need with a backlink to the original location.
env = app.builder.env
link_types = env.config.needs_extra_links
allowed_link_types_options = [link.upper() for link in env.config.needs_flow_link_types]
# NEEDFLOW
for node in doctree.traverse(Needflow):
if not app.config.needs_include_needs:
# Ok, this is really dirty.
# If we replace a node, docutils checks that it will not lose any attributes.
# But that is the case here, because we are using the "ids" attribute of the node.
# However, it is unclear why losing an attribute is such a big deal, so we delete everything
# before docutils complains about it.
for att in ("ids", "names", "classes", "dupnames"):
node[att] = []
node.replace_self([])
continue
id = node.attributes["ids"][0]
current_needflow = env.need_all_needflows[id]
all_needs = env.needs_all_needs
option_link_types = [link.upper() for link in current_needflow["link_types"]]
for lt in option_link_types:
if lt not in [link["option"].upper() for link in link_types]:
logger.warning(
"Unknown link type {link_type} in needflow {flow}. Allowed values: {link_types}".format(
link_type=lt, flow=current_needflow["target_node"], link_types=",".join(link_types)
)
)
content = []
try:
if "sphinxcontrib.plantuml" not in app.config.extensions:
raise ImportError
from sphinxcontrib.plantuml import plantuml
except ImportError:
content = nodes.error()
para = nodes.paragraph()
text = nodes.Text("PlantUML is not available!", "PlantUML is not available!")
para += text
content.append(para)
node.replace_self(content)
continue
plantuml_block_text = ".. plantuml::\n" "\n" " @startuml" " @enduml"
puml_node = plantuml(plantuml_block_text)
puml_node["uml"] = "@startuml\n"
puml_connections = ""
# Adding config
config = current_needflow["config"]
if config and len(config) >= 3:
# Remove all empty lines
config = "\n".join([line.strip() for line in config.split("\n") if line.strip()])
puml_node["uml"] += "\n' Config\n\n"
puml_node["uml"] += config
puml_node["uml"] += "\n\n"
all_needs = list(all_needs.values())
found_needs = process_filters(app, all_needs, current_needflow)
processed_need_part_ids = []
puml_node["uml"] += "\n' Nodes definition \n\n"
for need_info in found_needs:
# Check if need_part was already handled during handling of parent need.
# If this is the case, it is already part of puml-code and we do not need to create a node.
if not (need_info["is_part"] and need_info["id_complete"] in processed_need_part_ids):
# Check if we need to embed need_parts into parent need, because they are also part of search result.
node_part_code = ""
valid_need_parts = [x for x in found_needs if x["is_part"] and x["id_parent"] == need_info["id"]]
for need_part in valid_need_parts:
part_link = calculate_link(app, need_part, fromdocname)
diagram_template = Template(env.config.needs_diagram_template)
part_text = diagram_template.render(**need_part)
part_colors = []
if need_part["type_color"]:
# We set # later, as the user may not have given a color and the node must get highlighted
part_colors.append(need_part["type_color"].replace("#", ""))
if current_needflow["highlight"] and filter_single_need(
app, need_part, current_needflow["highlight"], all_needs
):
part_colors.append("line:FF0000")
node_part_code += '{style} "{node_text}" as {id} [[{link}]] #{color}\n'.format(
id=make_entity_name(need_part["id_complete"]),
node_text=part_text,
link=part_link,
color=";".join(part_colors),
style="rectangle",
)
processed_need_part_ids.append(need_part["id_complete"])
link = calculate_link(app, need_info, fromdocname)
diagram_template = Template(env.config.needs_diagram_template)
node_text = diagram_template.render(**need_info)
if need_info["is_part"]:
need_id = need_info["id_complete"]
else:
need_id = need_info["id"]
colors = []
if need_info["type_color"]:
# We set # later, as the user may not have given a color and the node must get highlighted
colors.append(need_info["type_color"].replace("#", ""))
if current_needflow["highlight"] and filter_single_need(
app, need_info, current_needflow["highlight"], all_needs
):
colors.append("line:FF0000")
# Only add subelements and their {...} container, if we really need them.
# Otherwise plantuml may not set style correctly, if {..} is empty
if node_part_code:
node_part_code = f"{{\n {node_part_code} }}"
style = need_info["type_style"]
node_code = '{style} "{node_text}" as {id} [[{link}]] #{color} {need_parts}\n'.format(
id=make_entity_name(need_id),
node_text=node_text,
link=link,
color=";".join(colors),
style=style,
need_parts=node_part_code,
)
puml_node["uml"] += node_code
for link_type in link_types:
# Skip link-type handling, if it is not part of a specified list of allowed link_types or
# if not part of the overall configuration of needs_flow_link_types
if (current_needflow["link_types"] and link_type["option"].upper() not in option_link_types) or (
not current_needflow["link_types"] and link_type["option"].upper() not in allowed_link_types_options
):
continue
for link in need_info[link_type["option"]]:
# If source or target of link is a need_part, a specific style is needed
if "." in link or "." in need_info["id_complete"]:
final_link = link
if current_needflow["show_link_names"] or env.config.needs_flow_show_links:
desc = link_type["outgoing"] + "\\n"
comment = f": {desc}"
else:
comment = ""
if "style_part" in link_type and link_type["style_part"]:
link_style = "[{style}]".format(style=link_type["style_part"])
else:
link_style = "[dotted]"
else:
final_link = link
if current_needflow["show_link_names"] or env.config.needs_flow_show_links:
comment = ": {desc}".format(desc=link_type["outgoing"])
else:
comment = ""
if "style" in link_type and link_type["style"]:
link_style = "[{style}]".format(style=link_type["style"])
else:
link_style = ""
# Do not create any links if the link target is not part of the search result.
if final_link not in [x["id"] for x in found_needs if x["is_need"]] and final_link not in [
x["id_complete"] for x in found_needs if x["is_part"]
]:
continue
if "style_start" in link_type and link_type["style_start"]:
style_start = link_type["style_start"]
else:
style_start = "-"
if "style_end" in link_type and link_type["style_end"]:
style_end = link_type["style_end"]
else:
style_end = "->"
puml_connections += "{id} {style_start}{link_style}{style_end} {link}{comment}\n".format(
id=make_entity_name(need_info["id_complete"]),
link=make_entity_name(final_link),
comment=comment,
link_style=link_style,
style_start=style_start,
style_end=style_end,
)
puml_node["uml"] += "\n' Connection definition \n\n"
puml_node["uml"] += puml_connections
# Create a legend
if current_needflow["show_legend"]:
puml_node["uml"] += create_legend(app.config.needs_types)
puml_node["uml"] += "\n@enduml"
puml_node["incdir"] = os.path.dirname(current_needflow["docname"])
puml_node["filename"] = os.path.split(current_needflow["docname"])[1] # Needed for plantuml >= 0.9
scale = int(current_needflow["scale"])
# if scale != 100:
puml_node["scale"] = scale
puml_node = nodes.figure("", puml_node)
if current_needflow["align"]:
puml_node["align"] = current_needflow["align"]
else:
puml_node["align"] = "center"
if current_needflow["caption"]:
# Make the caption to a link to the original file.
try:
if "SVG" in app.config.plantuml_output_format.upper():
file_ext = "svg"
else:
file_ext = "png"
except Exception:
file_ext = "png"
gen_flow_link = generate_name(app, puml_node.children[0], file_ext)
current_file_parts = fromdocname.split("/")
subfolder_amount = len(current_file_parts) - 1
img_locaton = "../" * subfolder_amount + "_images/" + gen_flow_link[0].split("/")[-1]
flow_ref = nodes.reference("t", current_needflow["caption"], refuri=img_locaton)
puml_node += nodes.caption("", "", flow_ref)
# Add lineno to node
puml_node.line = current_needflow["lineno"]
content.append(puml_node)
if len(content) == 0:
nothing_found = "No needs passed the filters"
para = nodes.paragraph()
nothing_found_node = nodes.Text(nothing_found, nothing_found)
para += nothing_found_node
content.append(para)
if current_needflow["show_filters"]:
para = nodes.paragraph()
filter_text = "Used filter:"
filter_text += (
" status(%s)" % " OR ".join(current_needflow["status"]) if len(current_needflow["status"]) > 0 else ""
)
if len(current_needflow["status"]) > 0 and len(current_needflow["tags"]) > 0:
filter_text += " AND "
filter_text += (
" tags(%s)" % " OR ".join(current_needflow["tags"]) if len(current_needflow["tags"]) > 0 else ""
)
if (len(current_needflow["status"]) > 0 or len(current_needflow["tags"]) > 0) and len(
current_needflow["types"]
) > 0:
filter_text += " AND "
filter_text += (
" types(%s)" % " OR ".join(current_needflow["types"]) if len(current_needflow["types"]) > 0 else ""
)
filter_node = nodes.emphasis(filter_text, filter_text)
para += filter_node
content.append(para)
if current_needflow["debug"]:
debug_container = nodes.container()
if isinstance(puml_node, nodes.figure):
data = puml_node.children[0]["uml"]
else:
data = puml_node["uml"]
data = "\n".join([html.escape(line) for line in data.split("\n")])
debug_para = nodes.raw("", f"<pre>{data}</pre>", format="html")
debug_container += debug_para
content += debug_container
node.replace_self(content)
|
the-stack_0_19270 | #! /usr/bin/env python
# Import the needed types.
from comp0037_planner_controller.occupancy_grid import OccupancyGrid
from comp0037_planner_controller.fifo_planner import FIFOPlanner
# Create the occupancy grid. Syntax is: number of cells in X, number of cells in Y,
# length of each cell in m
occupancyGrid = OccupancyGrid(21, 21, 0.5)
# The cells are indexed starting from 0.
# Set the state of the cells in the range [11,1]-[11,19] to be occupied.
# This corresponds to the "easy case" in the lectures
for y in xrange(1, 19):
occupancyGrid.setCell(11, y, 1)
# Start and goal cells
start = (3, 18)
goal = (20, 0)
# Create the planner. The first field is the title which will appear in the
# graphics window, the second the occupancy grid used.
planner = FIFOPlanner('Breadth First Search', occupancyGrid);
# This causes the planner to slow down and pause for things like key entries
planner.setRunInteractively(True)
# This specifies the height of the window drawn showing the occupancy grid. Everything
# should scale automatically to properly preserve the aspect ratio
planner.setWindowHeightInPixels(400)
# Search and see if a path can be found. Returns True if a path from the start to the
# goal was found and False otherwise
goalReached = planner.search(start, goal)
# Extract the path. This is based on the last search carried out.
path = planner.extractPathToGoal()
# Note that you can run multiple planners - each one will create and update its own window.
# See the minkowski_sum_tester as an example
|
the-stack_0_19271 | # This example requires the 'members' privileged intents
import disnake
class MyClient(disnake.Client):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.role_message_id = 0 # ID of the message that can be reacted to to add/remove a role.
self.emoji_to_role = {
disnake.PartialEmoji(name="🔴"): 0, # ID of the role associated with unicode emoji '🔴'.
disnake.PartialEmoji(name="🟡"): 0, # ID of the role associated with unicode emoji '🟡'.
disnake.PartialEmoji(
name="green", id=0
): 0, # ID of the role associated with a partial emoji's ID.
}
async def on_raw_reaction_add(self, payload: disnake.RawReactionActionEvent):
"""Gives a role based on a reaction emoji."""
# Make sure that the message the user is reacting to is the one we care about.
if payload.message_id != self.role_message_id:
return
guild = self.get_guild(payload.guild_id)
if guild is None:
# Check if we're still in the guild and it's cached.
return
try:
role_id = self.emoji_to_role[payload.emoji]
except KeyError:
# If the emoji isn't the one we care about then exit as well.
return
role = guild.get_role(role_id)
if role is None:
# Make sure the role still exists and is valid.
return
try:
# Finally, add the role.
await payload.member.add_roles(role)
except disnake.HTTPException:
# If we want to do something in case of errors we'd do it here.
pass
async def on_raw_reaction_remove(self, payload: disnake.RawReactionActionEvent):
"""Removes a role based on a reaction emoji."""
# Make sure that the message the user is reacting to is the one we care about.
if payload.message_id != self.role_message_id:
return
guild = self.get_guild(payload.guild_id)
if guild is None:
# Check if we're still in the guild and it's cached.
return
try:
role_id = self.emoji_to_role[payload.emoji]
except KeyError:
# If the emoji isn't the one we care about then exit as well.
return
role = guild.get_role(role_id)
if role is None:
# Make sure the role still exists and is valid.
return
# The payload for `on_raw_reaction_remove` does not provide `.member`
# so we must get the member ourselves from the payload's `.user_id`.
member = guild.get_member(payload.user_id)
if member is None:
# Make sure the member still exists and is valid.
return
try:
# Finally, remove the role.
await member.remove_roles(role)
except disnake.HTTPException:
# If we want to do something in case of errors we'd do it here.
pass
intents = disnake.Intents.default()
intents.members = True
client = MyClient(intents=intents)
client.run("token")
|
the-stack_0_19272 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the RPC HTTP basics."""
import http.client
import urllib.parse
from test_framework.test_framework import TrumpCoinTestFramework
from test_framework.util import assert_equal, str_to_b64str
class HTTPBasicsTest (TrumpCoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
def setup_network(self):
self.setup_nodes()
def run_test(self):
#################################################
# lowlevel check for http persistent connection #
#################################################
url = urllib.parse.urlparse(self.nodes[0].url)
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock is not None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock is not None) #according to http/1.1 connection must still be open!
conn.close()
#same should be if we add keep-alive because this should be the std. behaviour
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock is not None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock is not None) #according to http/1.1 connection must still be open!
conn.close()
#now do the same with "Connection: close"
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock is None) #now the connection must be closed after the response
#node1 (2nd node) is running with disabled keep-alive option
urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
authpair = urlNode1.username + ':' + urlNode1.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
#node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
authpair = urlNode2.username + ':' + urlNode2.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock is not None) #connection must not be closed because trumpcoind should use keep-alive by default
# Check excessive request size
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*1000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.NOT_FOUND)
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*10000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.BAD_REQUEST)
if __name__ == '__main__':
HTTPBasicsTest ().main ()
|
the-stack_0_19276 | import warnings
import numpy as np
import pandas as pd
import xgboost as xgb
from joblib import Parallel, delayed
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import BallTree
from sklearn.preprocessing import OneHotEncoder
# lib utils
from xgbse._base import XGBSEBaseEstimator, DummyLogisticRegression
from xgbse.converters import convert_data_to_xgb_format, convert_y
# at which percentiles will the KM predict
from xgbse.non_parametric import get_time_bins, calculate_interval_failures
KM_PERCENTILES = np.linspace(0, 1, 11)
DEFAULT_PARAMS = {
"objective": "survival:aft",
"eval_metric": "aft-nloglik",
"aft_loss_distribution": "normal",
"aft_loss_distribution_scale": 1,
"tree_method": "hist",
"learning_rate": 5e-2,
"max_depth": 8,
"booster": "dart",
"subsample": 0.5,
"min_child_weight": 50,
"colsample_bynode": 0.5,
}
DEFAULT_PARAMS_LR = {"C": 1e-3, "max_iter": 500}
def _repeat_array(x, n):
"""
Repeats an array x n times. Resulting array has shape (len(x), n).
Args:
x (np.array): An array to be repeated
n (Int): Number of times to repeat array x
Returns:
(np.array): Array x repeated n times.
"""
return np.array([x] * n).T
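# Example (assumed input): _repeat_array(np.array([1, 2, 3]), 2) returns
#   array([[1, 1],
#          [2, 2],
#          [3, 3]])        # shape (len(x), n)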
def _build_multi_task_targets(E, T, time_bins):
"""
Builds compatible multi task targets. This function creates a times array
from time 0 to T, where T is the event/censor last
observed time. If time_bins > T, times greater than the last observed
time T are considered equal to -1.
Args:
E ([np.array, pd.Series]): Array of censors(0)/events(1).
T ([np.array, pd.Series]): Array of times.
time_bins ([np.array]): Specified time bins to split targets.
Returns:
targets (pd.Series): A Series with multi task targets (for data existent just up to time T=t, all times over t are considered equal to -1).
time_bins (np.array): Time bins to be used for multi task survival analysis.
"""
events = _repeat_array(E, len(time_bins))
times = _repeat_array(T, len(time_bins)) < time_bins
targets = times.astype(int)
shifted_array = np.roll(targets, 1)
shifted_array[:, 0] = 0
shifted_array = shifted_array + targets
shifted_array[shifted_array == 2] = -1
shifted_array[np.logical_not(events) & times] = -1
return shifted_array, time_bins
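# Example with assumed values E=[1, 0], T=[3, 3], time_bins=[1, 2, 3, 4, 5]:
#   targets[0] == [0, 0, 0, 1, -1]   # event at t=3: alive in the first bins, event in bin 4, masked after
#   targets[1] == [0, 0, 0, -1, -1]  # censored at t=3: removed (-1) from every bin past the censor time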
# class to fit a BCE on the leaves of a XGB
class XGBSEDebiasedBCE(XGBSEBaseEstimator):
"""
Class to train a set of logistic regressions on top of the embedding produced by xgboost models.
Each logistic regression predicts survival at different user-defined discrete time windows.
The classifiers remove individuals as they are censored, with targets that are indicators of
surviving at each window.
Adapted from source:
http://quinonero.net/Publications/predicting-clicks-facebook.pdf
"""
def __init__(
self,
xgb_params=DEFAULT_PARAMS,
lr_params=DEFAULT_PARAMS_LR,
n_jobs=-1,
):
"""
Construct XGBSEDebiasedBCE instance
Args:
xgb_params (Dict): parameters for XGBoost model, see
https://xgboost.readthedocs.io/en/latest/parameter.html
lr_params (Dict): parameters for Logistic Regression model, see
https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html
n_jobs (Int): Number of CPU cores used to fit logistic regressions via joblib.
"""
self.xgb_params = xgb_params
self.lr_params = lr_params
self.n_jobs = n_jobs
self.persist_train = False
def fit(
self,
X,
y,
num_boost_round=1000,
validation_data=None,
early_stopping_rounds=None,
verbose_eval=0,
persist_train=False,
index_id=None,
time_bins=None,
):
"""
Transform feature space by fitting a XGBoost model and outputting its leaf indices.
Leaves are transformed and considered as dummy variables to fit multiple logistic
regression models to each evaluated time bin.
Args:
X ([pd.DataFrame, np.array]): features to be used while fitting XGBoost model
y (structured array(numpy.bool_, numpy.number)): binary event indicator as first field,
and time of event or time of censoring as second field.
num_boost_round (Int): Number of boosting iterations.
validation_data (Tuple): Validation data in the format of a list of tuples [(X, y)]
if user desires to use early stopping
early_stopping_rounds (Int): Activates early stopping.
Validation metric needs to improve at least once
in every **early_stopping_rounds** round(s) to continue training.
See xgboost.train documentation.
verbose_eval ([Bool, Int]): level of verbosity. See xgboost.train documentation.
persist_train (Bool): whether or not to persist training data to use explainability
through prototypes
index_id (pd.Index): user defined index if intended to use explainability
through prototypes
time_bins (np.array): specified time windows to use when making survival predictions
Returns:
XGBSEDebiasedBCE: Trained XGBSEDebiasedBCE instance
"""
E_train, T_train = convert_y(y)
if time_bins is None:
time_bins = get_time_bins(T_train, E_train)
self.time_bins = time_bins
# converting data to xgb format
dtrain = convert_data_to_xgb_format(X, y, self.xgb_params["objective"])
# converting validation data to xgb format
evals = ()
if validation_data:
X_val, y_val = validation_data
dvalid = convert_data_to_xgb_format(
X_val, y_val, self.xgb_params["objective"]
)
evals = [(dvalid, "validation")]
# training XGB
self.bst = xgb.train(
self.xgb_params,
dtrain,
num_boost_round=num_boost_round,
early_stopping_rounds=early_stopping_rounds,
evals=evals,
verbose_eval=verbose_eval,
)
# predicting and encoding leaves
self.encoder = OneHotEncoder()
leaves = self.bst.predict(dtrain, pred_leaf=True)
leaves_encoded = self.encoder.fit_transform(leaves)
# convert targets for using with logistic regression
self.targets, self.time_bins = _build_multi_task_targets(
E_train, T_train, self.time_bins
)
# fitting LR for several targets
self.lr_estimators_ = self._fit_all_lr(leaves_encoded, self.targets)
if persist_train:
self.persist_train = True
if index_id is None:
index_id = X.index.copy()
index_leaves = self.bst.predict(dtrain, pred_leaf=True)
self.tree = BallTree(index_leaves, metric="hamming")
self.index_id = index_id
return self
def _fit_one_lr(self, leaves_encoded, target):
"""
Fits a single logistic regression to predict survival probability
at a certain time bin as target. Encoded leaves are used as features.
Args:
leaves_encoded (np.array): A tensor of one hot encoded leaves.
target (np.array): An array of time targets for a specific
Returns:
lr (sklearn.linear_model.LogisticRegression): A fitted Logistic
Regression model. This model outputs calibrated survival probabilities
on a time T.
"""
# masking
mask = target != -1
# by default we use a logistic regression
classifier = LogisticRegression(**self.lr_params)
if len(target[mask]) == 0:
# If there's no observation in a time bucket we raise an error
raise ValueError("Error: No observations in a time bucket")
elif len(np.unique(target[mask])) == 1:
# If there's only one class in a time bucket
# we create a dummy classifier that predicts that class and send a warning
warnings.warn(
"Warning: Only one class found in a time bucket", RuntimeWarning
)
classifier = DummyLogisticRegression()
classifier.fit(leaves_encoded[mask, :], target[mask])
return classifier
def _fit_all_lr(self, leaves_encoded, targets):
"""
Fits multiple Logistic Regressions to predict survival probability
for a list of time bins as target. Encoded leaves are used as features.
Args:
leaves_encoded (np.array): A tensor of one hot encoded leaves.
targets (np.array): An array of time targets for a specific time bin.
Returns:
lr_estimators (List): A list of fitted Logistic Regression models.
These models output calibrated survival probabilities for all times
in pre specified time bins.
"""
with Parallel(n_jobs=self.n_jobs) as parallel:
lr_estimators = parallel(
delayed(self._fit_one_lr)(leaves_encoded, targets[:, i])
for i in range(targets.shape[1])
)
return lr_estimators
def _predict_from_lr_list(self, lr_estimators, leaves_encoded, time_bins):
"""
Predicts survival probabilities from a list of multiple fitted
Logistic Regressions models. Encoded leaves are used as features.
Args:
lr_estimators (List): A list of fitted Logistic Regression models.
These models output calibrated survival probabilities for all times
in pre specified time bins.
leaves_encoded (np.array): A tensor of one hot encoded leaves.
time_bins (np.array): Specified time bins to split targets.
Returns:
preds (pd.DataFrame): A dataframe of estimated survival probabilities
for all times (columns), from the time_bins array, for all samples
(rows).
"""
with Parallel(n_jobs=self.n_jobs) as parallel:
preds = parallel(
delayed(m.predict_proba)(leaves_encoded) for m in lr_estimators
)
# organizing interval predictions from LRs
preds = np.array(preds)[:, :, 1].T
preds = pd.DataFrame(preds, columns=time_bins)
# converting these interval predictions
# to cumulative survival curve
preds = (1 - preds).cumprod(axis=1)
return preds
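# (Added note) Worked example of the interval-to-cumulative conversion above, with assumed
# per-window failure probabilities [0.1, 0.2, 0.1]:
#   survival = [0.9, 0.9*0.8, 0.9*0.8*0.9] = [0.9, 0.72, 0.648]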
def predict(self, X, return_interval_probs=False):
"""
Predicts survival probabilities using the XGBoost + Logistic Regression pipeline.
Args:
X (pd.DataFrame): Dataframe of features to be used as input for the
XGBoost model.
return_interval_probs (Bool): Boolean indicating if interval probabilities are
supposed to be returned. If False the cumulative survival is returned.
Default is False.
Returns:
pd.DataFrame: A dataframe of survival probabilities
for all times (columns), from a time_bins array, for all samples of X
(rows). If return_interval_probs is True, the interval probabilities are returned
instead of the cumulative survival probabilities.
"""
# converting to xgb format
d_matrix = xgb.DMatrix(X)
# getting leaves and extracting neighbors
leaves = self.bst.predict(d_matrix, pred_leaf=True)
leaves_encoded = self.encoder.transform(leaves)
# predicting from logistic regression artifacts
preds_df = self._predict_from_lr_list(
self.lr_estimators_, leaves_encoded, self.time_bins
)
if return_interval_probs:
preds_df = calculate_interval_failures(preds_df)
return preds_df
|
the-stack_0_19278 | from unittest import TestCase
from os.path import dirname, join
import json
from pytezos.michelson.program import MichelsonProgram
from pytezos.michelson.types.big_map import big_map_diff_to_lazy_diff
from pytezos.michelson.forge import forge_micheline, unforge_micheline
folder = 'dexter_usdtz_xtz'
entrypoint = 'removeLiquidity'
class MainnetOperationTestCaseNYQJME(TestCase):
@classmethod
def setUpClass(cls):
with open(join(dirname(__file__), f'', '__script__.json')) as f:
script = json.loads(f.read())
cls.program = MichelsonProgram.match(script['code'])
with open(join(dirname(__file__), f'', f'updateCountryRestrictions.json')) as f:
operation = json.loads(f.read())
cls.entrypoint = f'updateCountryRestrictions'
cls.operation = operation
# cls.maxDiff = None
def test_parameters_nyqjme(self):
original_params = self.program.parameter.from_parameters(self.operation['parameters'])
py_obj = original_params.to_python_object()
# pprint(py_obj)
readable_params = self.program.parameter.from_parameters(original_params.to_parameters(mode='readable'))
self.assertEqual(py_obj, readable_params.to_python_object())
self.program.parameter.from_python_object(py_obj)
def test_lazy_storage_nyqjme(self):
storage = self.program.storage.from_micheline_value(self.operation['storage'])
lazy_diff = big_map_diff_to_lazy_diff(self.operation['big_map_diff'])
extended_storage = storage.merge_lazy_diff(lazy_diff)
py_obj = extended_storage.to_python_object(try_unpack=True, lazy_diff=True)
# pprint(py_obj)
def test_parameters_forging(self):
expected = self.operation['parameters'].get('value', {'prim': 'Unit'})
actual = unforge_micheline(forge_micheline(expected))
self.assertEqual(expected, actual)
|
the-stack_0_19280 | # Loads a target data then defines tables for it
spark.read \
.option("header", True) \
.csv("./testdata/hospital.csv") \
.write \
.saveAsTable("hospital")
scavenger.misc \
.options({"db_name": "default", "table_name": "hospital", "row_id": "tid"}) \
.flatten() \
.write \
.saveAsTable("hospital_flatten")
spark.table("hospital").show(1)
spark.table("hospital_flatten").show(1)
# Loads a ground truth data then defines tables for it
spark.read \
.option("header", True) \
.csv("./testdata/hospital_clean.csv") \
.write \
.saveAsTable("hospital_clean")
spark.table("hospital_flatten") \
.join(spark.table("hospital_clean"), ["tid", "attribute"], "inner") \
.where("not(value <=> correct_val)") \
.write \
.saveAsTable("error_cells_ground_truth")
spark.table("hospital_clean").show(1)
spark.table("error_cells_ground_truth").show(1)
# Detects error cells then repairs them
from repair.detectors import NullErrorDetector, ConstraintErrorDetector
error_detectors = [
ConstraintErrorDetector(constraint_path="./testdata/hospital_constraints.txt"),
NullErrorDetector()
]
repaired_df = scavenger.repair \
.setDbName("default") \
.setTableName("hospital") \
.setRowId("tid") \
.setErrorDetectors(error_detectors) \
.setDiscreteThreshold(100) \
.setRuleBasedModelEnabled(True) \
.option("hp.no_progress_loss", "100") \
.run()
# Computes performance numbers (precision & recall)
# - Precision: the fraction of correct repairs, i.e., repairs that match
# the ground truth, over the total number of repairs performed
# - Recall: correct repairs over the total number of errors
pdf = repaired_df.join(
spark.table("hospital_clean").where("attribute != 'Score'"),
["tid", "attribute"], "inner")
rdf = repaired_df.join(
spark.table("error_cells_ground_truth").where("attribute != 'Score'"),
["tid", "attribute"], "right_outer")
# Compares predicted values with the correct ones
pdf.orderBy("attribute").show()
precision = pdf.where("repaired <=> correct_val").count() / pdf.count()
recall = rdf.where("repaired <=> correct_val").count() / rdf.count()
f1 = (2.0 * precision * recall) / (precision + recall)
print("Precision={} Recall={} F1={}".format(precision, recall, f1))
|
the-stack_0_19282 | from collections import namedtuple
import re
from .registry import DEFAULT_LOOKUP
# export resolve_lookups at this level
from .registry import resolve_lookups # NOQA
from .registry import register_lookup_handler # NOQA
LOOKUP_REGEX = re.compile("""
\$\{ # opening brace for the lookup
((?P<type>[._\-a-zA-Z0-9]*(?=\s)) # type of lookup, must be followed by a
# space to allow for defaulting to
# "output" type
?\s* # any number of spaces separating the
# type from the input
(?P<input>[@\+\/,\._\-a-zA-Z0-9\:\s=]+) # the input value to the lookup
)\} # closing brace of the lookup
""", re.VERBOSE)
Lookup = namedtuple("Lookup", ("type", "input", "raw"))
def extract_lookups_from_string(value):
"""Extract any lookups within a string.
Args:
value (str): string value we're extracting lookups from
Returns:
list: list of :class:`stacker.lookups.Lookup` if any
"""
lookups = set()
for match in LOOKUP_REGEX.finditer(value):
groupdict = match.groupdict()
raw = match.groups()[0]
lookup_type = groupdict.get("type") or DEFAULT_LOOKUP
lookup_input = groupdict.get("input")
lookups.add(Lookup(lookup_type, lookup_input, raw))
return lookups
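# Example (hypothetical lookup string using the standard "output" type):
#   extract_lookups_from_string("${output other-stack::SomeOutput}")
#   -> {Lookup(type='output', input='other-stack::SomeOutput', raw='output other-stack::SomeOutput')}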
def extract_lookups(value):
"""Recursively extracts any stack lookups within the data structure.
Args:
value (one of str, list, dict): a structure that contains lookups to
output values
Returns:
list: list of lookups if any
"""
lookups = set()
if isinstance(value, basestring):
lookups = lookups.union(extract_lookups_from_string(value))
elif isinstance(value, list):
for v in value:
lookups = lookups.union(extract_lookups(v))
elif isinstance(value, dict):
for v in value.values():
lookups = lookups.union(extract_lookups(v))
return lookups
|
the-stack_0_19283 | import unittest
from pprint import pprint
from lichecker import LicenseChecker
# these packages dont define license in setup.py
# manually verified and injected
license_overrides = {
"kthread": "MIT",
'yt-dlp': "Unlicense",
'pyxdg': 'GPL-2.0',
'ptyprocess': 'ISC license',
'psutil': 'BSD3'
}
# explicitly allow these packages that would fail otherwise
whitelist = [
'idna' # BSD-like
]
# validation flags
allow_nonfree = False
allow_viral = False
allow_unknown = False
allow_unlicense = True
allow_ambiguous = False
pkg_name = "RAKEkeywords"
class TestLicensing(unittest.TestCase):
@classmethod
def setUpClass(self):
licheck = LicenseChecker(pkg_name,
license_overrides=license_overrides,
whitelisted_packages=whitelist,
allow_ambiguous=allow_ambiguous,
allow_unlicense=allow_unlicense,
allow_unknown=allow_unknown,
allow_viral=allow_viral,
allow_nonfree=allow_nonfree)
print("Package", pkg_name)
print("Version", licheck.version)
print("License", licheck.license)
print("Transient Requirements (dependencies of dependencies)")
pprint(licheck.transient_dependencies)
self.licheck = licheck
def test_license_compliance(self):
print("Package Versions")
pprint(self.licheck.versions)
print("Dependency Licenses")
pprint(self.licheck.licenses)
self.licheck.validate()
|
the-stack_0_19284 | import os
import time
import cv2
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.utils.data as data
from dataset import TotalText, Ctw1500Text, Icdar15Text, Mlt2017Text, TD500Text
from network.textnet import TextNet
from util.augmentation import BaseTransform
from cfglib.config import config as cfg, update_config, print_config
from cfglib.option import BaseOptions
from util.visualize import visualize_detection, visualize_gt
from util.misc import to_device, mkdirs,rescale_result
from util.eval import deal_eval_total_text, deal_eval_ctw1500, deal_eval_icdar15, \
deal_eval_TD500, data_transfer_ICDAR, data_transfer_TD500, data_transfer_MLT2017
import multiprocessing
multiprocessing.set_start_method("spawn", force=True)
def osmkdir(out_dir):
import shutil
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
os.makedirs(out_dir)
def write_to_file(contours, file_path):
"""
:param contours: [[x1, y1], [x2, y2]... [xn, yn]]
:param file_path: target file path
"""
# according to total-text evaluation method, output file should be formatted to: y0,x0, ..... yn,xn
with open(file_path, 'w') as f:
for cont in contours:
cont = np.stack([cont[:, 0], cont[:, 1]], 1)
cont = cont.flatten().astype(str).tolist()
cont = ','.join(cont)
f.write(cont + '\n')
def inference(model, test_loader, output_dir):
total_time = 0.
if cfg.exp_name != "MLT2017":
osmkdir(output_dir)
else:
if not os.path.exists(output_dir):
mkdirs(output_dir)
for i, (image, meta) in enumerate(test_loader):
input_dict = dict()
input_dict['img'] = to_device(image)
# get detection result
start = time.time()
torch.cuda.synchronize()
output_dict = model(input_dict)
end = time.time()
if i > 0:
total_time += end - start
fps = (i + 1) / total_time
else:
fps = 0.0
idx = 0 # test mode can only run with batch_size == 1
print('detect {} / {} images: {}.'.format(i + 1, len(test_loader), meta['image_id'][idx]))
# visualization
img_show = image[idx].permute(1, 2, 0).cpu().numpy()
img_show = ((img_show * cfg.stds + cfg.means) * 255).astype(np.uint8)
show_boundary, heat_map = visualize_detection(img_show, output_dict, meta=meta)
contours = output_dict["py_preds"][-1].int().cpu().numpy()
gt_contour = []
label_tag = meta['label_tag'][idx].int().cpu().numpy()
for annot, n_annot in zip(meta['annotation'][idx], meta['n_annotation'][idx]):
if n_annot.item() > 0:
gt_contour.append(annot[:n_annot].int().cpu().numpy())
gt_vis = visualize_gt(img_show, gt_contour, label_tag)
show_map = np.concatenate([heat_map, gt_vis], axis=1)
show_map = cv2.resize(show_map, (320 * 3, 320))
im_vis = np.concatenate([show_map, show_boundary], axis=0)
path = os.path.join(cfg.vis_dir, '{}_test'.format(cfg.exp_name), meta['image_id'][idx].split(".")[0]+".jpg")
cv2.imwrite(path, im_vis)
H, W = meta['Height'][idx].item(), meta['Width'][idx].item()
img_show, contours = rescale_result(img_show, contours, H, W)
# write to file
if cfg.exp_name == "Icdar2015":
fname = "res_" + meta['image_id'][idx].replace('jpg', 'txt')
contours = data_transfer_ICDAR(contours)
write_to_file(contours, os.path.join(output_dir, fname))
elif cfg.exp_name == "MLT2017":
out_dir = os.path.join(output_dir, str(cfg.checkepoch))
if not os.path.exists(out_dir):
mkdirs(out_dir)
fname = meta['image_id'][idx].split("/")[-1].replace('ts', 'res')
fname = fname.split(".")[0] + ".txt"
data_transfer_MLT2017(contours, os.path.join(out_dir, fname))
elif cfg.exp_name == "TD500":
fname = "res_" + meta['image_id'][idx].split(".")[0]+".txt"
data_transfer_TD500(contours, os.path.join(output_dir, fname))
else:
fname = meta['image_id'][idx].replace('jpg', 'txt')
write_to_file(contours, os.path.join(output_dir, fname))
def main(vis_dir_path):
osmkdir(vis_dir_path)
if cfg.exp_name == "Totaltext":
testset = TotalText(
data_root='data/total-text-mat',
ignore_list=None,
is_training=False,
transform=BaseTransform(size=cfg.test_size, mean=cfg.means, std=cfg.stds)
)
elif cfg.exp_name == "Ctw1500":
testset = Ctw1500Text(
data_root='data/ctw1500',
is_training=False,
transform=BaseTransform(size=cfg.test_size, mean=cfg.means, std=cfg.stds)
)
elif cfg.exp_name == "Icdar2015":
testset = Icdar15Text(
data_root='data/Icdar2015',
is_training=False,
transform=BaseTransform(size=cfg.test_size, mean=cfg.means, std=cfg.stds)
)
elif cfg.exp_name == "MLT2017":
testset = Mlt2017Text(
data_root='data/MLT2017',
is_training=False,
transform=BaseTransform(size=cfg.test_size, mean=cfg.means, std=cfg.stds)
)
elif cfg.exp_name == "TD500":
testset = TD500Text(
data_root='data/TD500',
is_training=False,
transform=BaseTransform(size=cfg.test_size, mean=cfg.means, std=cfg.stds)
)
else:
print("{} is not justify".format(cfg.exp_name))
test_loader = data.DataLoader(testset, batch_size=1, shuffle=False, num_workers=cfg.num_workers)
# Model
model = TextNet(is_training=False, backbone=cfg.net)
model_path = os.path.join(cfg.save_dir, cfg.exp_name,
'TextBPN_{}_{}.pth'.format(model.backbone_name, cfg.checkepoch))
model.load_model(model_path)
model = model.to(cfg.device) # copy to cuda
model.eval()
if cfg.cuda:
cudnn.benchmark = True
print('Start testing TextBPN.')
output_dir = os.path.join(cfg.output_dir, cfg.exp_name)
inference(model, test_loader, output_dir)
if cfg.exp_name == "Totaltext":
deal_eval_total_text(debug=True)
elif cfg.exp_name == "Ctw1500":
deal_eval_ctw1500(debug=True)
elif cfg.exp_name == "Icdar2015":
deal_eval_icdar15(debug=True)
elif cfg.exp_name == "TD500":
deal_eval_TD500(debug=True)
else:
print("{} is not justify".format(cfg.exp_name))
if __name__ == "__main__":
# parse arguments
option = BaseOptions()
args = option.initialize()
update_config(cfg, args)
print_config(cfg)
vis_dir = os.path.join(cfg.vis_dir, '{}_test'.format(cfg.exp_name))
if not os.path.exists(vis_dir):
mkdirs(vis_dir)
# main
main(vis_dir)
|
the-stack_0_19286 | # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Generate Binarized MNIST-like files, smaller and with random data.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
from tensorflow_datasets.core.utils import py_utils
from tensorflow_datasets.testing import test_utils
flags.DEFINE_string(
"tfds_dir", py_utils.tfds_dir(), "Path to tensorflow_datasets directory"
)
FLAGS = flags.FLAGS
def examples_dir():
return os.path.join(FLAGS.tfds_dir, "testing", "test_data", "fake_examples")
def mnist_dir(name):
return os.path.join(examples_dir(), name)
_TRAIN_DATA_FILENAME = "binarized_mnist_train.amat"
_VALID_DATA_FILENAME = "binarized_mnist_valid.amat"
_TEST_DATA_FILENAME = "binarized_mnist_test.amat"
def make_images(num_images):
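    # Each fake image is one row of 28 * 28 random uint8 values, i.e. the result has shape (num_images, 784).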
return np.random.randint(256, size=(28 * 28 * num_images), dtype=np.uint8).reshape(
(num_images, -1)
)
def write_image_file(filename, num_images):
with tf.io.gfile.GFile(filename, "wb") as f:
np.savetxt(f, make_images(num_images), delimiter=" ")
def main(_):
output_dir = mnist_dir("binarized_mnist")
test_utils.remake_dir(output_dir)
write_image_file(os.path.join(output_dir, _TRAIN_DATA_FILENAME), 10)
write_image_file(os.path.join(output_dir, _VALID_DATA_FILENAME), 2)
write_image_file(os.path.join(output_dir, _TEST_DATA_FILENAME), 2)
if __name__ == "__main__":
app.run(main)
|
the-stack_0_19287 | """
# Definition for a Node.
class Node:
def __init__(self, val=None, children=None):
self.val = val
self.children = children
"""
class Solution:
def levelOrder(self, root: 'Node') -> List[List[int]]:
import collections
res = []
if not root:
return res
q = collections.deque()
q.append(root)
# BFS
while q:
tmp = []
for _ in range(len(q)):
node = q.popleft()
tmp.append(node.val)
                # use extend() to append all of this node's children to the queue at once
q.extend(node.children)
res.append(tmp)
return res |
the-stack_0_19291 | from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import os
import shutil
import logging
import time
from flexget import plugin
from flexget.event import event
from flexget.config_schema import one_or_more
from flexget.utils.template import RenderError
from flexget.utils.pathscrub import pathscrub
def get_directory_size(directory):
"""
:param directory: Path
:return: Size in bytes (recursively)
"""
dir_size = 0
for (path, _, files) in os.walk(directory):
for file in files:
filename = os.path.join(path, file)
dir_size += os.path.getsize(filename)
return dir_size
def get_siblings(ext, main_file_no_ext, main_file_ext, abs_path):
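    """
    Collect the files in abs_path that share the main file's name stem and end with ext,
    skipping the main file itself. Returns a dict mapping each sibling's full path to the
    part of its filename that follows the stem (used later when renaming the siblings).
    """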
siblings = {}
files = os.listdir(abs_path)
for filename in files:
# skip the main file
if filename == main_file_no_ext + main_file_ext:
continue
filename_lower = filename.lower()
if not filename_lower.startswith(main_file_no_ext.lower()) or not filename_lower.endswith(ext.lower()):
continue
# we have to use the length of the main file (no ext) to extract the rest of the filename
# for the future renaming
file_ext = filename[len(main_file_no_ext):]
file_path = os.path.join(abs_path, filename)
if os.path.exists(file_path):
siblings[file_path] = file_ext
return siblings
class BaseFileOps(object):
# Defined by subclasses
log = None
along = {
'type': 'object',
'properties': {
'extensions': one_or_more({'type': 'string'}),
'subdirs': one_or_more({'type': 'string'})
},
'additionalProperties': False,
'required': ['extensions']
}
def prepare_config(self, config):
if config is True:
return {}
elif config is False:
return
if 'along' not in config:
return config
extensions = config['along'].get('extensions')
subdirs = config['along'].get('subdirs')
if extensions and not isinstance(extensions, list):
config['along']['extensions'] = [extensions]
if subdirs and not isinstance(subdirs, list):
config['along']['subdirs'] = [subdirs]
return config
def on_task_output(self, task, config):
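        """Plugin entry point: validate each accepted entry's location, gather any sibling
        files requested via the `along` option, and delegate the actual file operation to
        the subclass's handle_entry()."""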
config = self.prepare_config(config)
if config is None:
return
for entry in task.accepted:
if 'location' not in entry:
self.log.verbose('Cannot handle %s because it does not have the field location.', entry['title'])
continue
src = entry['location']
src_isdir = os.path.isdir(src)
try:
# check location
if not os.path.exists(src):
                    self.log.warning('location `%s` does not exist (anymore).' % src)
continue
if src_isdir:
if not config.get('allow_dir'):
self.log.warning('location `%s` is a directory.' % src)
continue
elif not os.path.isfile(src):
self.log.warning('location `%s` is not a file.' % src)
continue
# search for namesakes
siblings = {} # dict of (path=ext) pairs
if not src_isdir and 'along' in config:
parent = os.path.dirname(src)
filename_no_ext, filename_ext = os.path.splitext(os.path.basename(src))
for ext in config['along']['extensions']:
siblings.update(get_siblings(ext, filename_no_ext, filename_ext, parent))
files = os.listdir(parent)
files_lower = list(map(str.lower, files))
for subdir in config['along'].get('subdirs', []):
try:
idx = files_lower.index(subdir)
except ValueError:
continue
subdir_path = os.path.join(parent, files[idx])
if not os.path.isdir(subdir_path):
continue
for ext in config['along']['extensions']:
siblings.update(get_siblings(ext, filename_no_ext, filename_ext, subdir_path))
# execute action in subclasses
self.handle_entry(task, config, entry, siblings)
except (OSError, IOError) as err:
entry.fail(str(err))
continue
def clean_source(self, task, config, entry):
min_size = entry.get('clean_source', config.get('clean_source', -1))
if min_size < 0:
return
base_path = os.path.split(entry.get('old_location', entry['location']))[0]
        # Everything here happens after a successful execution of the main action: the entry has been moved to a
        # different location, or it no longer exists. So from here we can just log warnings and move on.
if not os.path.isdir(base_path):
            self.log.warning('Cannot delete path `%s` because it does not exist (anymore).', base_path)
return
dir_size = get_directory_size(base_path) / 1024 / 1024
if dir_size >= min_size:
self.log.info('Path `%s` left because it exceeds safety value set in clean_source option.', base_path)
return
if task.options.test:
self.log.info('Would delete `%s` and everything under it.', base_path)
return
try:
shutil.rmtree(base_path)
            self.log.info('Path `%s` has been deleted because it was below the clean_source safe value.', base_path)
except Exception as err:
self.log.warning('Unable to delete path `%s`: %s', base_path, err)
def handle_entry(self, task, config, entry, siblings):
raise NotImplementedError()
class DeleteFiles(BaseFileOps):
"""Delete all accepted files."""
schema = {
'oneOf': [
{'type': 'boolean'},
{
'type': 'object',
'properties': {
'allow_dir': {'type': 'boolean'},
'along': BaseFileOps.along,
'clean_source': {'type': 'number'}
},
'additionalProperties': False
}
]
}
log = logging.getLogger('delete')
def handle_entry(self, task, config, entry, siblings):
src = entry['location']
src_isdir = os.path.isdir(src)
if task.options.test:
if src_isdir:
self.log.info('Would delete `%s` and all its content.', src)
else:
self.log.info('Would delete `%s`', src)
for s, _ in siblings.items():
self.log.info('Would also delete `%s`', s)
return
# IO errors will have the entry mark failed in the base class
if src_isdir:
shutil.rmtree(src)
self.log.info('`%s` and all its content has been deleted.', src)
else:
os.remove(src)
self.log.info('`%s` has been deleted.', src)
# further errors will not have any effect (the entry does not exists anymore)
for s, _ in siblings.items():
try:
os.remove(s)
self.log.info('`%s` has been deleted as well.', s)
except Exception as err:
self.log.warning(str(err))
if not src_isdir:
self.clean_source(task, config, entry)
class TransformingOps(BaseFileOps):
# Defined by subclasses
move = None
destination_field = None
def handle_entry(self, task, config, entry, siblings):
src = entry['location']
src_isdir = os.path.isdir(src)
src_path, src_name = os.path.split(src)
# get the proper path and name in order of: entry, config, above split
dst_path = entry.get(self.destination_field, config.get('to', src_path))
if config.get('rename'):
dst_name = config['rename']
elif entry.get('filename') and entry['filename'] != src_name:
# entry specifies different filename than what was split from the path
# since some inputs fill in filename it must be different in order to be used
dst_name = entry['filename']
else:
dst_name = src_name
try:
dst_path = entry.render(dst_path)
except RenderError as err:
raise plugin.PluginError('Path value replacement `%s` failed: %s' % (dst_path, err.args[0]))
try:
dst_name = entry.render(dst_name)
except RenderError as err:
raise plugin.PluginError('Filename value replacement `%s` failed: %s' % (dst_name, err.args[0]))
# Clean invalid characters with pathscrub plugin
dst_path = pathscrub(os.path.expanduser(dst_path))
dst_name = pathscrub(dst_name, filename=True)
# Join path and filename
dst = os.path.join(dst_path, dst_name)
if dst == entry['location']:
raise plugin.PluginWarning('source and destination are the same.')
if not os.path.exists(dst_path):
if task.options.test:
self.log.info('Would create `%s`', dst_path)
else:
self.log.info('Creating destination directory `%s`', dst_path)
os.makedirs(dst_path)
if not os.path.isdir(dst_path) and not task.options.test:
raise plugin.PluginWarning('destination `%s` is not a directory.' % dst_path)
# unpack_safety
if config.get('unpack_safety', entry.get('unpack_safety', True)):
count = 0
while True:
if count > 60 * 30:
                    raise plugin.PluginWarning('The task has been waiting for unpacking for 30 minutes')
size = os.path.getsize(src)
time.sleep(1)
new_size = os.path.getsize(src)
if size != new_size:
if not count % 10:
self.log.verbose('File `%s` is possibly being unpacked, waiting ...', src_name)
else:
break
count += 1
src_file, src_ext = os.path.splitext(src)
dst_file, dst_ext = os.path.splitext(dst)
# Check dst contains src_ext
if config.get('keep_extension', entry.get('keep_extension', True)):
if not src_isdir and dst_ext != src_ext:
self.log.verbose('Adding extension `%s` to dst `%s`', src_ext, dst)
dst += src_ext
dst_file += dst_ext # this is used for sibling files. dst_ext turns out not to be an extension!
funct_name = 'move' if self.move else 'copy'
funct_done = 'moved' if self.move else 'copied'
if task.options.test:
self.log.info('Would %s `%s` to `%s`', funct_name, src, dst)
for s, ext in siblings.items():
# we cannot rely on splitext for extensions here (subtitles may have the language code)
d = dst_file + ext
self.log.info('Would also %s `%s` to `%s`', funct_name, s, d)
else:
# IO errors will have the entry mark failed in the base class
if self.move:
shutil.move(src, dst)
elif src_isdir:
shutil.copytree(src, dst)
else:
shutil.copy(src, dst)
self.log.info('`%s` has been %s to `%s`', src, funct_done, dst)
# further errors will not have any effect (the entry has been successfully moved or copied out)
for s, ext in siblings.items():
# we cannot rely on splitext for extensions here (subtitles may have the language code)
d = dst_file + ext
try:
if self.move:
shutil.move(s, d)
else:
shutil.copy(s, d)
self.log.info('`%s` has been %s to `%s` as well.', s, funct_done, d)
except Exception as err:
self.log.warning(str(err))
entry['old_location'] = entry['location']
entry['location'] = dst
if self.move and not src_isdir:
self.clean_source(task, config, entry)
class CopyFiles(TransformingOps):
"""Copy all accepted files."""
schema = {
'oneOf': [
{'type': 'boolean'},
{
'type': 'object',
'properties': {
'to': {'type': 'string', 'format': 'path'},
'rename': {'type': 'string'},
'allow_dir': {'type': 'boolean'},
'unpack_safety': {'type': 'boolean'},
'keep_extension': {'type': 'boolean'},
'along': TransformingOps.along
},
'additionalProperties': False
}
]
}
move = False
destination_field = 'copy_to'
log = logging.getLogger('copy')
class MoveFiles(TransformingOps):
"""Move all accepted files."""
schema = {
'oneOf': [
{'type': 'boolean'},
{
'type': 'object',
'properties': {
'to': {'type': 'string', 'format': 'path'},
'rename': {'type': 'string'},
'allow_dir': {'type': 'boolean'},
'unpack_safety': {'type': 'boolean'},
'keep_extension': {'type': 'boolean'},
'along': TransformingOps.along,
'clean_source': {'type': 'number'}
},
'additionalProperties': False
}
]
}
move = True
destination_field = 'move_to'
log = logging.getLogger('move')
@event('plugin.register')
def register_plugin():
plugin.register(DeleteFiles, 'delete', api_ver=2)
plugin.register(CopyFiles, 'copy', api_ver=2)
plugin.register(MoveFiles, 'move', api_ver=2)
|
the-stack_0_19293 | # Write a program that asks for a person's salary and raises it by:
# 10% for salaries above $1,250.00
# 15% for lower salaries
# Ask for the salary
salario = float(input('Me informe seu salário:R$ '))
# Calculate the raise
reajuste_10 = salario * 10 / 100
reajuste_15 = salario * 15 / 100
# Show the raise according to the salary
if salario >= 1250:
print(f'Seu salário vai ficar {salario + reajuste_10}.')
else:
print(f'Seu salário vai ficar {salario + reajuste_15}.') |
the-stack_0_19294 | #!/usr/bin/python3
import subprocess
import tempfile
import shutil
import os
import time
def main():
subprocess.check_call(["cargo", "build", "--release"])
test_dir = None
server = None
try:
test_dir = tempfile.mkdtemp()
in_dir = os.path.join(test_dir, "in")
# Write config file for the server
server_config = os.path.join(test_dir, "mbackupd.toml")
with open(server_config, "w") as f:
f.write(
"""
verbosity="Info"
bind="127.0.0.1:31782"
[[users]]
name="backup"
password="hunter1"
access_level="Put"
[[users]]
name="restore"
password="hunter2"
access_level="Get"
[[users]]
name="admin"
password="hunter3"
access_level="Delete"
"""
)
# Start the server
server_data = os.path.join(test_dir, "server")
os.mkdir(server_data)
server = subprocess.Popen(
[
"target/release/mbackupd",
"--config",
server_config,
"--data-dir",
server_data,
]
)
# Write configuration for the client
client_config = os.path.join(test_dir, "mbackup.toml")
with open(client_config, "w") as f:
f.write(
"""
user="backup"
password="hunter1"
encryption_key="correcthorsebatterystaple"
server="http://localhost:31782"
hostname="test"
backup_dirs=["%s"]
cache_db="%s"
"""%(in_dir, os.path.join(test_dir, "cache.db"))
)
# Create some test files and links
d1 = os.path.join(in_dir, "k")
os.makedirs(d1)
a = os.path.join(d1, "a")
b = os.path.join(d1, "b")
c = os.path.join(d1, "c")
e = os.path.join(d1, "e")
f = os.path.join(d1, "f")
g = os.path.join(d1, "g")
h = os.path.join(d1, "h")
i = os.path.join(d1, "i")
with open(a, "w") as fi:
fi.write("test1")
with open(b, "w") as fi:
fi.write("test1")
with open(c, "w") as fi:
fi.write("test2" * 1024 * 1024)
with open(e, "w") as fi:
fi.write("test3")
with open(f, "w") as fi:
fi.write("x" * 1024 * 1024 * 50)
os.symlink(i, h)
# Backup the files and validate the files
time.sleep(0.5)
subprocess.check_call(["target/release/mbackup", "-c", client_config, "backup"])
subprocess.check_call(
[
"target/release/mbackup",
"-c",
client_config,
"--user",
"restore",
"--password",
"hunter2",
"validate",
"--full",
]
)
# Recover from backup
r1 = os.path.join(test_dir, "r1")
subprocess.check_call(
[
"target/release/mbackup",
"-c",
client_config,
"--user",
"restore",
"--password",
"hunter2",
"restore",
"1",
"--pattern",
"/",
"--dest",
r1,
]
)
        # Validate the restored content
with open(os.path.join(r1, a[1:]), "r") as fi:
if fi.read() != "test1":
raise Exception("Bad restore 1")
with open(os.path.join(r1, b[1:]), "r") as fi:
if fi.read() != "test1":
raise Exception("Bad restore 2 ")
with open(os.path.join(r1, c[1:]), "r") as fi:
if fi.read() != "test2" * 1024 * 1024:
raise Exception("Bad restore 3")
with open(os.path.join(r1, e[1:]), "r") as fi:
if fi.read() != "test3":
raise Exception("Bad restore 4")
with open(os.path.join(r1, f[1:]), "r") as fi:
if fi.read() != "x" * 1024 * 1024 * 50:
raise Exception("Bad restore 5")
if os.readlink(os.path.join(r1, h[1:])) != i:
raise Exception("Bad restore link 1")
# Modify state
with open(g, "w") as fi:
fi.write("test4")
os.unlink(b)
os.unlink(e)
# Backup new state
subprocess.check_call(["target/release/mbackup", "-c", client_config, "backup"])
# Remove the old root, prune all unused items and validate the content
subprocess.check_call(
[
"target/release/mbackup",
"-c",
client_config,
"--user",
"admin",
"--password",
"hunter3",
"delete-root",
"1",
]
)
subprocess.check_call(
[
"target/release/mbackup",
"-c",
client_config,
"--user",
"admin",
"--password",
"hunter3",
"prune",
]
)
subprocess.check_call(
[
"target/release/mbackup",
"-c",
client_config,
"--user",
"restore",
"--password",
"hunter2",
"validate"
]
)
subprocess.check_call(
[
"target/release/mbackup",
"-c",
client_config,
"--user",
"restore",
"--password",
"hunter2",
"validate",
"--full",
]
)
# Recover from the second backup
r2 = os.path.join(test_dir, "r2")
subprocess.check_call(
[
"target/release/mbackup",
"-c",
client_config,
"--user",
"restore",
"--password",
"hunter2",
"restore",
"2",
"--pattern",
"/",
"--dest",
r2,
]
)
# And check the content
with open(os.path.join(r2, a[1:]), "r") as fi:
if fi.read() != "test1":
raise Exception("Bad restore 6")
if os.path.exists(os.path.join(r2, b[1:])):
raise Exception("Bad restore 7")
with open(os.path.join(r2, c[1:]), "r") as fi:
if fi.read() != "test2" * 1024 * 1024:
raise Exception("Bad restore 8")
if os.path.exists(os.path.join(r2, e[1:])):
raise Exception("Bad restore 9")
with open(os.path.join(r2, f[1:]), "r") as fi:
if fi.read() != "x" * 1024 * 1024 * 50:
raise Exception("Bad restore 10")
with open(os.path.join(r2, g[1:]), "r") as fi:
if fi.read() != "test4":
raise Exception("Bad restore 11")
if os.readlink(os.path.join(r1, h[1:])) != i:
raise Exception("Bad restore link 2")
# Recreate e
with open(e, "w") as fi:
fi.write("test3")
        # Perform a backup
subprocess.check_call(["target/release/mbackup", "-c", client_config, "backup"])
r3 = os.path.join(test_dir, "r3")
        # And restore from the backup
subprocess.check_call(
[
"target/release/mbackup",
"-c",
client_config,
"--user",
"restore",
"--password",
"hunter2",
"restore",
"3",
"--pattern",
"/",
"--dest",
r3,
]
)
        # Check that e is as we expect.
        # The restore of e would fail here if the cache invalidation timing in the server did not work,
        # since we would then think the server already had e when performing the backup.
with open(os.path.join(r3, e[1:]), "r") as fi:
if fi.read() != "test3":
raise Exception("Bad restore 12")
# Delete all the content
subprocess.check_call(
[
"target/release/mbackup",
"-c",
client_config,
"--user",
"admin",
"--password",
"hunter3",
"prune",
"--age",
"0",
]
)
# And kill the server
        if server.returncode is not None:
raise Exception("Server terminated early")
server.terminate()
server.wait()
server = 0
# Check that the prune got rid of most of the data
usage = 0
for dirpath, dirnames, filenames in os.walk(server_data):
for f in filenames:
usage += os.path.getsize(os.path.join(dirpath, f))
if usage > 1024 * 1024:
raise Exception("Prune did not remove enough data")
finally:
# Kill the server
if server:
server.terminate()
server.wait()
# And empty the test folder
if test_dir:
shutil.rmtree(test_dir)
if __name__ == "__main__":
main()
|
the-stack_0_19296 | import json
from django.urls import reverse
from rest_framework.test import APITestCase
from rest_framework import status
from authors.apps.authentication.models import User
from authors.apps.authentication.tests.utils import TEST_USER
from django.core import mail
from authors.apps.authentication.views import VerifyAccount
from django.utils.http import urlsafe_base64_encode
from django.utils.encoding import force_bytes
from authors.apps.authentication.utils import generate_token
from rest_framework.test import force_authenticate
from rest_framework.test import APIRequestFactory
from authors.apps.articles.models import Article
user = {
"user": {
"username": "test",
"email": "[email protected]",
"password": "Test123."
}
}
class ArticleCRUDTestCase(APITestCase):
"""Test Cases to test ratings feature"""
def setUp(self):
"""Initialize default data."""
self.user = {
"user": {
"username": "test",
"email": "[email protected]",
"password": "Test123."
}
}
self.user1 = {
"user": {
"username": "Jacob",
"email": "[email protected]",
"password": "Test123."
}
}
self.article1 = {
"article": {
"title": "How to train your dragon",
"description": "Ever wonder how?",
"body": "You have to believe",
}
}
self.article2 = {
"article": {
"title": "How to feed your dragon",
"description": "Wanna know how?",
"body": "You don't believe?",
}
}
def login_user(self, user=user):
"""
login user
"""
response = self.client.post(
reverse("authentication:login"),
user,
format='json')
response.render()
user = json.loads(response.content)
return user
def create_a_user(self, username='test', email='[email protected]',
password='Test123.'):
"""
Create a test user
"""
user = User.objects.create_user(username, email, password)
user.save()
return user
def create_article(self):
"""
Create a test article
"""
user = User.objects.get()
article = Article.objects.create(
title="How to train your dragon",
description="Ever wonder how?",
body="You have to believe",
author=user.profile)
article.save()
return article
def verify_user(self, user):
"""Verify user"""
token = generate_token.make_token(user)
uid = urlsafe_base64_encode(force_bytes(user.pk))
request = APIRequestFactory().get(
reverse("authentication:verify", args=[uid, token]))
verify_account = VerifyAccount.as_view()
verify_account(request, uidb64=uid, token=token)
return user
def test_auth_user_can_like(self):
user = self.create_a_user()
self.verify_user(user)
auth_user = self.login_user()
user = User.objects.get()
article = self.create_article()
res = self.client.put('/api/articles/'+article.slug+'/like/',
HTTP_AUTHORIZATION='Bearer ' +
auth_user['user']['token'],
format='json'
)
self.assertEquals(res.status_code, 200)
def test_auth_user_can_dislike(self):
user = self.create_a_user()
self.verify_user(user)
auth_user = self.login_user()
user = User.objects.get()
article = self.create_article()
res = self.client.put('/api/articles/'+article.slug+'/dislike/',
HTTP_AUTHORIZATION='Bearer ' +
auth_user['user']['token'],
format='json'
)
self.assertEquals(res.status_code, 200)
def test_like_404_article(self):
user = self.create_a_user()
self.verify_user(user)
auth_user = self.login_user()
user = User.objects.get()
slug = 'fake-slug-13qedffd23'
res = self.client.put('/api/articles/'+slug+'/like/',
HTTP_AUTHORIZATION='Bearer ' +
auth_user['user']['token'],
format='json'
)
self.assertEquals(res.status_code, 404)
def test_dislike_404_article(self):
user = self.create_a_user()
self.verify_user(user)
auth_user = self.login_user()
user = User.objects.get()
slug = 'fake-slug-13qedffd23'
res = self.client.put('/api/articles/'+slug+'/dislike/',
HTTP_AUTHORIZATION='Bearer ' +
auth_user['user']['token'],
format='json'
)
self.assertEquals(res.status_code, 404)
def test_like_disliked_article(self):
user = self.create_a_user()
self.verify_user(user)
auth_user = self.login_user()
user = User.objects.get()
article = self.create_article()
self.client.put('/api/articles/'+article.slug+'/dislike/',
HTTP_AUTHORIZATION='Bearer ' +
auth_user['user']['token'],
format='json'
)
res = self.client.put('/api/articles/'+article.slug+'/like/',
HTTP_AUTHORIZATION='Bearer ' +
auth_user['user']['token'],
format='json'
)
self.assertEquals(res.status_code, 200)
def test_dislike_liked_article(self):
user = self.create_a_user()
self.verify_user(user)
auth_user = self.login_user()
user = User.objects.get()
article = self.create_article()
self.client.put('/api/articles/'+article.slug+'/like/',
HTTP_AUTHORIZATION='Bearer ' +
auth_user['user']['token'],
format='json'
)
res = self.client.put('/api/articles/'+article.slug+'/dislike/',
HTTP_AUTHORIZATION='Bearer ' +
auth_user['user']['token'],
format='json'
)
self.assertEquals(res.status_code, 200)
|
the-stack_0_19297 | #!/usr/local/bin/python3.3
def echo(message):
print(message)
return
echo('Direct Call')
x = echo
x('Indirect Call')
def indirect(func, arg):
func(arg)
indirect(echo, "Argument Call")
schedule = [(echo, 'Spam'), (echo, 'Ham')]
for (func, arg) in schedule:
func(arg)
def make(label):
def echo(message):
print(label + ': ' + message)
return echo
F = make('Spam')
F('Eggs')
F('Ham')
def func(a):
b = 'spam'
return b * a
print(func(8))
print(dir(func))
func.handles = 'Bottom-Press'
func.count = 0
print(dir(func))
def func(a: 'spam', b: (1, 10), c: float) -> int:
return a+b+c
print(func.__annotations__)
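# A quick check of the behaviour above: annotations are only metadata attached to the
# function object, so Python does not enforce them when the function is called.
print(func(1, 2, 3.0))        # 6.0
print(func('a', 'b', 'c'))    # 'abc' -- the "-> int" annotation is not enforced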
|
the-stack_0_19299 | # The MIT License (MIT)
# Copyright (c) 2021 Tom J. Sun
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import lcd
import machine
from pmu import axp192
from context import Context
from login import Login
from home import Home
import settings
pmu = axp192()
# Enable power management so that if power button is held down 6 secs,
# it shuts off as expected
pmu.enablePMICSleepMode(True)
ctx = Context()
ctx.display.flash_text(
settings.load('splash', ( 'Krux' ), strip=False),
color=lcd.WHITE,
word_wrap=False,
padding=8
)
while True:
if not Login(ctx).run():
break
if not Home(ctx).run():
break
ctx.display.flash_text(( 'Shutting down..' ))
ctx.clear()
pmu.setEnterSleepMode()
machine.reset()
|
the-stack_0_19302 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import odoo
import odoo.tests
@odoo.tests.tagged('-at_install', 'post_install')
class TestUiCustomizeTheme(odoo.tests.HttpCase):
def test_01_attachment_website_unlink(self):
        ''' Some ir.attachment records need to be unlinked when a website is
        unlinked, otherwise some flows will just crash. That's the case when 2
        websites have their theme color customized. Removing a website will make
        its customized attachment generic, thus having 2 attachments with the
        same URL available for other websites, leading to singleton errors
        (among others).
        But not all attachments should be deleted, e.g. we don't want to delete
        a SO or invoice PDF coming from an ecommerce order.
'''
Website = self.env['website']
Page = self.env['website.page']
Attachment = self.env['ir.attachment']
website_default = Website.browse(1)
website_test = Website.create({'name': 'Website Test'})
# simulate attachment state when editing 2 theme through customize
custom_url = '/TEST/website/static/src/scss/options/colors/user_theme_color_palette.custom.web.assets_common.scss'
scss_attachment = Attachment.create({
'name': custom_url,
'type': 'binary',
'mimetype': 'text/scss',
'datas': '',
'url': custom_url,
'website_id': website_default.id
})
scss_attachment.copy({'website_id': website_test.id})
# simulate PDF from ecommerce order
# Note: it will only have its website_id flag if the website has a domain
# equal to the current URL (fallback or get_current_website())
so_attachment = Attachment.create({
'name': 'SO036.pdf',
'type': 'binary',
'mimetype': 'application/pdf',
'datas': '',
'website_id': website_test.id
})
# avoid sql error on page website_id restrict
Page.search([('website_id', '=', website_test.id)]).unlink()
website_test.unlink()
        self.assertEqual(Attachment.search_count([('url', '=', custom_url)]), 1, 'Should not leave duplicates when deleting a website')
        self.assertTrue(so_attachment.exists(), 'Most attachments should not be deleted')
self.assertFalse(so_attachment.website_id, 'Website should be removed')
@odoo.tests.tagged('-at_install', 'post_install')
class TestUiHtmlEditor(odoo.tests.HttpCase):
def test_html_editor_multiple_templates(self):
Website = self.env['website']
View = self.env['ir.ui.view']
Page = self.env['website.page']
self.generic_view = View.create({
'name': 'Generic',
'type': 'qweb',
'arch': '''
<div>content</div>
''',
'key': 'test.generic_view',
})
self.generic_page = Page.create({
'view_id': self.generic_view.id,
'url': '/generic',
})
generic_page = Website.viewref('test.generic_view')
# Use an empty page layout with oe_structure id for this test
oe_structure_layout = '''
<t name="Generic" t-name="test.generic_view">
<t t-call="website.layout">
<div id="oe_structure_test_ui" class="oe_structure oe_empty"/>
</t>
</t>
'''
generic_page.arch = oe_structure_layout
self.start_tour("/", 'html_editor_multiple_templates', login='admin')
self.assertEqual(View.search_count([('key', '=', 'test.generic_view')]), 2, "homepage view should have been COW'd")
self.assertTrue(generic_page.arch == oe_structure_layout, "Generic homepage view should be untouched")
self.assertEqual(len(generic_page.inherit_children_ids.filtered(lambda v: 'oe_structure' in v.name)), 0, "oe_structure view should have been deleted when aboutus was COW")
specific_page = Website.with_context(website_id=1).viewref('test.generic_view')
self.assertTrue(specific_page.arch != oe_structure_layout, "Specific homepage view should have been changed")
self.assertEqual(len(specific_page.inherit_children_ids.filtered(lambda v: 'oe_structure' in v.name)), 1, "oe_structure view should have been created on the specific tree")
def test_html_editor_scss(self):
self.start_tour("/", 'test_html_editor_scss', login='admin')
@odoo.tests.tagged('-at_install', 'post_install')
class TestUiTranslate(odoo.tests.HttpCase):
def test_admin_tour_rte_translator(self):
fr_BE = self.env.ref('base.lang_fr_BE')
fr_BE.active = True
self.env.ref('website.default_website').language_ids |= fr_BE
self.start_tour("/", 'rte_translator', login='admin', timeout=120)
@odoo.tests.common.tagged('post_install', '-at_install')
class TestUi(odoo.tests.HttpCase):
def test_01_admin_tour_homepage(self):
self.start_tour("/?enable_editor=1", 'homepage', login='admin')
def test_02_restricted_editor(self):
self.restricted_editor = self.env['res.users'].create({
'name': 'Restricted Editor',
'login': 'restricted',
'password': 'restricted',
'groups_id': [(6, 0, [
self.ref('base.group_user'),
self.ref('website.group_website_publisher')
])]
})
self.start_tour("/", 'restricted_editor', login='restricted')
def test_03_backend_dashboard(self):
self.start_tour("/", 'backend_dashboard', login='admin')
def test_04_website_navbar_menu(self):
website = self.env['website'].search([], limit=1)
self.env['website.menu'].create({
'name': 'Test Tour Menu',
'url': '/test-tour-menu',
'parent_id': website.menu_id.id,
'sequence': 0,
'website_id': website.id,
})
self.start_tour("/", 'website_navbar_menu')
def test_05_specific_website_editor(self):
website_default = self.env['website'].search([], limit=1)
new_website = self.env['website'].create({'name': 'New Website'})
website_editor_assets_view = self.env.ref('website.assets_wysiwyg')
self.env['ir.ui.view'].create({
'name': 'Editor Extension',
'type': 'qweb',
'inherit_id': website_editor_assets_view.id,
'website_id': new_website.id,
'arch': """
<xpath expr="." position="inside">
<script type="text/javascript">document.body.dataset.hello = 'world';</script>
</xpath>
""",
})
self.start_tour("/website/force/%s" % website_default.id, "generic_website_editor", login='admin')
self.start_tour("/website/force/%s" % new_website.id, "specific_website_editor", login='admin')
def test_06_public_user_editor(self):
website_default = self.env['website'].search([], limit=1)
website_default.homepage_id.arch = """
<t name="Homepage" t-name="website.homepage">
<t t-call="website.layout">
<textarea class="o_public_user_editor_test_textarea o_wysiwyg_loader"/>
</t>
</t>
"""
self.start_tour("/", "public_user_editor", login=None)
def test_07_snippet_version(self):
website_snippets = self.env.ref('website.snippets')
self.env['ir.ui.view'].create([{
'name': 'Test snip',
'type': 'qweb',
'key': 'website.s_test_snip',
'arch': """
<section class="s_test_snip">
<t t-snippet-call="website.s_share"/>
</section>
""",
}, {
'type': 'qweb',
'inherit_id': website_snippets.id,
'arch': """
<xpath expr="//t[@t-snippet='website.s_parallax']" position="after">
<t t-snippet="website.s_test_snip" t-thumbnail="/website/static/src/img/snippets_thumbs/s_website_form.svg"/>
</xpath>
""",
}])
self.start_tour("/", 'snippet_version', login='admin')
def test_08_website_style_custo(self):
self.start_tour("/", "website_style_edition", login="admin")
|
the-stack_0_19303 | from __future__ import absolute_import
from __future__ import unicode_literals
from collections import namedtuple
from itertools import chain
from couchdbkit.exceptions import DocTypeError, ResourceNotFound
from corehq.apps.app_manager.exceptions import BuildNotFoundException
from corehq.util.python_compatibility import soft_assert_type_text
from corehq.util.quickcache import quickcache
from django.http import Http404
from django.core.cache import cache
from django.utils.translation import ugettext_lazy as _
from corehq.apps.es import AppES
from corehq.apps.es.aggregations import TermsAggregation, NestedAggregation
from dimagi.utils.couch.database import iter_docs
import six
from six.moves import map
AppBuildVersion = namedtuple('AppBuildVersion', ['app_id', 'build_id', 'version', 'comment'])
@quickcache(['domain'], timeout=1 * 60 * 60)
def domain_has_apps(domain):
from .models import Application
results = Application.get_db().view('app_manager/applications_brief',
startkey=[domain],
endkey=[domain, {}],
include_docs=False,
limit=1,
).all()
return len(results) > 0
def get_latest_released_app_doc(domain, app_id):
"""Get the latest starred build for the application"""
from .models import Application
key = ['^ReleasedApplications', domain, app_id]
app = Application.get_db().view(
'app_manager/applications',
startkey=key + [{}],
endkey=key,
descending=True,
include_docs=True,
limit=1,
).first()
return app['doc'] if app else None
def get_latest_released_app(domain, app_id):
app = get_latest_released_app_doc(domain, app_id)
if app:
return wrap_app(app)
return None
def get_latest_released_build_id(domain, app_id):
"""Get the latest starred build id for an application"""
app = _get_latest_released_build_view_result(domain, app_id)
return app['id'] if app else None
def get_latest_released_app_version(domain, app_id):
app = _get_latest_released_build_view_result(domain, app_id)
return app['key'][3] if app else None
def _get_latest_released_build_view_result(domain, app_id):
from .models import Application
key = ['^ReleasedApplications', domain, app_id]
return Application.get_db().view(
'app_manager/applications',
startkey=key + [{}],
endkey=key,
descending=True,
include_docs=False,
limit=1,
).first()
def _get_latest_build_view(domain, app_id, include_docs):
from .models import Application
return Application.get_db().view(
'app_manager/saved_app',
startkey=[domain, app_id, {}],
endkey=[domain, app_id],
descending=True,
include_docs=include_docs,
limit=1,
).first()
def get_latest_build_doc(domain, app_id):
"""Get the latest saved build of the application, regardless of star."""
res = _get_latest_build_view(domain, app_id, include_docs=True)
return res['doc'] if res else None
def get_latest_build_id(domain, app_id):
"""Get id of the latest build of the application, regardless of star."""
res = _get_latest_build_view(domain, app_id, include_docs=False)
return res['id'] if res else None
def get_latest_build_version(domain, app_id):
"""Get id of the latest build of the application, regardless of star."""
res = _get_latest_build_view(domain, app_id, include_docs=False)
return res['value']['version'] if res else None
def get_build_by_version(domain, app_id, version, return_doc=False):
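    """
    Return the saved build of app_id at the given version for domain: the raw view row by
    default, or the full application document when return_doc is True. Returns None when
    no such build exists.
    """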
from .models import Application
kwargs = {}
if version:
version = int(version)
if return_doc:
kwargs = {'include_docs': True, 'reduce': False}
res = Application.get_db().view(
'app_manager/saved_app',
key=[domain, app_id, version],
limit=1,
**kwargs
).first()
return res['doc'] if return_doc and res else res
def get_build_doc_by_version(domain, app_id, version):
return get_build_by_version(domain, app_id, version, return_doc=True)
def wrap_app(app_doc, wrap_cls=None):
"""Will raise DocTypeError if it can't figure out the correct class"""
from corehq.apps.app_manager.util import get_correct_app_class
cls = wrap_cls or get_correct_app_class(app_doc)
return cls.wrap(app_doc)
def get_current_app_version(domain, app_id):
from .models import Application
result = Application.get_db().view(
'app_manager/applications_brief',
key=[domain, app_id],
).one(except_all=True)
return result['value']['version']
def get_current_app_doc(domain, app_id):
from .models import Application
app = Application.get_db().get(app_id)
if app.get('domain', None) != domain:
raise ResourceNotFound()
return app
def get_current_app(domain, app_id):
return wrap_app(get_current_app_doc(domain, app_id))
def get_app_cached(domain, app_id):
"""Cached version of ``get_app`` for use in phone
api calls where most requests will be for app builds
which are read-only.
This only caches app builds."""
key = 'app_build_cache_{}_{}'.format(domain, app_id)
app = cache.get(key)
if not app:
app = get_app(domain, app_id)
if app.copy_of:
cache.set(key, app, 24 * 3600)
return app
def get_app(domain, app_id, wrap_cls=None, latest=False, target=None):
"""
Utility for getting an app, making sure it's in the domain specified, and
wrapping it in the right class (Application or RemoteApp).
'target' is only used if latest=True. It should be set to one of:
'build', 'release', or 'save'
Here are some common usages and the simpler dbaccessor alternatives:
current_app = get_app(domain, app_id)
= get_current_app_doc(domain, app_id)
latest_released_build = get_app(domain, app_id, latest=True)
= get_latest_released_app_doc(domain, app_id)
latest_build = get_app(domain, app_id, latest=True, target='build')
= get_latest_build_doc(domain, app_id)
Use wrap_app() if you need the wrapped object.
"""
from .models import Application
if not app_id:
raise Http404()
try:
app = Application.get_db().get(app_id)
except ResourceNotFound:
raise Http404()
if latest:
if not domain:
domain = app['domain']
if app.get('copy_of'):
# The id passed in corresponds to a build
app_id = app.get('copy_of')
if target == 'build':
app = get_latest_build_doc(domain, app_id) or app
elif target == 'save':
# If the app_id passed in was the working copy, just use that app.
# If it's a build, get the working copy.
if app.get('copy_of'):
app = get_current_app_doc(domain, app_id)
else:
app = get_latest_released_app_doc(domain, app_id) or app
if domain and app['domain'] != domain:
raise Http404()
try:
return wrap_app(app, wrap_cls=wrap_cls)
except DocTypeError:
raise Http404()
def get_apps_in_domain(domain, include_remote=True):
from .models import Application
from corehq.apps.app_manager.util import get_correct_app_class
docs = [row['doc'] for row in Application.get_db().view(
'app_manager/applications',
startkey=[domain, None],
endkey=[domain, None, {}],
include_docs=True
)]
apps = [get_correct_app_class(doc).wrap(doc) for doc in docs]
if not include_remote:
apps = [app for app in apps if not app.is_remote_app()]
return apps
def get_brief_apps_in_domain(domain, include_remote=True):
from .models import Application
from corehq.apps.app_manager.util import get_correct_app_class
docs = [row['value'] for row in Application.get_db().view(
'app_manager/applications_brief',
startkey=[domain],
endkey=[domain, {}]
)]
apps = [get_correct_app_class(doc).wrap(doc) for doc in docs]
if not include_remote:
apps = [app for app in apps if not app.is_remote_app()]
return sorted(apps, key=lambda app: app.name)
def get_brief_app(domain, app_id):
from .models import Application
from corehq.apps.app_manager.util import get_correct_app_class
result = Application.get_db().view(
'app_manager/applications_brief',
key=[domain, app_id],
).one(except_all=True)
doc = result['value']
return get_correct_app_class(doc).wrap(doc)
def get_app_ids_in_domain(domain):
from .models import Application
return [row['id'] for row in Application.get_db().view(
'app_manager/applications',
startkey=[domain, None],
endkey=[domain, None, {}]
)]
def get_apps_by_id(domain, app_ids):
from .models import Application
from corehq.apps.app_manager.util import get_correct_app_class
if isinstance(app_ids, six.string_types):
soft_assert_type_text(app_ids)
app_ids = [app_ids]
docs = iter_docs(Application.get_db(), app_ids)
return [get_correct_app_class(doc).wrap(doc) for doc in docs]
def get_built_app_ids(domain):
"""
Returns the app ids of all apps in the domain that have at least one build.
"""
from .models import Application
result = Application.get_db().view(
'app_manager/saved_app',
startkey=[domain],
endkey=[domain, {}],
include_docs=False,
)
app_ids = [data.get('value', {}).get('copy_of') for data in result]
app_ids = list(set(app_ids))
return [app_id for app_id in app_ids if app_id]
def get_built_app_ids_for_app_id(domain, app_id, version=None):
"""
Returns all the built apps for an application id. If version is specified returns all apps after that
version.
"""
from .models import Application
key = [domain, app_id]
skip = 1 if version else 0
results = Application.get_db().view(
'app_manager/saved_app',
startkey=key + [version],
endkey=key + [{}],
reduce=False,
include_docs=False,
skip=skip
).all()
return [result['id'] for result in results]
def get_built_app_ids_with_submissions_for_app_id(domain, app_id, version=None):
"""
Returns all the built apps for an application id that have submissions.
If version is specified returns all apps after that version.
"""
from .models import Application
key = [domain, app_id]
skip = 1 if version else 0
results = Application.get_db().view(
'apps_with_submissions/view',
startkey=key + [version],
endkey=key + [{}],
reduce=False,
include_docs=False,
skip=skip
).all()
return [result['id'] for result in results]
def get_built_app_ids_with_submissions_for_app_ids_and_versions(domain, app_ids, app_ids_and_versions=None):
"""
Returns all the built app_ids for a domain that has submissions.
If version is specified returns all apps after that version.
:domain:
:app_ids_and_versions: A dictionary mapping an app_id to build version
"""
app_ids_and_versions = app_ids_and_versions or {}
results = []
for app_id in app_ids:
results.extend(
get_built_app_ids_with_submissions_for_app_id(domain, app_id, app_ids_and_versions.get(app_id))
)
return results
def get_auto_generated_built_apps(domain, app_id):
"""
Returns all the built apps that were automatically generated for an application id.
"""
from .models import Application
results = Application.get_db().view(
'saved_apps_auto_generated/view',
startkey=[domain, app_id],
endkey=[domain, app_id, {}],
reduce=False,
include_docs=False,
).all()
return [doc['value'] for doc in results]
def get_latest_app_ids_and_versions(domain, app_id=None):
"""
Returns all the latest app_ids and versions in a dictionary.
:param domain: The domain to get the app from
:param app_id: The app_id to get the latest version from. If not specified gets latest versions of all
apps in the domain
:returns: {app_id: latest_version}
"""
from .models import Application
key = [domain]
results = Application.get_db().view(
'app_manager/applications_brief',
startkey=key + [{}],
endkey=key,
descending=True,
reduce=False,
include_docs=True,
).all()
latest_ids_and_versions = {}
if app_id:
results = [r for r in results if r['value']['_id'] == app_id]
for result in results:
app_id = result['value']['_id']
version = result['value']['version']
# Since we have sorted, we know the first instance is the latest version
if app_id not in latest_ids_and_versions:
latest_ids_and_versions[app_id] = version
return latest_ids_and_versions
def get_all_apps(domain):
"""
Returns an iterable over all the apps ever built and current Applications.
Used for subscription management when apps use subscription only features
that shouldn't be present in built apps as well as app definitions.
"""
def _saved_apps():
from .models import Application
from corehq.apps.app_manager.util import get_correct_app_class
saved_app_ids = Application.get_db().view(
'app_manager/saved_app',
startkey=[domain],
endkey=[domain, {}],
include_docs=False,
wrapper=lambda row: row['id'],
)
correct_wrap = lambda app_doc: get_correct_app_class(app_doc).wrap(app_doc)
return map(correct_wrap, iter_docs(Application.get_db(), saved_app_ids))
return chain(get_apps_in_domain(domain), _saved_apps())
def get_all_app_ids(domain):
"""
Returns a list of all the app_ids ever built and current Applications.
"""
from .models import Application
results = Application.get_db().view(
'app_manager/saved_app',
startkey=[domain],
endkey=[domain, {}],
include_docs=False,
).all()
return [result['id'] for result in results]
def get_all_built_app_ids_and_versions(domain, app_id=None):
"""
Returns a list of all the app_ids ever built and their version.
[[AppBuildVersion(app_id, build_id, version, comment)], ...]
    If app_id is provided, limit to builds for that app.
"""
return [
AppBuildVersion(
app_id=result['key'][1],
build_id=result['id'],
version=result['key'][2],
comment=result['value']['build_comment'],
)
for result in get_all_built_app_results(domain, app_id)
]
def get_all_built_app_results(domain, app_id=None):
from .models import Application
startkey = [domain]
endkey = [domain, {}]
if app_id:
startkey = [domain, app_id]
endkey = [domain, app_id, {}]
return Application.get_db().view(
'app_manager/saved_app',
startkey=startkey,
endkey=endkey,
include_docs=False,
).all()
def get_available_versions_for_app(domain, app_id):
from .models import Application
result = Application.get_db().view('app_manager/saved_app',
startkey=[domain, app_id, {}],
endkey=[domain, app_id],
descending=True)
return [doc['value']['version'] for doc in result]
def get_version_build_id(domain, app_id, version):
build = get_build_by_version(domain, app_id, version)
if not build:
raise BuildNotFoundException(_("Build for version requested not found"))
return build['id']
def get_case_types_from_apps(domain):
"""
Get the case types of modules in applications in the domain.
:returns: A set of case_types
"""
case_types_agg = NestedAggregation('modules', 'modules').aggregation(
TermsAggregation('case_types', 'modules.case_type.exact'))
q = (AppES()
.domain(domain)
.is_build(False)
.size(0)
.aggregation(case_types_agg))
return set(q.run().aggregations.modules.case_types.keys) - {''}
def get_case_sharing_apps_in_domain(domain, exclude_app_id=None):
apps = get_apps_in_domain(domain, include_remote=False)
return [a for a in apps if a.case_sharing and exclude_app_id != a.id]
|
the-stack_0_19304 | from django.contrib.contenttypes.models import ContentType
from rest_framework.test import APITestCase
from blitz_api.factories import UserFactory
from ..models import Package, Coupon
class CouponTests(APITestCase):
@classmethod
def setUpClass(cls):
super(CouponTests, cls).setUpClass()
cls.package_type = ContentType.objects.get_for_model(Package)
cls.user = UserFactory()
cls.coupon = Coupon.objects.create(
value=13,
code="ASD1234E",
start_time="2019-01-06T15:11:05-05:00",
end_time="2020-01-06T15:11:06-05:00",
max_use=100,
max_use_per_user=2,
details="Any package for fjeanneau clients",
owner=cls.user,
)
cls.coupon.applicable_product_types.add(cls.package_type)
cls.coupon.save()
def test_create(self):
"""
Ensure that we can create a coupon.
"""
coupon = Coupon.objects.create(
value=13,
code="12345678",
start_time="2019-01-06T15:11:05-05:00",
end_time="2020-01-06T15:11:06-05:00",
max_use=100,
max_use_per_user=2,
details="Any package for fjeanneau clients",
owner=self.user,
)
self.assertEqual(str(coupon), "12345678")
|
the-stack_0_19305 | # !/usr/bin/env python
# title :EYEDIAP_utils.py
# description :Script with utility methods to read EYEDIAP files. Adapted from scripts provided by EYEDIAP.
# author :Cristina Palmero
# date :30092018
# version :2.0
# usage : -
# notes : -
# python_version :3.5.5
# ==============================================================================
import numpy as np
import math
import cv2 as cv
import tensorflow as tf
from keras import backend as K
from itertools import chain
def read_screen_track_file(screen_track_file):
"""
Read the ground truth values, i.e. the 3D position of the screen target
"""
screen_track_vals = np.loadtxt(screen_track_file, skiprows=1, delimiter=';')[:, -3:]
return screen_track_vals
def read_ball_track_file(ball_track_file):
"""
Read the ground truth values, i.e. the 3D position of the floating target
"""
ball_track_vals = np.loadtxt(ball_track_file, skiprows=1, delimiter=';')[:, -3:]
return ball_track_vals
def read_head_track_file(head_track_file):
"""
Read the head pose parameters: the frame-by-frame rotation and translation
"""
head_track_vals = np.loadtxt(head_track_file, skiprows=1, delimiter=';')[:, 1:]
R = head_track_vals[:, :9].reshape(-1, 3, 3)
T = head_track_vals[:, 9:12]
return R, T
def readCalibrationFile(calibration_file):
"""
Reads the calibration parameters
"""
cal = {}
fh = open(calibration_file, 'r')
# Read the [resolution] section
fh.readline().strip()
cal['size'] = [int(val) for val in fh.readline().strip().split(';')]
cal['size'] = cal['size'][0], cal['size'][1]
# Read the [intrinsics] section
fh.readline().strip()
vals = []
for i in range(3):
vals.append([float(val) for val in fh.readline().strip().split(';')])
cal['intrinsics'] = np.array(vals).reshape(3, 3)
# Read the [R] section
fh.readline().strip()
vals = []
for i in range(3):
vals.append([float(val) for val in fh.readline().strip().split(';')])
cal['R'] = np.array(vals).reshape(3, 3)
# Read the [T] section
fh.readline().strip()
vals = []
for i in range(3):
vals.append([float(val) for val in fh.readline().strip().split(';')])
cal['T'] = np.array(vals).reshape(3, 1)
fh.close()
return cal
|
the-stack_0_19306 | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Network ID for integration tests
import swagger_client
NETWORK_ID = 'integ_net'
# Gateway ID for integration tests
GATEWAY_ID = 'integ_gate'
DEFAULT_NETWORK_DNSD_CONFIG = swagger_client.NetworkDnsConfig(
enable_caching=False,
records=[],
)
DEFAULT_NETWORK_CELLULAR_CONFIG = swagger_client.NetworkCellularConfigs(
ran=swagger_client.NetworkRanConfigs(
earfcndl=44590,
bandwidth_mhz=20,
subframe_assignment=2,
special_subframe_pattern=7,
),
epc=swagger_client.NetworkEpcConfigs(
mcc='001',
mnc='01',
tac=1,
lte_auth_op='EREREREREREREREREREREQ==',
lte_auth_amf='gAA=',
default_rule_id='default_rule_1',
relay_enabled=True,
),
)
DEFAULT_GATEWAY_CONFIG = swagger_client.MagmadGatewayConfig(
checkin_interval=10,
checkin_timeout=15,
autoupgrade_enabled=False,
autoupgrade_poll_interval=300,
tier='default',
)
DEFAULT_GATEWAY_CELLULAR_CONFIG = swagger_client.GatewayCellularConfigs(
ran=swagger_client.GatewayRanConfigs(
pci=260,
transmit_enabled=True,
),
epc=swagger_client.GatewayEpcConfigs(
nat_enabled=True,
ip_block='192.168.128.0/24',
),
)
|
the-stack_0_19308 | import aiohttp
from aiohttp.web import HTTPException
import traceback
from automat.config import config
from automat.util.logutil import LoggingUtil
logger = LoggingUtil.init_logging(__name__,
config.get('logging_level'),
config.get('logging_format')
)
async def async_get_json(url, headers={}, timeout=5*6):
"""
    Gets a JSON response from url asynchronously; returns a (payload, status_code) tuple.
"""
client_timeout = aiohttp.ClientTimeout(connect=timeout)
async with aiohttp.ClientSession(timeout=client_timeout) as session:
try:
async with session.get(url, headers=headers) as response:
if response.status != 200:
error = f"Failed to get response from {url}. Status code {response.status}"
logger.error(error)
return {
'error': error
}, response.status
return await response.json(), 200
except HTTPException as e:
logger.error(f'error contacting {url} -- {e}')
logger.debug(traceback.print_exc())
return {
'error': f"Backend server at {url} caused {e}"
}, 500
except Exception as e:
logger.error(f"Failed to get response from {url}.")
logger.debug(traceback.print_exc())
return {
'error': f'Internal server error {e}'
}, 500
async def async_post_json(url, headers={}, body='', timeout=5*6):
client_timeout = aiohttp.ClientTimeout(connect=timeout)
async with aiohttp.ClientSession(timeout=client_timeout) as session:
try:
async with session.post(url, data=body, headers=headers) as response:
if response.status != 200:
try:
content = await response.json()
except:
content = await response.content.read()
content = {
'error': content.decode('utf-8')
}
logger.error(f'{url} returned {response.status}. {content}')
return content, response.status
return await response.json(), 200
except Exception as e:
logger.error(f"Failed to get response from {url}.")
return {
'error': f"Server returned {e}"
}, 500
async def async_get_text(url,headers={}):
"""
    Gets a text response from url asynchronously; returns an empty string on non-200 responses.
"""
async with aiohttp.ClientSession() as session:
async with session.get(url, headers=headers) as response:
if response.status != 200:
logger.error(f'Failed to get response from {url}, returned status : {response.status}')
return ''
return await response.text()
async def async_get_response(url, headers={}, timeout=5*60):
"""
    Returns the whole response as a dict with headers, json, text, raw bytes and status.
"""
client_timeout = aiohttp.ClientTimeout(connect=timeout)
async with aiohttp.ClientSession(timeout=client_timeout) as session:
async with session.get(url, headers=headers) as response:
try:
json = await response.json()
except:
json = {}
try:
text = await response.text()
except:
text = ''
try:
raw = await response.read()
except:
raw = ''
return {
'headers' : response.headers,
'json': json,
'text': text,
'raw': raw,
'status': response.status
}
|
the-stack_0_19310 | from neutronclient.v2_0 import client as neutron_client
from cloudshell.cp.core.models import VmDetailsProperty,VmDetailsNetworkInterface,VmDetailsData
class VmDetailsProvider(object):
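    """Builds the VmDetailsData (instance properties plus network interfaces) reported
    back to CloudShell for a deployed OpenStack instance."""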
def __init__(self, instance_service):
"""
:param NovaInstanceService instance_service:
"""
self.instance_service = instance_service
def create(self, instance, openstack_session, management_vlan_id, logger):
"""
:param logger:
:param management_vlan_id:
:param instance:
:param keystoneauth1.session.Session openstack_session:
        :return: cloudshell.cp.core.models.VmDetailsData
"""
# must be reloaded to acquire a floating ip
instance = self.instance_service.get_instance_from_instance_id(openstack_session=openstack_session,
instance_id=instance.id,
logger=logger)
logger.info("Reloading vm with id: {0}".format(instance.id))
vm_instance_data = self._get_vm_instance_data(instance, openstack_session)
vm_network_data = self._get_vm_network_data(instance, openstack_session, management_vlan_id)
return VmDetailsData(vmInstanceData=vm_instance_data,vmNetworkData=vm_network_data)
def _get_vm_instance_data(self, instance, openstack_session):
image = self.instance_service.get_image(openstack_session, instance.image['id'])
flavor = self.instance_service.get_flavor(openstack_session, instance.flavor['id'])
data = [
VmDetailsProperty(key='Image',value=image.name),
VmDetailsProperty(key='Flavour',value=flavor.name),
VmDetailsProperty(key='Availability Zone', value=instance._info['OS-EXT-AZ:availability_zone']),
VmDetailsProperty(key='CPU', value='%s vCPU' % flavor.vcpus),
VmDetailsProperty(key='Memory', value='%s GB' % flavor.ram),
VmDetailsProperty(key='Disk Size', value='%s GB' % flavor.disk)
]
return data
@staticmethod
def _get_vm_network_data(instance, openstack_session, management_vlan_id):
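        """Build a VmDetailsNetworkInterface for every network the instance is attached to,
        marking the management network as primary/predefined and attaching the floating IP
        when one is associated."""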
network_interfaces = []
client = neutron_client.Client(session=openstack_session, insecure=True)
list_networks = client.list_networks()
networks = list_networks['networks']
for network_name in instance.networks:
interfaces = instance.interface_list()
            net = [x for x in networks if x['name'] == network_name][0]
network_id = net['id']
segmentation_id = net['provider:segmentation_id']
            interface = [x for x in interfaces if x.net_id == network_id][0]
interface_mac = interface.to_dict().get('mac_addr')
is_primary_and_predefined = network_id == management_vlan_id
private_ip = instance.networks[network_name][0]
network_data =[VmDetailsProperty(key="IP",value= private_ip),
VmDetailsProperty(key="MAC Address", value=interface_mac),
VmDetailsProperty(key="VLAN Name", value=network_name,hidden=True)]
public_ip = ''
addresses = instance.to_dict().get('addresses')
if addresses:
for key, val in addresses.iteritems():
if key == network_name:
floating_ip = filter(lambda x: x['OS-EXT-IPS:type'] == 'floating', val)
if floating_ip:
public_ip = floating_ip[0]['addr']
network_data.append(VmDetailsProperty(key="Floating IP",value= public_ip))
current_interface = VmDetailsNetworkInterface(interfaceId=interface_mac, networkId=segmentation_id,
isPrimary=is_primary_and_predefined,
isPredefined=is_primary_and_predefined, networkData=network_data,privateIpAddress=private_ip,publicIpAddress=public_ip)
network_interfaces.append(current_interface)
return sorted(network_interfaces, key=lambda x: x.networkId, reverse=False)
|
the-stack_0_19312 | from CronMatch import cron_match
from datetime import datetime
from time import sleep
from threading import Thread
from threading import Lock
from threading import Event
class Scheduler:
_running = False
_entries = []
_last_tick = None
_tick_thread = None
_lock = None
fn = None
_exit = None
def __init__(self):
self._lock = Lock()
self.start()
def load_cron(self, cron, fn):
with self._lock:
entries = cron.split('\n')
for e in entries:
self._entries.append((e, fn))
def stop(self):
with self._lock:
if self._running:
self._exit.set()
self._running = False
self._tick_thread = None
def start(self):
with self._lock:
if self._running is False:
self._tick_thread = Thread(target=self._tick)
self._exit = Event()
self._running = True
self._tick_thread.start()
def _check_cron(self):
with self._lock:
for entry in self._entries:
cron = entry[0]
fn = entry[1]
match = cron_match(cron)
if match is not False:
fn(match)
def _trim_datetime(self, dt = None):
if dt is None:
dt = datetime.now()
return datetime(dt.year, dt.month, dt.day, dt.hour, dt.minute, 0, 0)
def _tick(self):
while self._running:
t = datetime.utcnow()
rem = t.second + (t.microsecond / 1000000)
self._exit.wait(60.001 - rem)
# Make extra sure that we don't continue twice in the same minute
while self._trim_datetime() == self._last_tick:
self._exit.wait(.1)
if self._running is False:
break
self._last_tick = self._trim_datetime()
self._check_cron()
self._running = False
self._exit = None
|
the-stack_0_19315 | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:
- fetch all translations using the tx tool
- post-process them into valid and committable format
- remove invalid control characters
- remove location tags (makes diffs less noisy)
TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET
# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'orbit_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'
# Minimum number of messages for translation to be considered at all
MIN_NUM_MESSAGES = 10
def check_at_repository_root():
if not os.path.exists('.git'):
print('No .git directory found')
print('Execute this script at the root of the repository', file=sys.stderr)
exit(1)
def fetch_all_translations():
if subprocess.call([TX, 'pull', '-f', '-a']):
print('Error while fetching translations', file=sys.stderr)
exit(1)
def find_format_specifiers(s):
'''Find all format specifiers in a string.'''
pos = 0
specifiers = []
while True:
percent = s.find('%', pos)
if percent < 0:
break
try:
specifiers.append(s[percent+1])
except:
print('Failed to get specifier')
pos = percent+2
return specifiers
def split_format_specifiers(specifiers):
'''Split format specifiers between numeric (Qt) and others (strprintf)'''
numeric = []
other = []
for s in specifiers:
if s in {'1','2','3','4','5','6','7','8','9'}:
numeric.append(s)
else:
other.append(s)
# numeric (Qt) can be present in any order, others (strprintf) must be in specified order
return set(numeric),other
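# Worked example (strings are illustrative, not taken from the .ts files):
#   find_format_specifiers("Error %1 while loading %s") -> ['1', 's']
#   split_format_specifiers(['1', 's'])                 -> ({'1'}, ['s'])
# Qt-style numeric specifiers end up in the (order-insensitive) set, while
# strprintf-style specifiers stay in an ordered list, which is what
# check_format_specifiers compares between source and translation below.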
def sanitize_string(s):
'''Sanitize string for printing'''
return s.replace('\n',' ')
def check_format_specifiers(source, translation, errors, numerus):
source_f = split_format_specifiers(find_format_specifiers(source))
# assert that no source messages contain both Qt and strprintf format specifiers
# if this fails, go change the source as this is hacky and confusing!
#assert(not(source_f[0] and source_f[1]))
try:
translation_f = split_format_specifiers(find_format_specifiers(translation))
except IndexError:
errors.append("Parse error in translation for '%s': '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
else:
if source_f != translation_f:
if numerus and source_f == (set(), ['n']) and translation_f == (set(), []) and translation.find('%') == -1:
# Allow numerus translations to omit %n specifier (usually when it only has one possible value)
return True
errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
return False
return True
def all_ts_files(suffix=''):
for filename in os.listdir(LOCALE_DIR):
# process only language files, and do not process source language
if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
continue
if suffix: # remove provided suffix
filename = filename[0:-len(suffix)]
filepath = os.path.join(LOCALE_DIR, filename)
yield(filename, filepath)
FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
'''Remove invalid characters from translation string'''
return FIX_RE.sub(b'', s)
# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
text = _orig_escape_cdata(text)
text = text.replace("'", ''')
text = text.replace('"', '"')
return text
def postprocess_translations(reduce_diff_hacks=False):
print('Checking and postprocessing...')
if reduce_diff_hacks:
global _orig_escape_cdata
_orig_escape_cdata = ET._escape_cdata
ET._escape_cdata = escape_cdata
for (filename,filepath) in all_ts_files():
os.rename(filepath, filepath+'.orig')
have_errors = False
for (filename,filepath) in all_ts_files('.orig'):
# pre-fixups to cope with transifex output
parser = ET.XMLParser(encoding='utf-8') # need to override the encoding because 'utf8' is not understood, only 'utf-8' is
with open(filepath + '.orig', 'rb') as f:
data = f.read()
# remove control characters; this must be done over the entire file otherwise the XML parser will fail
data = remove_invalid_characters(data)
tree = ET.parse(io.BytesIO(data), parser=parser)
# iterate over all messages in file
root = tree.getroot()
for context in root.findall('context'):
for message in context.findall('message'):
numerus = message.get('numerus') == 'yes'
source = message.find('source').text
translation_node = message.find('translation')
# pick all numerusforms
if numerus:
translations = [i.text for i in translation_node.findall('numerusform')]
else:
translations = [translation_node.text]
for translation in translations:
if translation is None:
continue
errors = []
valid = check_format_specifiers(source, translation, errors, numerus)
for error in errors:
print('%s: %s' % (filename, error))
if not valid: # set type to unfinished and clear string if invalid
translation_node.clear()
translation_node.set('type', 'unfinished')
have_errors = True
# Remove location tags
for location in message.findall('location'):
message.remove(location)
# Remove entire message if it is an unfinished translation
if translation_node.get('type') == 'unfinished':
context.remove(message)
# check if document is (virtually) empty, and remove it if so
num_messages = 0
for context in root.findall('context'):
for message in context.findall('message'):
num_messages += 1
if num_messages < MIN_NUM_MESSAGES:
print('Removing %s, as it contains only %i messages' % (filepath, num_messages))
continue
# write fixed-up tree
# if diff reduction requested, replace some XML to 'sanitize' to qt formatting
if reduce_diff_hacks:
out = io.BytesIO()
tree.write(out, encoding='utf-8')
out = out.getvalue()
out = out.replace(b' />', b'/>')
with open(filepath, 'wb') as f:
f.write(out)
else:
tree.write(filepath, encoding='utf-8')
return have_errors
if __name__ == '__main__':
check_at_repository_root()
# fetch_all_translations()
postprocess_translations()
|
the-stack_0_19316 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ..datasource import tensor as astensor
from .core import TensorFFTMixin, validate_fft, TensorHermitianFFT
class TensorIHFFT(TensorHermitianFFT, TensorFFTMixin):
_op_type_ = OperandDef.IHFFT
def __init__(self, n=None, axis=-1, norm=None, dtype=None, **kw):
super().__init__(_n=n, _axis=axis, _norm=norm, _dtype=dtype, **kw)
@classmethod
def _get_shape(cls, op, shape):
new_shape = list(shape)
shape = op.n if op.n is not None else shape[op.axis]
if shape % 2 == 0:
shape = (shape // 2) + 1
else:
shape = (shape + 1) // 2
new_shape[op.axis] = shape
return tuple(new_shape)
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal that has Hermitian symmetry.
Parameters
----------
a : array_like
Input tensor.
n : int, optional
Length of the inverse FFT, the number of points along
transformation axis in the input to use. If `n` is smaller than
the length of the input, the input is cropped. If it is larger,
the input is padded with zeros. If `n` is not given, the length of
the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex Tensor
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is ``n//2 + 1``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time
domain and is real in the frequency domain. So here it's `hfft` for
which you must supply the length of the result if it is to be odd:
* even: ``ihfft(hfft(a, 2*len(a) - 2) == a``, within roundoff error,
* odd: ``ihfft(hfft(a, 2*len(a) - 1) == a``, within roundoff error.
Examples
--------
>>> import mars.tensor as mt
>>> spectrum = mt.array([ 15, -4, 0, -1, 0, -4])
>>> mt.fft.ifft(spectrum).execute()
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> mt.fft.ihfft(spectrum).execute()
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
"""
a = astensor(a)
validate_fft(a, axis=axis, norm=norm)
op = TensorIHFFT(n=n, axis=axis, norm=norm, dtype=np.dtype(np.complex_))
return op(a)
|
the-stack_0_19317 | #!/usr/bin/env python3
from cereal import car
from selfdrive.config import Conversions as CV
from selfdrive.controls.lib.drive_helpers import EventTypes as ET, create_event
from selfdrive.controls.lib.vehicle_model import VehicleModel
from selfdrive.car.toyota.carstate import CarState, get_can_parser, get_cam_can_parser
from selfdrive.car.toyota.values import Ecu, ECU_FINGERPRINT, CAR, NO_STOP_TIMER_CAR, TSS2_CAR, FINGERPRINTS
from selfdrive.car import STD_CARGO_KG, scale_rot_inertia, scale_tire_stiffness, is_ecu_disconnected, gen_empty_fingerprint
from selfdrive.swaglog import cloudlog
from selfdrive.car.interfaces import CarInterfaceBase
ButtonType = car.CarState.ButtonEvent.Type
GearShifter = car.CarState.GearShifter
class CarInterface(CarInterfaceBase):
def __init__(self, CP, CarController):
self.CP = CP
self.VM = VehicleModel(CP)
self.frame = 0
self.gas_pressed_prev = False
self.brake_pressed_prev = False
self.cruise_enabled_prev = False
# *** init the major players ***
self.CS = CarState(CP)
self.cp = get_can_parser(CP)
self.cp_cam = get_cam_can_parser(CP)
self.CC = None
if CarController is not None:
self.CC = CarController(self.cp.dbc_name, CP.carFingerprint, CP.enableCamera, CP.enableDsu, CP.enableApgs)
@staticmethod
def compute_gb(accel, speed):
return float(accel) / 3.0
@staticmethod
def get_params(candidate, fingerprint=gen_empty_fingerprint(), has_relay=False, car_fw=[]):
ret = car.CarParams.new_message()
ret.carName = "toyota"
ret.carFingerprint = candidate
ret.isPandaBlack = has_relay
ret.safetyModel = car.CarParams.SafetyModel.toyota
ret.enableCruise = True
ret.steerActuatorDelay = 0.12 # Default delay, Prius has larger delay
ret.steerLimitTimer = 0.4
if candidate not in [CAR.PRIUS, CAR.RAV4, CAR.RAV4H]: # These cars use LQR/INDI
ret.lateralTuning.init('pid')
ret.lateralTuning.pid.kiBP, ret.lateralTuning.pid.kpBP = [[0.], [0.]]
if candidate == CAR.PRIUS:
stop_and_go = True
ret.safetyParam = 66 # see conversion factor for STEER_TORQUE_EPS in dbc file
ret.wheelbase = 2.70
ret.steerRatio = 13.4 # unknown end-to-end spec
tire_stiffness_factor = 0.6371 # hand-tune
ret.mass = 3375. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.init('indi')
ret.lateralTuning.indi.innerLoopGain = 4.0
ret.lateralTuning.indi.outerLoopGain = 3.0
ret.lateralTuning.indi.timeConstant = 1.0
ret.lateralTuning.indi.actuatorEffectiveness = 1.0
# TODO: Determine if this is better than INDI
# ret.lateralTuning.init('lqr')
# ret.lateralTuning.lqr.scale = 1500.0
# ret.lateralTuning.lqr.ki = 0.01
# ret.lateralTuning.lqr.a = [0., 1., -0.22619643, 1.21822268]
# ret.lateralTuning.lqr.b = [-1.92006585e-04, 3.95603032e-05]
# ret.lateralTuning.lqr.c = [1., 0.]
# ret.lateralTuning.lqr.k = [-110.73572306, 451.22718255]
# ret.lateralTuning.lqr.l = [0.03233671, 0.03185757]
# ret.lateralTuning.lqr.dcGain = 0.002237852961363602
ret.steerActuatorDelay = 0.5
elif candidate in [CAR.RAV4, CAR.RAV4H]:
stop_and_go = True if (candidate in CAR.RAV4H) else False
ret.safetyParam = 73
ret.wheelbase = 2.65
ret.steerRatio = 16.88 # 14.5 is spec end-to-end
tire_stiffness_factor = 0.5533
ret.mass = 3650. * CV.LB_TO_KG + STD_CARGO_KG # mean between normal and hybrid
ret.lateralTuning.init('lqr')
ret.lateralTuning.lqr.scale = 1500.0
ret.lateralTuning.lqr.ki = 0.05
ret.lateralTuning.lqr.a = [0., 1., -0.22619643, 1.21822268]
ret.lateralTuning.lqr.b = [-1.92006585e-04, 3.95603032e-05]
ret.lateralTuning.lqr.c = [1., 0.]
ret.lateralTuning.lqr.k = [-110.73572306, 451.22718255]
ret.lateralTuning.lqr.l = [0.3233671, 0.3185757]
ret.lateralTuning.lqr.dcGain = 0.002237852961363602
elif candidate == CAR.COROLLA:
stop_and_go = False
ret.safetyParam = 100
ret.wheelbase = 2.70
ret.steerRatio = 18.27
tire_stiffness_factor = 0.444 # not optimized yet
ret.mass = 2860. * CV.LB_TO_KG + STD_CARGO_KG # mean between normal and hybrid
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.2], [0.05]]
ret.lateralTuning.pid.kf = 0.00003 # full torque for 20 deg at 80mph means 0.00007818594
elif candidate == CAR.LEXUS_RX:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.79
ret.steerRatio = 14.8
tire_stiffness_factor = 0.5533
ret.mass = 4387. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.05]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate == CAR.LEXUS_RXH:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.79
ret.steerRatio = 16. # 14.8 is spec end-to-end
tire_stiffness_factor = 0.444 # not optimized yet
ret.mass = 4481. * CV.LB_TO_KG + STD_CARGO_KG # mean between min and max
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00006 # full torque for 10 deg at 80mph means 0.00007818594
elif candidate == CAR.LEXUS_RX_TSS2:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.79
ret.steerRatio = 14.8
tire_stiffness_factor = 0.5533 # not optimized yet
ret.mass = 4387. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate in [CAR.CHR, CAR.CHRH]:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.63906
ret.steerRatio = 13.6
tire_stiffness_factor = 0.7933
ret.mass = 3300. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.723], [0.0428]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate in [CAR.CAMRY, CAR.CAMRYH]:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.82448
ret.steerRatio = 13.7
tire_stiffness_factor = 0.7933
ret.mass = 3400. * CV.LB_TO_KG + STD_CARGO_KG #mean between normal and hybrid
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate == CAR.HIGHLANDER_TSS2:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.84988 # 112.2 in = 2.84988 m
ret.steerRatio = 16.0
tire_stiffness_factor = 0.8
ret.mass = 4700. * CV.LB_TO_KG + STD_CARGO_KG # 4260 + 4-5 people
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.18], [0.015]] # community tuning
ret.lateralTuning.pid.kf = 0.00012 # community tuning
elif candidate in [CAR.HIGHLANDER, CAR.HIGHLANDERH]:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.78
ret.steerRatio = 16.0
tire_stiffness_factor = 0.8
ret.mass = 4607. * CV.LB_TO_KG + STD_CARGO_KG #mean between normal and hybrid limited
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.18], [0.015]] # community tuning
ret.lateralTuning.pid.kf = 0.00012 # community tuning
elif candidate == CAR.AVALON:
stop_and_go = False
ret.safetyParam = 73
ret.wheelbase = 2.82
ret.steerRatio = 14.8 #Found at https://pressroom.toyota.com/releases/2016+avalon+product+specs.download
tire_stiffness_factor = 0.7983
ret.mass = 3505. * CV.LB_TO_KG + STD_CARGO_KG # mean between normal and hybrid
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.17], [0.03]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate == CAR.RAV4_TSS2:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.68986
ret.steerRatio = 14.3
tire_stiffness_factor = 0.7933
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.mass = 3370. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate == CAR.RAV4H_TSS2:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.68986
ret.steerRatio = 14.3
tire_stiffness_factor = 0.7933
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.mass = 3800. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate == CAR.PRIUS_2019:
stop_and_go = True
ret.safetyParam = 66
ret.wheelbase = 2.70
ret.steerRatio = 15.74 # unknown end-to-end spec
tire_stiffness_factor = 0.6371 # hand-tune
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.mass = 3045. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate in [CAR.COROLLA_TSS2, CAR.COROLLAH_TSS2]:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.63906
ret.steerRatio = 13.9
tire_stiffness_factor = 0.444 # not optimized yet
ret.mass = 3060. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate in [CAR.LEXUS_ES_TSS2, CAR.LEXUS_ESH_TSS2]:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.8702
ret.steerRatio = 16.0 # not optimized
tire_stiffness_factor = 0.444 # not optimized yet
ret.mass = 3704. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate == CAR.SIENNA:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 3.03
ret.steerRatio = 16.0
tire_stiffness_factor = 0.444
ret.mass = 4590. * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.3], [0.05]]
ret.lateralTuning.pid.kf = 0.00007818594
elif candidate == CAR.LEXUS_IS:
stop_and_go = False
ret.safetyParam = 77
ret.wheelbase = 2.79908
ret.steerRatio = 13.3
tire_stiffness_factor = 0.444
ret.mass = 3736.8 * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.3], [0.05]]
ret.lateralTuning.pid.kf = 0.00006
elif candidate == CAR.LEXUS_CTH:
stop_and_go = True
ret.safetyParam = 100
ret.wheelbase = 2.60
ret.steerRatio = 18.6
tire_stiffness_factor = 0.517
ret.mass = 3108 * CV.LB_TO_KG + STD_CARGO_KG # mean between min and max
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.3], [0.05]]
ret.lateralTuning.pid.kf = 0.00007
elif candidate == CAR.LEXUS_NXH:
stop_and_go = True
ret.safetyParam = 73
ret.wheelbase = 2.66
ret.steerRatio = 14.7
tire_stiffness_factor = 0.444 # not optimized yet
ret.mass = 4070 * CV.LB_TO_KG + STD_CARGO_KG
ret.lateralTuning.pid.kpV, ret.lateralTuning.pid.kiV = [[0.6], [0.1]]
ret.lateralTuning.pid.kf = 0.00006
ret.steerRateCost = 1.
ret.centerToFront = ret.wheelbase * 0.44
# TODO: get actual value, for now starting with reasonable value for
# civic and scaling by mass and wheelbase
ret.rotationalInertia = scale_rot_inertia(ret.mass, ret.wheelbase)
# TODO: start from empirically derived lateral slip stiffness for the civic and scale by
# mass and CG position, so all cars will have approximately similar dyn behaviors
ret.tireStiffnessFront, ret.tireStiffnessRear = scale_tire_stiffness(ret.mass, ret.wheelbase, ret.centerToFront,
tire_stiffness_factor=tire_stiffness_factor)
# no rear steering, at least on the listed cars above
ret.steerRatioRear = 0.
ret.steerControlType = car.CarParams.SteerControlType.torque
# steer, gas, brake limitations VS speed
ret.steerMaxBP = [16. * CV.KPH_TO_MS, 45. * CV.KPH_TO_MS] # breakpoints at 16 and 45 kph
ret.steerMaxV = [1., 1.] # full torque allowed across the whole speed range
ret.brakeMaxBP = [0.]
ret.brakeMaxV = [1.]
ret.enableCamera = is_ecu_disconnected(fingerprint[0], FINGERPRINTS, ECU_FINGERPRINT, candidate, Ecu.fwdCamera) or has_relay
# Detect smartDSU, which intercepts ACC_CMD from the DSU allowing openpilot to send it
smartDsu = 0x2FF in fingerprint[0]
# In TSS2 cars the camera does long control
ret.enableDsu = is_ecu_disconnected(fingerprint[0], FINGERPRINTS, ECU_FINGERPRINT, candidate, Ecu.dsu) and candidate not in TSS2_CAR
ret.enableApgs = False # is_ecu_disconnected(fingerprint[0], FINGERPRINTS, ECU_FINGERPRINT, candidate, Ecu.apgs)
ret.enableGasInterceptor = 0x201 in fingerprint[0]
# if the smartDSU is detected, openpilot can send ACC_CMD (and the smartDSU will block it from the DSU) or not (the DSU is "connected")
ret.openpilotLongitudinalControl = ret.enableCamera and (smartDsu or ret.enableDsu or candidate in TSS2_CAR)
cloudlog.warning("ECU Camera Simulated: %r", ret.enableCamera)
cloudlog.warning("ECU DSU Simulated: %r", ret.enableDsu)
cloudlog.warning("ECU APGS Simulated: %r", ret.enableApgs)
cloudlog.warning("ECU Gas Interceptor: %r", ret.enableGasInterceptor)
# min speed to enable ACC. if car can do stop and go, then set enabling speed
# to a negative value, so it won't matter.
ret.minEnableSpeed = -1. if (stop_and_go or ret.enableGasInterceptor) else 19. * CV.MPH_TO_MS
# removing the DSU disables AEB and it's considered a community maintained feature
# intercepting the DSU is a community feature since it requires unofficial hardware
ret.communityFeature = ret.enableGasInterceptor or ret.enableDsu or smartDsu
ret.longitudinalTuning.deadzoneBP = [0., 9.]
ret.longitudinalTuning.deadzoneV = [0., .15]
ret.longitudinalTuning.kpBP = [0., 5., 35.]
ret.longitudinalTuning.kiBP = [0., 35.]
ret.stoppingControl = False
ret.startAccel = 0.0
if ret.enableGasInterceptor:
ret.gasMaxBP = [0., 9., 35]
ret.gasMaxV = [0.2, 0.5, 0.7]
ret.longitudinalTuning.kpV = [1.2, 0.8, 0.5]
ret.longitudinalTuning.kiV = [0.18, 0.12]
else:
ret.gasMaxBP = [0.]
ret.gasMaxV = [0.5]
ret.longitudinalTuning.kpV = [3.6, 2.4, 1.5]
ret.longitudinalTuning.kiV = [0.54, 0.36]
return ret
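  # Rough usage sketch (illustrative only; relies on the default empty
  # fingerprint rather than CAN data from any actual vehicle):
  #   CP = CarInterface.get_params(CAR.COROLLA)
  #   CP.wheelbase, CP.mass, CP.lateralTuning.pid.kf, CP.minEnableSpeed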
# returns a car.CarState
def update(self, c, can_strings):
# ******************* do can recv *******************
self.cp.update_strings(can_strings)
self.cp_cam.update_strings(can_strings)
self.CS.update(self.cp, self.cp_cam)
# create message
ret = car.CarState.new_message()
ret.canValid = self.cp.can_valid and self.cp_cam.can_valid
# speeds
ret.vEgo = self.CS.v_ego
ret.vEgoRaw = self.CS.v_ego_raw
ret.aEgo = self.CS.a_ego
ret.yawRate = self.VM.yaw_rate(self.CS.angle_steers * CV.DEG_TO_RAD, self.CS.v_ego)
ret.standstill = self.CS.standstill
ret.wheelSpeeds.fl = self.CS.v_wheel_fl
ret.wheelSpeeds.fr = self.CS.v_wheel_fr
ret.wheelSpeeds.rl = self.CS.v_wheel_rl
ret.wheelSpeeds.rr = self.CS.v_wheel_rr
# gear shifter
ret.gearShifter = self.CS.gear_shifter
# gas pedal
ret.gas = self.CS.pedal_gas
if self.CP.enableGasInterceptor:
# use interceptor values to disengage on pedal press
ret.gasPressed = self.CS.pedal_gas > 15
else:
ret.gasPressed = self.CS.pedal_gas > 0
# brake pedal
ret.brake = self.CS.user_brake
ret.brakePressed = self.CS.brake_pressed != 0
ret.brakeLights = self.CS.brake_lights
# steering wheel
ret.steeringAngle = self.CS.angle_steers
ret.steeringRate = self.CS.angle_steers_rate
ret.steeringTorque = self.CS.steer_torque_driver
ret.steeringTorqueEps = self.CS.steer_torque_motor
ret.steeringPressed = self.CS.steer_override
ret.steeringRateLimited = self.CC.steer_rate_limited if self.CC is not None else False
# cruise state
ret.cruiseState.enabled = self.CS.pcm_acc_active
# schrauger - set min cruise to 20mph instead of 28
#ret.cruiseState.speed = self.CS.v_cruise_pcm * CV.KPH_TO_MS
speed = self.CS.v_cruise_pcm * CV.KPH_TO_MS
if speed < (46 * CV.KPH_TO_MS): # ~28mph
speed = 32 * CV.KPH_TO_MS # ~20mph
ret.cruiseState.speed = speed
ret.cruiseState.available = bool(self.CS.main_on)
ret.cruiseState.speedOffset = 0.
if self.CP.carFingerprint in NO_STOP_TIMER_CAR or self.CP.enableGasInterceptor:
# ignore standstill in hybrid vehicles, since the pcm allows restarting without
# receiving any special command
# also if interceptor is detected
ret.cruiseState.standstill = False
else:
ret.cruiseState.standstill = self.CS.pcm_acc_status == 7
buttonEvents = []
if self.CS.left_blinker_on != self.CS.prev_left_blinker_on:
be = car.CarState.ButtonEvent.new_message()
be.type = ButtonType.leftBlinker
be.pressed = self.CS.left_blinker_on != 0
buttonEvents.append(be)
if self.CS.right_blinker_on != self.CS.prev_right_blinker_on:
be = car.CarState.ButtonEvent.new_message()
be.type = ButtonType.rightBlinker
be.pressed = self.CS.right_blinker_on != 0
buttonEvents.append(be)
ret.buttonEvents = buttonEvents
ret.leftBlinker = bool(self.CS.left_blinker_on)
ret.rightBlinker = bool(self.CS.right_blinker_on)
ret.doorOpen = not self.CS.door_all_closed
ret.seatbeltUnlatched = not self.CS.seatbelt
ret.genericToggle = self.CS.generic_toggle
ret.stockAeb = self.CS.stock_aeb
# events
events = []
if self.cp_cam.can_invalid_cnt >= 200 and self.CP.enableCamera:
events.append(create_event('invalidGiraffeToyota', [ET.PERMANENT]))
if not ret.gearShifter == GearShifter.drive and self.CP.openpilotLongitudinalControl:
events.append(create_event('wrongGear', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if ret.doorOpen:
events.append(create_event('doorOpen', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if ret.seatbeltUnlatched:
events.append(create_event('seatbeltNotLatched', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if self.CS.esp_disabled and self.CP.openpilotLongitudinalControl:
events.append(create_event('espDisabled', [ET.NO_ENTRY, ET.SOFT_DISABLE]))
if not self.CS.main_on and self.CP.openpilotLongitudinalControl:
events.append(create_event('wrongCarMode', [ET.NO_ENTRY, ET.USER_DISABLE]))
if ret.gearShifter == GearShifter.reverse and self.CP.openpilotLongitudinalControl:
events.append(create_event('reverseGear', [ET.NO_ENTRY, ET.IMMEDIATE_DISABLE]))
if self.CS.steer_error:
events.append(create_event('steerTempUnavailable', [ET.NO_ENTRY, ET.WARNING]))
if self.CS.low_speed_lockout and self.CP.openpilotLongitudinalControl:
events.append(create_event('lowSpeedLockout', [ET.NO_ENTRY, ET.PERMANENT]))
if ret.vEgo < self.CP.minEnableSpeed and self.CP.openpilotLongitudinalControl:
events.append(create_event('speedTooLow', [ET.NO_ENTRY]))
if c.actuators.gas > 0.1:
# keep some margin on the actuator so cancellation isn't falsely triggered while stopping
events.append(create_event('speedTooLow', [ET.IMMEDIATE_DISABLE]))
if ret.vEgo < 0.001:
# while in standstill, send a user alert
events.append(create_event('manualRestart', [ET.WARNING]))
# enable request in prius is simple, as we activate when Toyota is active (rising edge)
if ret.cruiseState.enabled and not self.cruise_enabled_prev:
events.append(create_event('pcmEnable', [ET.ENABLE]))
elif not ret.cruiseState.enabled:
events.append(create_event('pcmDisable', [ET.USER_DISABLE]))
# disable on pedals rising edge or when brake is pressed and speed isn't zero
if (ret.gasPressed and not self.gas_pressed_prev) or \
(ret.brakePressed and (not self.brake_pressed_prev or ret.vEgo > 0.001)):
events.append(create_event('pedalPressed', [ET.NO_ENTRY, ET.USER_DISABLE]))
if ret.gasPressed:
events.append(create_event('pedalPressed', [ET.PRE_ENABLE]))
ret.events = events
self.gas_pressed_prev = ret.gasPressed
self.brake_pressed_prev = ret.brakePressed
self.cruise_enabled_prev = ret.cruiseState.enabled
return ret.as_reader()
# pass in a car.CarControl
# to be called @ 100hz
def apply(self, c):
can_sends = self.CC.update(c.enabled, self.CS, self.frame,
c.actuators, c.cruiseControl.cancel,
c.hudControl.visualAlert, c.hudControl.leftLaneVisible,
c.hudControl.rightLaneVisible, c.hudControl.leadVisible,
c.hudControl.leftLaneDepart, c.hudControl.rightLaneDepart)
self.frame += 1
return can_sends
|
the-stack_0_19320 | from setuptools import find_packages, setup
def get_version():
version = {}
with open("dagster_papertrail/version.py") as fp:
exec(fp.read(), version) # pylint: disable=W0122
return version["__version__"]
if __name__ == "__main__":
setup(
name="dagster-papertrail",
version=get_version(),
author="Elementl",
author_email="[email protected]",
license="Apache-2.0",
description="Package for papertrail Dagster framework components.",
url="https://github.com/dagster-io/dagster/tree/master/python_modules/libraries/dagster-papertrail",
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
packages=find_packages(exclude=["test"]),
install_requires=["dagster"],
zip_safe=False,
)
|
the-stack_0_19321 | # OBSS SAHI Tool
# Code written by Fatih C Akyon, 2020.
from typing import List
def get_bbox_from_shapely(shapely_object):
"""
Accepts shapely box/poly object and returns its bounding box in coco and voc formats
"""
minx, miny, maxx, maxy = shapely_object.bounds
width = maxx - minx
height = maxy - miny
coco_bbox = [minx, miny, width, height]
coco_bbox = [round(point) for point in coco_bbox] if coco_bbox else coco_bbox
voc_bbox = [minx, miny, maxx, maxy]
voc_bbox = [round(point) for point in voc_bbox] if voc_bbox else voc_bbox
return coco_bbox, voc_bbox
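# Illustrative example (assumes shapely is installed, since this helper takes
# shapely geometries):
#   from shapely.geometry import box
#   get_bbox_from_shapely(box(1, 2, 5, 9))
#   -> ([1, 2, 4, 7], [1, 2, 5, 9])   # coco is [x, y, w, h], voc is [xmin, ymin, xmax, ymax]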
class ShapelyAnnotation:
"""
Creates ShapelyAnnotation (as shapely MultiPolygon).
Can convert this instance annotation to various formats.
"""
def __init__(self, slice_bbox=None):
self.slice_bbox = slice_bbox
@property
def area(self):
return int(self.__area)
def to_list(self):
"""
[
[(x1, y1), (x2, y2), (x3, y3), ...],
[(x1, y1), (x2, y2), (x3, y3), ...],
...
]
"""
list_of_list_of_points: List = []
for shapely_polygon in self.multipolygon.geoms:
# create list_of_points for selected shapely_polygon
if shapely_polygon.area != 0:
x_coords = shapely_polygon.exterior.coords.xy[0]
y_coords = shapely_polygon.exterior.coords.xy[1]
# fix coord by slice_bbox
if self.slice_bbox:
minx = self.slice_bbox[0]
miny = self.slice_bbox[1]
x_coords = [x_coord - minx for x_coord in x_coords]
y_coords = [y_coord - miny for y_coord in y_coords]
list_of_points = list(zip(x_coords, y_coords))
else:
list_of_points = []
# append list_of_points to list_of_list_of_points
list_of_list_of_points.append(list_of_points)
# return result
return list_of_list_of_points
def to_opencv_contours(self):
"""
[
[[[1, 1]], [[325, 125]], [[250, 200]], [[5, 200]]],
[[[1, 1]], [[325, 125]], [[250, 200]], [[5, 200]]]
]
"""
opencv_contours: List = []
for shapely_polygon in self.multipolygon.geoms:
# create opencv_contour for selected shapely_polygon
if shapely_polygon.area != 0:
x_coords = shapely_polygon.exterior.coords.xy[0]
y_coords = shapely_polygon.exterior.coords.xy[1]
# fix coord by slice_bbox
if self.slice_bbox:
minx = self.slice_bbox[0]
miny = self.slice_bbox[1]
x_coords = [x_coord - minx for x_coord in x_coords]
y_coords = [y_coord - miny for y_coord in y_coords]
opencv_contour = [[[int(x_coords[ind]), int(y_coords[ind])]] for ind in range(len(x_coords))]
else:
opencv_contour: List = []
# append opencv_contour to opencv_contours
opencv_contours.append(opencv_contour)
# return result
return opencv_contours
def to_coco_bbox(self):
"""
[xmin, ymin, width, height]
"""
if self.multipolygon.area != 0:
coco_bbox, _ = get_bbox_from_shapely(self.multipolygon)
# fix coord by slice box
if self.slice_bbox:
minx = round(self.slice_bbox[0])
miny = round(self.slice_bbox[1])
coco_bbox[0] = round(coco_bbox[0] - minx)
coco_bbox[1] = round(coco_bbox[1] - miny)
else:
coco_bbox: List = []
return coco_bbox
def to_voc_bbox(self):
"""
[xmin, ymin, xmax, ymax]
"""
if self.multipolygon.area != 0:
_, voc_bbox = get_bbox_from_shapely(self.multipolygon)
# fix coord by slice box
if self.slice_bbox:
minx = self.slice_bbox[0]
miny = self.slice_bbox[1]
voc_bbox[0] = round(voc_bbox[0] - minx)
voc_bbox[2] = round(voc_bbox[2] - minx)
voc_bbox[1] = round(voc_bbox[1] - miny)
voc_bbox[3] = round(voc_bbox[3] - miny)
else:
voc_bbox = []
return voc_bbox
|
the-stack_0_19322 | import json
import mover
import defs
class ScenarioReader:
def __init__(self, jsonFile):
with open(jsonFile) as f:
self.scenario = json.load(f)
self.domainSize = self.scenario["domainSize"] if "domainSize" in self.scenario else [100,100]
self.name = self.scenario["name"] if "name" in self.scenario else "Name"
self.movers = []
def getScenarioName(self):
return self.name
def getMoversForPlane(self):
for mov in self.scenario['objects']:
movementType = (defs.getMovementTypeFromString(mov["movementType"])) if "movementType" in mov else defs.MovementType.STATIC
startPoint = mov["startingPixel"]
paceLength = mov["paceLength"] if "paceLength" in mov else 0
radius = mov["radius"] if "radius" in mov else 10
movementDirection = (defs.getMovementDirectionFromString(mov["movementDirection"])) if "movementDirection" in mov else defs.MovementDirection.VERTICALUP
m = mover.Mover(movementType, startPoint, self.domainSize, pacelength=paceLength, radius=radius, movementDirection=movementDirection)
self.movers.append(m)
return self.movers
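# Sketch of the JSON shape this reader expects, inferred from the key lookups
# above. The movementType / movementDirection strings are guesses based on the
# defaults; defs.getMovementTypeFromString / getMovementDirectionFromString
# define the real accepted values.
#
# {
#   "name": "Demo scenario",
#   "domainSize": [100, 100],
#   "objects": [
#     {"movementType": "STATIC", "startingPixel": [10, 20],
#      "paceLength": 2, "radius": 10, "movementDirection": "VERTICALUP"}
#   ]
# }
#
# reader = ScenarioReader("scenario.json")   # path is a placeholder
# movers = reader.getMoversForPlane()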
|
the-stack_0_19323 |
import json
import pathlib
import re
import argparse
from pprint import pformat
from align.cell_fabric import transformation
from intel_p1222p2.IntelP1222p2Canvas import IntelP1222p2Canvas
def check_results(ckt_name, skip_layers=None):
ckt_name_json = ckt_name
if not ckt_name.endswith('.json'):
ckt_name_json += '.json'
with open( ckt_name_json, "rt") as fp:
d = json.load(fp)
if skip_layers is None:
skip_layers = set( ["boundary", "diearea", "cellarea", "ndiff", "pdiff", "nwell", "poly", "gcn", "tcn", "polycon", "diffcon"])
else:
skip_layers = set( ["boundary", "diearea", "cellarea"])
layer_tbl = { "diffcon": "Diffcon",
"polycon": "Polycon",
"nwell": "Nwell",
"metal1": "M1",
"metal2": "M2",
"metal3": "M3",
"metal4": "M4",
"metal5": "M5",
"via0": "V0",
"via1": "V1",
"via2": "V2",
"via3": "V3",
"via4": "V4"}
p = re.compile( "^(.*)_gr$")
def s( r):
assert all( v%10 == 0 for v in r)
return [ v//10 for v in r]
terminals = []
for term in d['terminals']:
ly = term['layer']
if str(ly).lower() in skip_layers:
continue
nm = term['netName'] if 'netName' in term else term['net_name']
#
# !kor and !float signals might be need. Right now, just excluding them.
#
if nm in ['!kor', '!float']: continue
if nm is not None and p.match(nm): continue
term['layer'] = layer_tbl.get( ly, ly)
term['rect'] = s(term['rect'])
terminals.append( term)
d['terminals'] = terminals
cnv = IntelP1222p2Canvas()
cnv.bbox = transformation.Rect( *s(d['bbox']))
cnv.terminals = d['terminals']
data = cnv.gen_data(run_pex=True, run_drc=True)
with open( ckt_name + "_prim.json", "w") as fp:
cnv.writeJSON(fp)
tbl = cnv.pex.getSummaryCaps()
def diffs( n0, n1):
a, b = tbl[n0], tbl[n1]
s = (a+b)/2
return f"{n0},{n1}: {a:.2f}f, {b:.2f}f, {100*(a/s-1):.1f}%, {100*(b/s-1):.1f}%"
if ckt_name.startswith("comparator") or ckt_name.startswith("align"):
print( diffs( 'vin', 'vip'))
print( diffs( 'vin_d', 'vip_d'))
print( diffs( 'vin_o', 'vip_o'))
print( diffs( 'von', 'vop'))
if ckt_name.startswith('pushpull'):
print( diffs( 'v_n1', 'v_n2'))
print( diffs( 'v_p1', 'v_p2'))
print( diffs( 'vo_ls', 'vref_ls'))
print( diffs( 'vo_hs', 'vref_hs'))
if False:
assert len(cnv.rd.different_widths) == 0, pformat(cnv.rd.different_widths)
assert len(cnv.rd.shorts) == 0, pformat(cnv.rd.shorts)
assert len(cnv.rd.opens) == 0, pformat(cnv.rd.opens)
assert len(cnv.drc.errors) == 0, pformat(cnv.drc.errors)
return(data)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Check <circuit>.JSON against design rules")
parser.add_argument("--circuit", required=True, type=str, help="Circuit name")
args = parser.parse_args()
check_results(args.circuit)
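# Example invocation (the circuit name is a placeholder; any <circuit>.json next
# to this script should work, with "<circuit>_prim.json" written back out):
#   python <path to this script> --circuit comparator_v2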
|
the-stack_0_19324 | # Copyright (c) 2014 Thales Services SAS
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from neutron_lib import constants as p_const
from neutron_lib import context as neutron_ctx
from neutron_lib.db import api as db_api
from neutron_lib import exceptions
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from neutron_lib.plugins.ml2 import api
from neutron_lib.plugins import utils as p_utils
from neutron_lib.utils import helpers
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log
from sqlalchemy import and_
from sqlalchemy import sql
from neutron.db.models import network_segment_range as range_model
from neutron.objects import base as base_obj
LOG = log.getLogger(__name__)
IDPOOL_SELECT_SIZE = 100
class BaseTypeDriver(api.ML2TypeDriver):
"""BaseTypeDriver for functions common to Segment and flat."""
def __init__(self):
try:
self.physnet_mtus = helpers.parse_mappings(
cfg.CONF.ml2.physical_network_mtus, unique_values=False
)
except Exception as e:
LOG.error("Failed to parse physical_network_mtus: %s", e)
self.physnet_mtus = []
def get_mtu(self, physical_network=None):
return p_utils.get_deployment_physnet_mtu()
class SegmentTypeDriver(BaseTypeDriver):
"""SegmentTypeDriver for segment allocation.
Provide methods helping to perform segment allocation fully or partially
specified.
"""
def __init__(self, model):
super(SegmentTypeDriver, self).__init__()
if issubclass(model, base_obj.NeutronDbObject):
self.model = model.db_model
else:
self.model = model
self.primary_keys = set(dict(self.model.__table__.columns))
self.primary_keys.remove("allocated")
# TODO(ataraday): get rid of this method when old TypeDriver won't be used
def _get_session(self, arg):
if isinstance(arg, neutron_ctx.Context):
return arg.session, db_api.CONTEXT_WRITER.using(arg)
return arg, arg.session.begin(subtransactions=True)
def build_segment_query(self, session, **filters):
# Only uses filters that correspond to columns defined by this model.
# Subclasses may use/support additional filters
columns = set(dict(self.model.__table__.columns))
model_filters = dict((k, filters[k])
for k in columns & set(filters.keys()))
return [session.query(self.model).filter_by(allocated=False,
**model_filters)]
def build_segment_queries_for_tenant_and_shared_ranges(self, session,
**filters):
"""Enforces that segments are allocated from network segment ranges
that are owned by the tenant, and then from shared ranges, but never
from ranges owned by other tenants.
This method also enforces that other network segment range attributes
are used when constraining the set of possible segments to be used.
"""
network_type = self.get_type()
project_id = filters.pop('project_id', None)
columns = set(dict(self.model.__table__.columns))
model_filters = dict((k, filters[k])
for k in columns & set(filters.keys()))
query = (session.query(self.model)
.filter_by(allocated=False, **model_filters))
query = query.join(
range_model.NetworkSegmentRange,
and_(range_model.NetworkSegmentRange.network_type == network_type,
self.model.physical_network ==
range_model.NetworkSegmentRange.physical_network if
network_type == p_const.TYPE_VLAN else
sql.expression.true()))
query = query.filter(and_(self.model_segmentation_id >=
range_model.NetworkSegmentRange.minimum,
self.model_segmentation_id <=
range_model.NetworkSegmentRange.maximum))
query_project_id = (query.filter(
range_model.NetworkSegmentRange.project_id == project_id) if
project_id is not None else [])
query_shared = query.filter(
range_model.NetworkSegmentRange.shared == sql.expression.true())
return [query_project_id] + [query_shared]
def allocate_fully_specified_segment(self, context, **raw_segment):
"""Allocate segment fully specified by raw_segment.
If segment exists, then try to allocate it and return db object
If segment does not exists, then try to create it and return db object
If allocation/creation failed, then return None
"""
network_type = self.get_type()
session, ctx_manager = self._get_session(context)
try:
with ctx_manager:
alloc = (
session.query(self.model).filter_by(**raw_segment).
first())
if alloc:
if alloc.allocated:
# Segment already allocated
return
else:
# Segment not allocated
LOG.debug("%(type)s segment %(segment)s allocate "
"started ",
{"type": network_type,
"segment": raw_segment})
count = (session.query(self.model).
filter_by(allocated=False, **raw_segment).
update({"allocated": True}))
if count:
LOG.debug("%(type)s segment %(segment)s allocate "
"done ",
{"type": network_type,
"segment": raw_segment})
return alloc
# Segment allocated or deleted since select
LOG.debug("%(type)s segment %(segment)s allocate "
"failed: segment has been allocated or "
"deleted",
{"type": network_type,
"segment": raw_segment})
# Segment to create or already allocated
LOG.debug("%(type)s segment %(segment)s create started",
{"type": network_type, "segment": raw_segment})
alloc = self.model(allocated=True, **raw_segment)
alloc.save(session)
LOG.debug("%(type)s segment %(segment)s create done",
{"type": network_type, "segment": raw_segment})
except db_exc.DBDuplicateEntry:
# Segment already allocated (insert failure)
alloc = None
LOG.debug("%(type)s segment %(segment)s create failed",
{"type": network_type, "segment": raw_segment})
return alloc
def allocate_partially_specified_segment(self, context, **filters):
"""Allocate model segment from pool partially specified by filters.
Return allocated db object or None.
"""
network_type = self.get_type()
session, ctx_manager = self._get_session(context)
with ctx_manager:
queries = (self.build_segment_queries_for_tenant_and_shared_ranges(
session, **filters)
if directory.get_plugin(
plugin_constants.NETWORK_SEGMENT_RANGE) else
self.build_segment_query(session, **filters))
for select in queries:
# Selected segment can be allocated before update by someone
# else
allocs = select.limit(IDPOOL_SELECT_SIZE).all()
if not allocs:
# No resource available
continue
alloc = random.choice(allocs)
raw_segment = dict((k, alloc[k]) for k in self.primary_keys)
LOG.debug("%(type)s segment allocate from pool "
"started with %(segment)s ",
{"type": network_type,
"segment": raw_segment})
count = (session.query(self.model).
filter_by(allocated=False, **raw_segment).
update({"allocated": True}))
if count:
LOG.debug("%(type)s segment allocate from pool "
"success with %(segment)s ",
{"type": network_type,
"segment": raw_segment})
return alloc
# Segment allocated since select
LOG.debug("Allocate %(type)s segment from pool "
"failed with segment %(segment)s",
{"type": network_type,
"segment": raw_segment})
# saving real exception in case we exceeded amount of attempts
raise db_exc.RetryRequest(
exceptions.NoNetworkFoundInMaximumAllowedAttempts())
|
the-stack_0_19325 | import os.path as p
import sys
from .conda import conda
class PyMathJaxError(Exception):
pass
def mathjax_path(as_url: bool=False) -> str:
""" Returns MathJax.js file absolute path. """
if conda:
import os
pyexedir = p.dirname(p.abspath(sys.executable))
if os.name == 'nt':
mathjax_dir = p.join(pyexedir, 'Library', 'lib', 'mathjax')
else:
mathjax_dir = p.join(p.dirname(pyexedir), 'lib', 'mathjax')
else:
mathjax_dir = p.join(p.dirname(p.abspath(__file__)), 'mathjax')
mathjax = p.join(mathjax_dir, 'MathJax.js')
if not p.isfile(mathjax):
raise PyMathJaxError(f"'{mathjax}' wasn't found.")
if as_url:
import pathlib
return pathlib.Path(mathjax).as_uri()
else:
return mathjax
def cli():
"""
Usage: py-mathjax-path [OPTIONS]
Prints MathJax.js file absolute path.
Options:
--url, -u print MathJax.js file abs path as URL,
--help, -h Show this message and exit.
"""
url = False
if len(sys.argv) > 1:
if sys.argv[1] in ('--url', '-u'):
url = True
elif sys.argv[1] in ('--help', '-h'):
print(str(cli.__doc__).replace('\n ', '\n'))
return None
sys.stdout.write(mathjax_path(as_url=url))
if __name__ == '__main__':
cli()
|
the-stack_0_19326 | #appModules/wlmail.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2010 NVDA Contributors <http://www.nvda-project.org/>
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
import appModuleHandler
import controlTypes
import api
import winUser
from keyboardHandler import KeyboardInputGesture
from NVDAObjects.IAccessible.MSHTML import MSHTML
import msimn
class AboutBlankDocument(MSHTML):
"""A document called about:blank which hosts the HTML message composer document using viewlink.
Unfortunately, there doesn't seem to be any way to access the real (editable) viewlink document.
Therefore, we need to ignore this about:blank document so the user can access the editable document.
"""
# Make sure a buffer doesn't get created for this document.
# Otherwise, the viewLink document beneath it will be treated as part of this buffer and won't be accessible.
role = controlTypes.ROLE_UNKNOWN
def event_gainFocus(self):
# This document is useless to us, so don't bother to report it.
return
class AppModule(appModuleHandler.AppModule):
def chooseNVDAObjectOverlayClasses(self, obj, clsList):
if obj.windowClassName == "Internet Explorer_Server" and obj.role == controlTypes.ROLE_DOCUMENT and obj.HTMLNode and obj.HTMLNode.document.url=="about:blank":
clsList.insert(0, AboutBlankDocument)
elif obj.windowClassName=="SysListView32" and obj.windowControlID in (128,129,130) and obj.role==controlTypes.ROLE_LISTITEM:
clsList.insert(0,msimn.MessageRuleListItem)
elif obj.windowClassName=="SysListView32" and obj.role==controlTypes.ROLE_LISTITEM and obj.parent.name=="Outlook Express Message List":
clsList.insert(0,msimn.MessageListItem)
def event_gainFocus(self,obj,nextHandler):
nextHandler()
#Force focus to move to something sane when landing on a plain text message window
if obj.windowClassName=="ME_DocHost" and obj.windowControlID==1000 and obj.role==controlTypes.ROLE_PANE:
firstChild=obj.firstChild
if firstChild:
firstChild=obj.firstChild
if firstChild:
firstChild.setFocus()
return
if obj.windowClassName=="ATH_Note" and obj.event_objectID==winUser.OBJID_CLIENT and obj.IAccessibleChildID==0:
api.processPendingEvents()
if obj==api.getFocusObject() and controlTypes.STATE_FOCUSED in obj.states:
return KeyboardInputGesture.fromName("shift+tab").send()
|
the-stack_0_19328 | import logging
import re
import os
import time
from typing import Dict, List, cast
from assemblyline.common import isotime, forge
from assemblyline.common.constants import SUBMISSION_QUEUE, FILE_QUEUE, DISPATCH_TASK_HASH, \
DISPATCH_RUNNING_TASK_HASH, get_temporary_submission_data_name, get_tag_set_name, make_watcher_list_name
from assemblyline.common.forge import CachedObject, get_service_queue
from assemblyline.common.metrics import MetricsFactory
from assemblyline.datastore import Collection
from assemblyline.datastore.exceptions import MultiKeyError
from assemblyline.datastore.helper import AssemblylineDatastore
from assemblyline.odm.messages.dispatcher_heartbeat import Metrics
from assemblyline.odm.messages.dispatching import WatchQueueMessage
from assemblyline.odm.models.config import Config
from assemblyline.odm.models.error import Error
from assemblyline.odm.models.service import Service
from assemblyline.remote.datatypes import get_client
from assemblyline.remote.datatypes.hash import Hash, ExpiringHash
from assemblyline.remote.datatypes.queues.named import NamedQueue
from assemblyline.remote.datatypes.set import ExpiringSet
from assemblyline_core.dispatching.dispatch_hash import DispatchHash
from assemblyline import odm
from assemblyline.odm.messages.task import FileInfo, Task as ServiceTask
from assemblyline.odm.models.submission import Submission
from assemblyline_core.server_base import get_service_stage_hash, ServiceStage
from assemblyline_core.watcher.client import WatcherClient
# If you are doing development and you want the system to route jobs ignoring the service setup/teardown
# set an environment variable SKIP_SERVICE_SETUP to true for all dispatcher containers
SKIP_SERVICE_SETUP = os.environ.get('SKIP_SERVICE_SETUP', 'false').lower() in ['true', '1']
@odm.model()
class SubmissionTask(odm.Model):
"""Dispatcher internal model for submissions"""
submission: Submission = odm.Compound(Submission)
completed_queue = odm.Optional(odm.Keyword()) # Which queue to notify on completion
@odm.model()
class FileTask(odm.Model):
"""Dispatcher internal model for tracking each file in a submission."""
sid = odm.Keyword()
min_classification = odm.Classification() # Minimum classification of the file being scanned
parent_hash = odm.Optional(odm.Keyword())
file_info: FileInfo = odm.Compound(FileInfo)
depth = odm.Integer()
max_files = odm.Integer()
def get_tag_set_name(self) -> str:
"""Get the name of a redis set where the task tags are collected."""
return get_tag_set_name(self.sid, self.file_info.sha256)
def get_temporary_submission_data_name(self) -> str:
"""Get the name of a redis hash where tags for a submission are collected."""
return get_temporary_submission_data_name(self.sid, self.file_info.sha256)
class Scheduler:
"""This object encapsulates building the schedule for a given file type for a submission."""
def __init__(self, datastore: AssemblylineDatastore, config: Config, redis):
self.datastore = datastore
self.config = config
self.services = cast(Dict[str, Service], CachedObject(self._get_services))
self.service_stage = get_service_stage_hash(redis)
def build_schedule(self, submission: Submission, file_type: str) -> List[Dict[str, Service]]:
all_services = dict(self.services)
# Load the selected and excluded services by category
excluded = self.expand_categories(submission.params.services.excluded)
runtime_excluded = self.expand_categories(submission.params.services.runtime_excluded)
if not submission.params.services.selected:
selected = [s for s in all_services.keys()]
else:
selected = self.expand_categories(submission.params.services.selected)
# Add all selected, accepted, and not rejected services to the schedule
schedule: List[Dict[str, Service]] = [{} for _ in self.config.services.stages]
services = list(set(selected) - set(excluded) - set(runtime_excluded))
selected = []
skipped = []
for name in services:
service = all_services.get(name, None)
if not service:
skipped.append(name)
logging.warning(f"Service configuration not found: {name}")
continue
accepted = not service.accepts or re.match(service.accepts, file_type)
rejected = bool(service.rejects) and re.match(service.rejects, file_type)
if accepted and not rejected:
schedule[self.stage_index(service.stage)][name] = service
selected.append(name)
else:
skipped.append(name)
return schedule
def expand_categories(self, services: List[str]) -> List[str]:
"""Expands the names of service categories found in the list of services.
Args:
services (list): List of service category or service names.
"""
if services is None:
return []
services = list(services)
categories = self.categories()
found_services = []
seen_categories = set()
while services:
name = services.pop()
# If we found a new category mix in it's content
if name in categories:
if name not in seen_categories:
# Add all of the items in this group to the list of
# things that we need to evaluate, and mark this
# group as having been seen.
services.extend(categories[name])
seen_categories.add(name)
continue
# If it isn't a category, its a service
found_services.append(name)
# Use set to remove duplicates, set is more efficient in batches
return list(set(found_services))
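# Illustrative trace (category and service names are made up):
#   with categories() == {"Antivirus": ["ClamAV", "McAfee"]},
#   expand_categories(["Antivirus", "Extract"]) returns the three service
#   names ["ClamAV", "McAfee", "Extract"] in arbitrary order, deduplicated.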
def categories(self) -> Dict[str, List[str]]:
all_categories = {}
for service in self.services.values():
try:
all_categories[service.category].append(service.name)
except KeyError:
all_categories[service.category] = [service.name]
return all_categories
def stage_index(self, stage):
return self.config.services.stages.index(stage)
def _get_services(self):
stages = self.service_stage.items()
# noinspection PyUnresolvedReferences
return {x.name: x for x in self.datastore.list_all_services(full=True)
if x.enabled and (stages.get(x.name) == ServiceStage.Running or SKIP_SERVICE_SETUP)}
def depths_from_tree(file_tree: Dict[str, List[str]]) -> Dict[str, int]:
file_children: Dict[str, List[str]] = {}
depths: Dict[str, int] = {}
remaining_files = set()
for child, parents in file_tree.items():
remaining_files.add(child)
for parent in parents:
if parent:
remaining_files.add(parent)
file_children[parent] = file_children.get(parent, []) + [child]
else:
depths[child] = 0
next_round = dict(file_children)
change = True
while next_round and change:
file_children = next_round
next_round = dict()
change = False
for parent, children in file_children.items():
if parent in depths:
change = True
for child in children:
depths[child] = min(depths[parent] + 1, depths.get(child, float('inf')))
else:
next_round[parent] = children
return depths
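# Worked example (hypothetical file hashes): file_tree maps child -> parents,
# with None marking a submission root.
#   depths_from_tree({"a": [None], "b": ["a"], "c": ["b", "a"]})
#   -> {"a": 0, "b": 1, "c": 1}
# Each file ends up at the minimum depth over all of its parents.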
class Dispatcher:
def __init__(self, datastore, redis, redis_persist, logger, counter_name='dispatcher'):
# Load the datastore collections that we are going to be using
self.datastore: AssemblylineDatastore = datastore
self.log: logging.Logger = logger
self.submissions: Collection = datastore.submission
self.results: Collection = datastore.result
self.errors: Collection = datastore.error
self.files: Collection = datastore.file
# Create a config cache that will refresh config values periodically
self.config: Config = forge.get_config()
# Connect to all of our persistent redis structures
self.redis = redis or get_client(
host=self.config.core.redis.nonpersistent.host,
port=self.config.core.redis.nonpersistent.port,
private=False,
)
self.redis_persist = redis_persist or get_client(
host=self.config.core.redis.persistent.host,
port=self.config.core.redis.persistent.port,
private=False,
)
# Build some utility classes
self.scheduler = Scheduler(datastore, self.config, self.redis)
self.classification_engine = forge.get_classification()
self.timeout_watcher = WatcherClient(self.redis_persist)
self.submission_queue = NamedQueue(SUBMISSION_QUEUE, self.redis)
self.file_queue = NamedQueue(FILE_QUEUE, self.redis)
self._nonper_other_queues = {}
self.active_submissions = ExpiringHash(DISPATCH_TASK_HASH, host=self.redis_persist)
self.running_tasks = ExpiringHash(DISPATCH_RUNNING_TASK_HASH, host=self.redis)
# Publish counters to the metrics sink.
self.counter = MetricsFactory(metrics_type='dispatcher', schema=Metrics, name=counter_name,
redis=self.redis, config=self.config)
def volatile_named_queue(self, name: str) -> NamedQueue:
if name not in self._nonper_other_queues:
self._nonper_other_queues[name] = NamedQueue(name, self.redis)
return self._nonper_other_queues[name]
def dispatch_submission(self, task: SubmissionTask):
"""
Find any files associated with a submission and dispatch them if they are
not marked as in progress. If all files are finished, finalize the submission.
This version of dispatch submission doesn't verify each result, but assumes that
the dispatch table has been kept up to date by other components.
Preconditions:
- File exists in the filestore and file collection in the datastore
- Submission is stored in the datastore
"""
submission = task.submission
sid = submission.sid
if not self.active_submissions.exists(sid):
self.log.info(f"[{sid}] New submission received")
self.active_submissions.add(sid, task.as_primitives())
else:
self.log.info(f"[{sid}] Received a pre-existing submission, check if it is complete")
# Refresh the watch, this ensures that this function will be called again
# if something goes wrong with one of the files, and it never gets invoked by dispatch_file.
self.timeout_watcher.touch(key=sid, timeout=int(self.config.core.dispatcher.timeout),
queue=SUBMISSION_QUEUE, message={'sid': sid})
# Refresh the quota hold
if submission.params.quota_item and submission.params.submitter:
self.log.info(f"[{sid}] Submission will count towards {submission.params.submitter.upper()} quota")
Hash('submissions-' + submission.params.submitter, self.redis_persist).add(sid, isotime.now_as_iso())
# Open up the file/service table for this submission
dispatch_table = DispatchHash(submission.sid, self.redis, fetch_results=True)
file_parents = dispatch_table.file_tree() # Load the file tree data as well
# All the submission files, and all the file_tree files, to be sure we don't miss any incomplete children
unchecked_hashes = [submission_file.sha256 for submission_file in submission.files]
unchecked_hashes = list(set(unchecked_hashes) | set(file_parents.keys()))
# Using the file tree we can recalculate the depth of any file
depth_limit = self.config.submission.max_extraction_depth
file_depth = depths_from_tree(file_parents)
# Try to find all files, and extracted files, and create task objects for them
# (we will need the file data anyway for checking the schedule later)
max_files = len(submission.files) + submission.params.max_extracted
unchecked_files = [] # Files that haven't been checked yet
try:
for sha, file_data in self.files.multiget(unchecked_hashes).items():
unchecked_files.append(FileTask(dict(
sid=sid,
min_classification=task.submission.classification,
file_info=dict(
magic=file_data.magic,
md5=file_data.md5,
mime=file_data.mime,
sha1=file_data.sha1,
sha256=file_data.sha256,
size=file_data.size,
type=file_data.type,
),
depth=file_depth.get(sha, 0),
max_files=max_files
)))
except MultiKeyError as missing:
errors = []
for file_sha in missing.keys:
error = Error(dict(
archive_ts=submission.archive_ts,
expiry_ts=submission.expiry_ts,
response=dict(
message="Submission couldn't be completed due to missing file.",
service_name="dispatcher",
service_tool_version='4',
service_version='4',
status="FAIL_NONRECOVERABLE",
),
sha256=file_sha,
type='UNKNOWN'
))
error_key = error.build_key(service_tool_version=sid)
self.datastore.error.save(error_key, error)
errors.append(error_key)
return self.cancel_submission(task, errors, file_parents)
# Files that have already been encountered, but may or may not have been processed yet
# encountered_files = {file.sha256 for file in submission.files}
pending_files = {} # Files that have not yet been processed
# Track information about the results as we hit them
file_scores: Dict[str, int] = {}
# Load the current state of the dispatch table in one go rather than one at a time in the loop
prior_dispatches = dispatch_table.all_dispatches()
# Walk the schedule of every unchecked file to work out which ones still have outstanding work
for file_task in unchecked_files:
sha = file_task.file_info.sha256
schedule = self.build_schedule(dispatch_table, submission, sha, file_task.file_info.type)
while schedule:
stage = schedule.pop(0)
for service_name in stage:
# Only active services should be in this dict, so if a service that was placed in the
# schedule is now missing it has been disabled or taken offline.
service = self.scheduler.services.get(service_name)
if not service:
continue
# If the service is still marked as 'in progress'
runtime = time.time() - prior_dispatches.get(sha, {}).get(service_name, 0)
if runtime < service.timeout:
pending_files[sha] = file_task
continue
# It hasn't started, has timed out, or is finished, see if we have a result
result_row = dispatch_table.finished(sha, service_name)
# No result found, mark the file as incomplete
if not result_row:
pending_files[sha] = file_task
continue
if not submission.params.ignore_filtering and result_row.drop:
schedule.clear()
# The process table is marked that a service has been abandoned due to errors
if result_row.is_error:
continue
# Collect information about the result
file_scores[sha] = file_scores.get(sha, 0) + result_row.score
# Using the file tree find the most shallow parent of the given file
def lowest_parent(_sha):
# A root file won't have any parents in the dict
if _sha not in file_parents or None in file_parents[_sha]:
return None
return min((file_depth.get(parent, depth_limit), parent) for parent in file_parents[_sha])[1]
# Filter out things over the depth limit
pending_files = {sha: ft for sha, ft in pending_files.items() if ft.depth < depth_limit}
# Filter out files based on the extraction limits
pending_files = {sha: ft for sha, ft in pending_files.items()
if dispatch_table.add_file(sha, max_files, lowest_parent(sha))}
# If there are pending files, then at least one service, on at least one
# file isn't done yet, and hasn't been filtered by any of the previous few steps
# poke those files
if pending_files:
self.log.debug(f"[{sid}] Dispatching {len(pending_files)} files: {list(pending_files.keys())}")
for file_task in pending_files.values():
self.file_queue.push(file_task.as_primitives())
else:
self.log.debug(f"[{sid}] Finalizing submission.")
max_score = max(file_scores.values()) if file_scores else 0 # Submissions with no results have no score
self.finalize_submission(task, max_score, file_scores.keys())
def _cleanup_submission(self, task: SubmissionTask, file_list: List[str]):
"""Clean up code that is the same for canceled and finished submissions"""
submission = task.submission
sid = submission.sid
# Erase the temporary data which may have accumulated during processing
for file_hash in file_list:
hash_name = get_temporary_submission_data_name(sid, file_hash=file_hash)
ExpiringHash(hash_name, host=self.redis).delete()
if submission.params.quota_item and submission.params.submitter:
self.log.info(f"[{sid}] Submission no longer counts toward {submission.params.submitter.upper()} quota")
Hash('submissions-' + submission.params.submitter, self.redis_persist).pop(sid)
if task.completed_queue:
self.volatile_named_queue(task.completed_queue).push(submission.as_primitives())
# Send complete message to any watchers.
watcher_list = ExpiringSet(make_watcher_list_name(sid), host=self.redis)
for w in watcher_list.members():
NamedQueue(w).push(WatchQueueMessage({'status': 'STOP'}).as_primitives())
# Clear the timeout watcher
watcher_list.delete()
self.timeout_watcher.clear(sid)
self.active_submissions.pop(sid)
# Count the submission as 'complete' either way
self.counter.increment('submissions_completed')
def cancel_submission(self, task: SubmissionTask, errors, file_list):
"""The submission is being abandoned, delete everything, write failed state."""
submission = task.submission
sid = submission.sid
# Pull down the dispatch table and clear it from redis
dispatch_table = DispatchHash(submission.sid, self.redis)
dispatch_table.delete()
submission.classification = submission.params.classification
submission.error_count = len(errors)
submission.errors = errors
submission.state = 'failed'
submission.times.completed = isotime.now_as_iso()
self.submissions.save(sid, submission)
self._cleanup_submission(task, file_list)
self.log.error(f"[{sid}] Failed")
def finalize_submission(self, task: SubmissionTask, max_score, file_list):
"""All of the services for all of the files in this submission have finished or failed.
Update the records in the datastore, and flush the working data from redis.
"""
submission = task.submission
sid = submission.sid
# Pull down the dispatch table and clear it from redis
dispatch_table = DispatchHash(submission.sid, self.redis)
all_results = dispatch_table.all_results()
errors = dispatch_table.all_extra_errors()
dispatch_table.delete()
# Sort the errors out of the results
results = []
for row in all_results.values():
for status in row.values():
if status.is_error:
errors.append(status.key)
elif status.bucket == 'result':
results.append(status.key)
else:
self.log.warning(f"[{sid}] Unexpected service output bucket: {status.bucket}/{status.key}")
submission.classification = submission.params.classification
submission.error_count = len(errors)
submission.errors = errors
submission.file_count = len(file_list)
submission.results = results
submission.max_score = max_score
submission.state = 'completed'
submission.times.completed = isotime.now_as_iso()
self.submissions.save(sid, submission)
self._cleanup_submission(task, file_list)
self.log.info(f"[{sid}] Completed; files: {len(file_list)} results: {len(results)} "
f"errors: {len(errors)} score: {max_score}")
def dispatch_file(self, task: FileTask):
""" Handle a message describing a file to be processed.
This file may be:
- A new submission or extracted file.
- A file that has just completed a stage of processing.
- A file that has not completed a stage of processing, but this
call has been triggered by a timeout or similar.
If the file is totally new, we will setup a dispatch table, and fill it in.
Once we make/load a dispatch table, we will dispatch whichever group the table
shows us hasn't been completed yet.
When we dispatch to a service, we check if the task is already in the dispatch
queue. If it isn't proceed normally. If it is, check that the service is still online.
"""
# Read the message content
file_hash = task.file_info.sha256
active_task = self.active_submissions.get(task.sid)
if active_task is None:
self.log.warning(f"[{task.sid}] Untracked submission is being processed")
return
submission_task = SubmissionTask(active_task)
submission = submission_task.submission
# Refresh the watch on the submission, we are still working on it
self.timeout_watcher.touch(key=task.sid, timeout=int(self.config.core.dispatcher.timeout),
queue=SUBMISSION_QUEUE, message={'sid': task.sid})
# Open up the file/service table for this submission
dispatch_table = DispatchHash(task.sid, self.redis, fetch_results=True)
# Load things that we will need to fill out the service tasks
file_tags = ExpiringSet(task.get_tag_set_name(), host=self.redis)
file_tags_data = file_tags.members()
temporary_submission_data = ExpiringHash(task.get_temporary_submission_data_name(), host=self.redis)
temporary_data = [dict(name=row[0], value=row[1]) for row in temporary_submission_data.items().items()]
# Calculate the schedule for the file
schedule = self.build_schedule(dispatch_table, submission, file_hash, task.file_info.type)
started_stages = []
# Go through each round of the schedule removing complete/failed services
# Break when we find a stage that still needs processing
outstanding = {}
score = 0
errors = 0
while schedule and not outstanding:
stage = schedule.pop(0)
started_stages.append(stage)
for service_name in stage:
service = self.scheduler.services.get(service_name)
if not service:
continue
# Load the results, if there are no results, then the service must be dispatched later
# Don't check whether it has been dispatched: multiple dispatches are fine,
# but missing a dispatch isn't.
finished = dispatch_table.finished(file_hash, service_name)
if not finished:
outstanding[service_name] = service
continue
# If the service terminated in an error, count the error and continue
if finished.is_error:
errors += 1
continue
# if the service finished, count the score, and check if the file has been dropped
score += finished.score
if not submission.params.ignore_filtering and finished.drop:
schedule.clear()
if schedule: # If there are still stages in the schedule, overwrite them for next time
dispatch_table.schedules.set(file_hash, started_stages)
# Try to retry/dispatch any outstanding services
if outstanding:
self.log.info(f"[{task.sid}] File {file_hash} sent to services : {', '.join(list(outstanding.keys()))}")
for service_name, service in outstanding.items():
# Find the actual file name from the list of files in submission
filename = None
for file in submission.files:
if task.file_info.sha256 == file.sha256:
filename = file.name
break
# Build the actual service dispatch message
config = self.build_service_config(service, submission)
service_task = ServiceTask(dict(
sid=task.sid,
metadata=submission.metadata,
min_classification=task.min_classification,
service_name=service_name,
service_config=config,
fileinfo=task.file_info,
filename=filename or task.file_info.sha256,
depth=task.depth,
max_files=task.max_files,
ttl=submission.params.ttl,
ignore_cache=submission.params.ignore_cache,
ignore_dynamic_recursion_prevention=submission.params.ignore_dynamic_recursion_prevention,
tags=file_tags_data,
temporary_submission_data=temporary_data,
deep_scan=submission.params.deep_scan,
priority=submission.params.priority,
))
dispatch_table.dispatch(file_hash, service_name)
queue = get_service_queue(service_name, self.redis)
queue.push(service_task.priority, service_task.as_primitives())
else:
# There are no outstanding services, this file is done
# clean up the tags
file_tags.delete()
# If there are no outstanding ANYTHING for this submission,
# send a message to the submission dispatcher to finalize
self.counter.increment('files_completed')
if dispatch_table.all_finished():
self.log.info(f"[{task.sid}] Finished processing file '{file_hash}' starting submission finalization.")
self.submission_queue.push({'sid': submission.sid})
else:
self.log.info(f"[{task.sid}] Finished processing file '{file_hash}'. Other files are not finished.")
def build_schedule(self, dispatch_hash: DispatchHash, submission: Submission,
file_hash: str, file_type: str) -> List[List[str]]:
"""Rather than rebuilding the schedule every time we see a file, build it once and cache in redis."""
cached_schedule = dispatch_hash.schedules.get(file_hash)
if not cached_schedule:
# Get the schedule for that file type based on the submission parameters
obj_schedule = self.scheduler.build_schedule(submission, file_type)
# The schedule built by the scheduling tool has the service objects, we just want the names for now
cached_schedule = [list(stage.keys()) for stage in obj_schedule]
dispatch_hash.schedules.add(file_hash, cached_schedule)
return cached_schedule
@classmethod
def build_service_config(cls, service: Service, submission: Submission) -> Dict[str, str]:
"""Prepare the service config that will be used downstream.
v3 names: get_service_params get_config_data
"""
# Load the default service config
params = {x.name: x.default for x in service.submission_params}
# Overwrite it with values from the submission
if service.name in submission.params.service_spec:
params.update(submission.params.service_spec[service.name])
return params
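# Sketch of the merge above (service and parameter names are invented for
# illustration): a service declaring submission_params [{name: "timeout",
# default: 60}] yields {"timeout": 60}, but if the submission carries
# service_spec == {"MyService": {"timeout": 30}} the result is {"timeout": 30}.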
|
the-stack_0_19331 | from argparse import ArgumentParser
from typing import Any
from django.core.management.base import CommandError
from confirmation.models import Confirmation, create_confirmation_link
from zerver.lib.email_validation import email_allowed_for_realm
from zerver.lib.management import ZulipBaseCommand
from zerver.models import DomainNotAllowedForRealmError, PreregistrationUser
class Command(ZulipBaseCommand):
help = "Generate activation links for users and print them to stdout."
def add_arguments(self, parser: ArgumentParser) -> None:
parser.add_argument('--force',
action="store_true",
default=False,
help='Override that the domain is restricted to external users.')
parser.add_argument('emails', metavar='<email>', type=str, nargs='*',
help='email of users to generate an activation link for')
self.add_realm_args(parser, True)
def handle(self, *args: Any, **options: Any) -> None:
duplicates = False
realm = self.get_realm(options)
assert realm is not None # Should be ensured by parser
if not options['emails']:
self.print_help("./manage.py", "generate_invite_links")
raise CommandError
for email in options['emails']:
try:
self.get_user(email, realm)
print(email + ": There is already a user registered with that address.")
duplicates = True
continue
except CommandError:
pass
if duplicates:
return
for email in options['emails']:
try:
email_allowed_for_realm(email, realm)
except DomainNotAllowedForRealmError:
if not options["force"]:
raise CommandError("You've asked to add an external user '{}' to a "
"closed realm '{}'.\nAre you sure? To do this, "
"pass --force.".format(email, realm.string_id))
prereg_user = PreregistrationUser(email=email, realm=realm)
prereg_user.save()
print(email + ": " + create_confirmation_link(prereg_user,
Confirmation.INVITATION))
|
the-stack_0_19332 | """ Module to access the Root endpoints """
# pylint: disable=too-many-lines,too-many-locals,too-many-public-methods,too-few-public-methods
from ...models import PushNotification
from ..base import ApiBaseClass
class RootApi(ApiBaseClass):
""" """
async def acknowledge_notification(
self,
) -> PushNotification:
"""Acknowledge receiving of a notification
Permissions:
Must be logged in.
Minimum Server Version:
3.10
Api Reference:
`AcknowledgeNotification <https://api.mattermost.com/#operation/AcknowledgeNotification>`_
"""
url = "/notifications/ack"
request_kwargs = {
"url": url,
}
# pylint: disable-next=protected-access
async with self.client._get_httpx_client() as httpx_client:
response = await httpx_client.post(
**request_kwargs,
)
if self.skip_response_parsing:
return response
if response.status_code == 200:
response200 = PushNotification.parse_obj(response.json())
return response200
return response
|
the-stack_0_19333 | K = int(input())
NumsStr = input().split()
Nums = list(map(int, NumsStr)) # map
# algorithm 4. O(N)
MaxSum = 0
ThisSum = 0
for i in range(K):
ThisSum += Nums[i]
if ThisSum > MaxSum:
MaxSum = ThisSum
elif ThisSum < 0:
ThisSum = 0
print(MaxSum)
# algorithm 3. O(NlogN)
def MaxBorderSum(List, Start, End, Step):
MaxSum = 0
ThisSum = 0
for i in range(Start, End+Step, Step):
ThisSum += List[i]
if ThisSum > MaxSum:
MaxSum = ThisSum
return MaxSum
def DivideAndConquer(List, Left, Right):
if Left == Right: # end of recursion
return max(List[Left], 0)
Center = (Left + Right) // 2 # floored quotient
MaxLeftSum = DivideAndConquer(List, Left, Center)
MaxRightSum = DivideAndConquer(List, Center+1, Right)
MaxLeftBorderSum = MaxBorderSum(List, Center, Left, -1)
MaxRightBorderSum = MaxBorderSum(List, Center+1, Right, 1)
return max(MaxLeftSum, MaxRightSum, MaxLeftBorderSum + MaxRightBorderSum)
MaxSum = DivideAndConquer(Nums, 0, K-1)
print(MaxSum)
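# Why algorithm 3 is O(NlogN): each call splits the range in half and then
# scans it once for the border sums, so the cost follows the recurrence
# T(N) = 2*T(N/2) + O(N), which the master theorem resolves to O(NlogN).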
# algorithm 2. O(N**2)
MaxSum = 0
for i in range(K):
ThisSum = 0
for j in range(K)[i:]:
ThisSum += Nums[j]
if ThisSum > MaxSum:
MaxSum = ThisSum
print(MaxSum)
# algorithm 1. O(N**3)
MaxSum = 0
for i in range(K):
for j in range(K)[i:]:
ThisSum = 0
for k in range(i, j+1):
ThisSum += Nums[k]
if ThisSum > MaxSum:
MaxSum = ThisSum
print(MaxSum)
|
the-stack_0_19334 | class ReviewRouter(object):
"""
Sends all review-related operations to a database with the alias of
"reviews" or "reviews-s". No other apps should use this db alias.
"""
def db_for_read(self, model, **hints):
if model._meta.app_label == "reviews":
return "reviews-s"
return None
def db_for_write(self, model, **hints):
if model._meta.app_label == "reviews":
return "reviews"
return None
def allow_syncdb(self, db, model):
this_app = (model._meta.app_label == "reviews")
reviews_db = (db in ("reviews", "reviews-s"))
if this_app:
return reviews_db
if reviews_db:
return False
return None
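# To activate this router, Django's settings must reference it, e.g.
# DATABASE_ROUTERS = ["path.to.routers.ReviewRouter"] (module path is
# illustrative), with "reviews" and "reviews-s" aliases defined in DATABASES.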
|
the-stack_0_19335 | from builtins import map
from copy import deepcopy
from snips_nlu.pipeline.configs import ProcessingUnitConfig
from snips_nlu.pipeline.processing_unit import get_processing_unit_config
from snips_nlu.resources import merge_required_resources
from snips_nlu.utils import classproperty
class NLUEngineConfig(ProcessingUnitConfig):
"""Configuration of a :class:`.SnipsNLUEngine` object
Args:
intent_parsers_configs (list): List of intent parser configs
(:class:`.ProcessingUnitConfig`). The order in the list determines
the order in which each parser will be called by the nlu engine.
"""
# pylint: disable=super-init-not-called
def __init__(self, intent_parsers_configs=None):
if intent_parsers_configs is None:
from snips_nlu.pipeline.configs import (
ProbabilisticIntentParserConfig,
DeterministicIntentParserConfig)
intent_parsers_configs = [
DeterministicIntentParserConfig(),
ProbabilisticIntentParserConfig()
]
self.intent_parsers_configs = list(map(get_processing_unit_config,
intent_parsers_configs))
# pylint: enable=super-init-not-called
@classproperty
def unit_name(cls): # pylint:disable=no-self-argument
from snips_nlu.nlu_engine.nlu_engine import SnipsNLUEngine
return SnipsNLUEngine.unit_name
def get_required_resources(self):
resources = dict()
for config in self.intent_parsers_configs:
resources = merge_required_resources(
resources, config.get_required_resources())
return resources
def to_dict(self):
return {
"unit_name": self.unit_name,
"intent_parsers_configs": [
config.to_dict() for config in self.intent_parsers_configs
]
}
@classmethod
def from_dict(cls, obj_dict):
d = obj_dict
if "unit_name" in obj_dict:
d = deepcopy(obj_dict)
d.pop("unit_name")
return cls(**d)
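# Round-trip sketch: NLUEngineConfig.from_dict(NLUEngineConfig().to_dict())
# rebuilds an equivalent config, since from_dict() drops the "unit_name" key
# before re-instantiating with the serialized intent parser configs.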
|
the-stack_0_19338 | from __future__ import division
from __future__ import unicode_literals
import frappe
def get_context(context):
user = frappe.session.user
result = frappe.get_list("Exam Result", fields=["user", "student", "exam_id", "exam_start_date", "exam_end_date", "name",
"attended", "correct_answers", "wrong_answers", "total_marks", "secured__marks"],
filters={"user": user}, limit_page_length= 500, )
context.result = result
context.user = user
|
the-stack_0_19341 | from logging import Logger
from typing import Callable, Optional, Awaitable
from slack_sdk.errors import SlackApiError
from slack_bolt.logger import get_bolt_logger
from slack_bolt.request.async_request import AsyncBoltRequest
from slack_bolt.response import BoltResponse
from .async_authorization import AsyncAuthorization
from .async_internals import _build_error_response, _is_no_auth_required
from .internals import _is_no_auth_test_call_required
from ...authorization import AuthorizeResult
from ...authorization.async_authorize import AsyncAuthorize
class AsyncMultiTeamsAuthorization(AsyncAuthorization):
authorize: AsyncAuthorize
def __init__(self, authorize: AsyncAuthorize, base_logger: Optional[Logger] = None):
"""Multi-workspace authorization.
Args:
authorize: The function to authorize incoming requests from Slack.
base_logger: The base logger
"""
self.authorize = authorize
self.logger = get_bolt_logger(
AsyncMultiTeamsAuthorization, base_logger=base_logger
)
async def async_process(
self,
*,
req: AsyncBoltRequest,
resp: BoltResponse,
# As this method is not supposed to be invoked by bolt-python users,
# the naming conflict with the built-in one affects
# only the internals of this method
next: Callable[[], Awaitable[BoltResponse]],
) -> BoltResponse:
if _is_no_auth_required(req):
return await next()
if _is_no_auth_test_call_required(req):
req.context.set_authorize_result(
AuthorizeResult(
enterprise_id=req.context.enterprise_id,
team_id=req.context.team_id,
user_id=req.context.user_id,
)
)
return await next()
try:
auth_result: Optional[AuthorizeResult] = await self.authorize(
context=req.context,
enterprise_id=req.context.enterprise_id,
team_id=req.context.team_id,
user_id=req.context.user_id,
)
if auth_result:
req.context.set_authorize_result(auth_result)
token = auth_result.bot_token or auth_result.user_token
req.context["token"] = token
# As AsyncApp#_init_context() generates a new AsyncWebClient for this request,
# it's safe to modify this instance.
req.context.client.token = token
return await next()
else:
# This situation can arise if:
# * A developer installed the app from the "Install to Workspace" button in Slack app config page
# * The InstallationStore failed to save or deleted the installation for this workspace
self.logger.error(
"Although the app should be installed into this workspace, "
"the AuthorizeResult (returned value from authorize) for it was not found."
)
return _build_error_response()
except SlackApiError as e:
self.logger.error(f"Failed to authorize with the given token ({e})")
return _build_error_response()
|
the-stack_0_19343 | #!/usr/bin/env python3
import sys
def main(args):
nub = [s.strip() for s in sys.stdin][0]
nub2 = nub + nub[0]
n = 0
for i in range(len(nub)):
if nub2[i] == nub2[i+1]:
n += int(nub2[i])
print(n)
n = 0
l = len(nub)
h = l//2
for i in range(l):
if nub[i] == nub[(i+h) % l]:
n += int(nub[i])
print(n)
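# Part two pairs each digit with the one halfway around the circular list;
# indexing with nub[(i + h) % l] avoids building a rotated copy of the input.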
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
the-stack_0_19345 | from typing import Optional
from pydantic import BaseModel
from xpresso import App, FromQuery, Path
class Filter(BaseModel):
prefix: str
limit: int
skip: int = 0
async def read_items(filter: FromQuery[Optional[Filter]]) -> Optional[Filter]:
return filter
app = App(
routes=[
Path(
path="/items/",
get=read_items,
),
]
)
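# Quick sanity check of the query-model binding above (assuming the app is
# served with uvicorn, e.g. `uvicorn items:app` if this file is items.py):
# GET /items/?prefix=abc&limit=10 should return
# {"prefix": "abc", "limit": 10, "skip": 0}.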
|
the-stack_0_19346 | # Some code was borrowed from https://github.com/petewarden/tensorflow_makefile/blob/master/tensorflow/models/image/mnist/convolutional.py
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import numpy
from scipy import ndimage
from six.moves import urllib
import tensorflow as tf
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
DATA_DIRECTORY = "data"
# Params for MNIST
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
NUM_LABELS = 10
VALIDATION_SIZE = 5000 # Size of the validation set.
# Download MNIST data
def maybe_download(filename):
"""Download the data from Yann's website, unless it's already here."""
if not tf.gfile.Exists(DATA_DIRECTORY):
tf.gfile.MakeDirs(DATA_DIRECTORY)
filepath = os.path.join(DATA_DIRECTORY, filename)
if not tf.gfile.Exists(filepath):
filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
# Extract the images
def extract_data(filename, num_images, norm_shift=False, norm_scale=True):
"""Extract the images into a 4D tensor [image index, y, x, channels].
Values are rescaled from [0, 255] down to [-0.5, 0.5].
"""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)
data = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.float32)
if norm_shift:
data = data - (PIXEL_DEPTH / 2.0)
if norm_scale:
data = data / PIXEL_DEPTH
data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)
data = numpy.reshape(data, [num_images, -1])
return data
# Extract the labels
def extract_labels(filename, num_images):
"""Extract the labels into a vector of int64 label IDs."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_images)
labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)
num_labels_data = len(labels)
one_hot_encoding = numpy.zeros((num_labels_data,NUM_LABELS))
one_hot_encoding[numpy.arange(num_labels_data),labels] = 1
one_hot_encoding = numpy.reshape(one_hot_encoding, [-1, NUM_LABELS])
return one_hot_encoding
# Augment training data
def expend_training_data(images, labels):
expanded_images = []
expanded_labels = []
j = 0 # counter
for x, y in zip(images, labels):
j = j+1
if j%100==0:
print ('expanding data : %03d / %03d' % (j,numpy.size(images,0)))
# register original data
expanded_images.append(x)
expanded_labels.append(y)
# get a value for the background
# zero is the expected value, but median() is used to estimate background's value
bg_value = numpy.median(x) # this is regarded as background's value
image = numpy.reshape(x, (-1, 28))
for i in range(4):
# rotate the image with random degree
angle = numpy.random.randint(-15,15,1)
new_img = ndimage.rotate(image,angle,reshape=False, cval=bg_value)
# shift the image with random distance
shift = numpy.random.randint(-2, 2, 2)
new_img_ = ndimage.shift(new_img,shift, cval=bg_value)
# register new training data
expanded_images.append(numpy.reshape(new_img_, 784))
expanded_labels.append(y)
# images and labels are concatenated for random-shuffle at each epoch
# notice that pair of image and label should not be broken
expanded_train_total_data = numpy.concatenate((expanded_images, expanded_labels), axis=1)
numpy.random.shuffle(expanded_train_total_data)
return expanded_train_total_data
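# Note on the layout produced above: each row of the returned array is one
# flattened 28x28 image (784 values) concatenated with its one-hot label
# (10 values), i.e. 794 columns, so image/label pairs survive the shuffle.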
# Prepare MNIST data
def prepare_MNIST_data(use_norm_shift=False, use_norm_scale=True, use_data_augmentation=False, reuse=False, percentage=0.1):
# Get the data.
train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')
# Extract it into numpy arrays.
train_data = extract_data(train_data_filename, 60000, use_norm_shift, use_norm_scale)
train_labels = extract_labels(train_labels_filename, 60000)
test_data = extract_data(test_data_filename, 10000, use_norm_shift, use_norm_scale)
test_labels = extract_labels(test_labels_filename, 10000)
if reuse:
train_inds = numpy.loadtxt('train_inds.csv').astype(int)
percentage = numpy.loadtxt('percentage.csv').astype(float)
print(percentage)
else:
# shuffle data randomly
train_inds = numpy.arange(len(train_data))
numpy.random.shuffle(train_inds)
numpy.savetxt('train_inds.csv', train_inds)
numpy.savetxt('percentage.csv', [percentage])
# shuffle according to train_inds
train_data = train_data[train_inds]
train_labels = train_labels[train_inds]
# Generate a validation set.
VALIDATION_SIZE = int((1-percentage)*60000) # Size of the validation set.
validation_data = train_data[:VALIDATION_SIZE, :]
validation_labels = train_labels[:VALIDATION_SIZE,:]
train_data = train_data[VALIDATION_SIZE:, :]
train_labels = train_labels[VALIDATION_SIZE:,:]
# Concatenate train_data & train_labels for random shuffle
if use_data_augmentation:
train_total_data = expend_training_data(train_data, train_labels)
else:
train_total_data = numpy.concatenate((train_data, train_labels), axis=1)
# Concatenate train_data & train_labels for random shuffle
if use_data_augmentation:
validation_total_data = expend_training_data(validation_data, validation_labels)
else:
validation_total_data = numpy.concatenate((validation_data, validation_labels), axis=1)
# Concatenate train_data & train_labels for random shuffle
if use_data_augmentation:
test_total_data = expend_training_data(test_data, test_labels)
else:
test_total_data = numpy.concatenate((test_data, test_labels), axis=1)
train_size = train_total_data.shape[0]
validation_size = validation_total_data.shape[0]
test_size = test_total_data.shape[0]
print("#######")
print(train_size)
print(validation_size)
print(test_size)
print("#######")
return train_total_data, train_size, validation_total_data, validation_size, test_total_data, test_size, test_data, test_labels |
the-stack_0_19347 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2013-2018 Danilo Bargen
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import print_function, division, absolute_import, unicode_literals
from collections import namedtuple
from time import sleep
import RPi.GPIO as GPIO
from . import common as c
from .lcd import BaseCharLCD
from .compat import range
import sys
if sys.version_info.major < 3:
from time import clock as now
else:
from time import perf_counter as now
PinConfig = namedtuple('PinConfig', 'rs rw e d0 d1 d2 d3 d4 d5 d6 d7 backlight mode')
class CharLCD(BaseCharLCD):
def __init__(self, numbering_mode=None, pin_rs=None, pin_rw=None, pin_e=None, pins_data=None,
pin_backlight=None, backlight_mode='active_low',
backlight_enabled=True,
cols=20, rows=4, dotsize=8,
charmap='A02',
auto_linebreaks=True,
compat_mode=False,
compat_mode_wait_time=0.001,
cache_text=True):
"""
Character LCD controller.
The default pin numbers are based on the BOARD numbering scheme (1-26).
You can save 1 pin by not using RW. Set ``pin_rw`` to ``None`` if you
want this.
:param pin_rs: Pin for register select (RS). Default: ``15``.
:type pin_rs: int
:param pin_rw: Pin for selecting read or write mode (R/W). Set this to
``None`` for read only mode. Default: ``18``.
:type pin_rw: int
:param pin_e: Pin to start data read or write (E). Default: ``16``.
:type pin_e: int
:param pins_data: List of data bus pins in 8 bit mode (DB0-DB7) or in 4
bit mode (DB4-DB7) in ascending order. Default: ``[21, 22, 23, 24]``.
:type pins_data: list of int
:param pin_backlight: Pin for controlling backlight on/off. Set this to
``None`` for no backlight control. Default: ``None``.
:type pin_backlight: int
:param backlight_mode: Set this to either ``active_high`` or ``active_low``
to configure the operating control for the backlight. Has no effect if
pin_backlight is ``None``
:type backlight_mode: str
:param backlight_enabled: Whether the backlight is enabled initially.
Default: ``True``. Has no effect if pin_backlight is ``None``
:type backlight_enabled: bool
:param numbering_mode: Which scheme to use for numbering of the GPIO pins,
either ``GPIO.BOARD`` or ``GPIO.BCM``. Default: ``GPIO.BOARD`` (1-26).
:type numbering_mode: int
:param rows: Number of display rows (usually 1, 2 or 4). Default: ``4``.
:type rows: int
:param cols: Number of columns per row (usually 16 or 20). Default ``20``.
:type cols: int
:param dotsize: Some 1 line displays allow a font height of 10px.
Allowed: ``8`` or ``10``. Default: ``8``.
:type dotsize: int
:param charmap: The character map used. Depends on your LCD. This must
be either ``A00`` or ``A02`` or ``ST0B``. Default: ``A02``.
:type charmap: str
:param auto_linebreaks: Whether or not to automatically insert line
breaks. Default: ``True``.
:type auto_linebreaks: bool
:param compat_mode: Whether to run additional checks to support older LCDs
that may not run at the reference clock (or keep up with it).
:type compat_mode: bool
:param compat_mode_wait_time: Minimum time to pass between sends.
If zero, compatibility mode is effectively turned off. Default: ``0.001`` seconds.
:type compat_mode_wait_time: float
"""
# Configure compatibility mode
self.compat_mode = compat_mode and compat_mode_wait_time > 0
self.compat_mode_wait_time = compat_mode_wait_time
if compat_mode:
self.last_send_event = now()
# Set attributes
if numbering_mode == GPIO.BCM or numbering_mode == GPIO.BOARD:
self.numbering_mode = numbering_mode
else:
raise ValueError('Invalid GPIO numbering mode: numbering_mode=%s, '
'must be either GPIO.BOARD or GPIO.BCM.\n'
'See https://gist.github.com/dbrgn/77d984a822bfc9fddc844f67016d0f7e '
'for more details.' % numbering_mode)
if pin_rs is None:
raise ValueError('pin_rs is not defined.')
if pin_e is None:
raise ValueError('pin_e is not defined.')
if len(pins_data) == 4: # 4 bit mode
self.data_bus_mode = c.LCD_4BITMODE
block1 = [None] * 4
elif len(pins_data) == 8: # 8 bit mode
self.data_bus_mode = c.LCD_8BITMODE
block1 = pins_data[:4]
else:
raise ValueError('There should be exactly 4 or 8 data pins.')
block2 = pins_data[-4:]
self.pins = PinConfig(rs=pin_rs, rw=pin_rw, e=pin_e,
d0=block1[0], d1=block1[1], d2=block1[2], d3=block1[3],
d4=block2[0], d5=block2[1], d6=block2[2], d7=block2[3],
backlight=pin_backlight,
mode=numbering_mode)
self.backlight_mode = backlight_mode
# Call superclass
super(CharLCD, self).__init__(cols, rows, dotsize,
charmap=charmap,
auto_linebreaks=auto_linebreaks,
cache_text=cache_text)
# Set backlight status
if pin_backlight is not None:
self.backlight_enabled = backlight_enabled
def _init_connection(self):
# Setup GPIO
GPIO.setmode(self.numbering_mode)
for pin in list(filter(None, self.pins))[:-1]:
GPIO.setup(pin, GPIO.OUT)
if self.pins.backlight is not None:
GPIO.setup(self.pins.backlight, GPIO.OUT)
# Initialization
c.msleep(50)
GPIO.output(self.pins.rs, 0)
GPIO.output(self.pins.e, 0)
if self.pins.rw is not None:
GPIO.output(self.pins.rw, 0)
def _close_connection(self):
pins = (self.pins.rs, self.pins.rw, self.pins.e, self.pins.d0, self.pins.d1,
self.pins.d2, self.pins.d3, self.pins.d4, self.pins.d5, self.pins.d6,
self.pins.d7)
active_pins = [pin for pin in pins if pin is not None]
GPIO.cleanup(active_pins)
# Properties
def _get_backlight_enabled(self):
# We could probably read the current GPIO output state via sysfs, but
# for now let's just store the state in the class
if self.pins.backlight is None:
raise ValueError('You did not configure a GPIO pin for backlight control!')
return bool(self._backlight_enabled)
def _set_backlight_enabled(self, value):
if self.pins.backlight is None:
raise ValueError('You did not configure a GPIO pin for backlight control!')
if not isinstance(value, bool):
raise ValueError('backlight_enabled must be set to ``True`` or ``False``.')
self._backlight_enabled = value
GPIO.output(self.pins.backlight,
value ^ (self.backlight_mode == 'active_low'))
backlight_enabled = property(_get_backlight_enabled, _set_backlight_enabled,
doc='Whether or not to turn on the backlight.')
# Low level commands
def _send(self, value, mode):
"""Send the specified value to the display with automatic 4bit / 8bit
selection. The rs_mode is either ``RS_DATA`` or ``RS_INSTRUCTION``."""
# Wait, if compatibility mode is enabled
if self.compat_mode:
self._wait()
# Choose instruction or data mode
GPIO.output(self.pins.rs, mode)
# If the RW pin is used, set it to low in order to write.
if self.pins.rw is not None:
GPIO.output(self.pins.rw, 0)
# Write data out in chunks of 4 or 8 bit
if self.data_bus_mode == c.LCD_8BITMODE:
self._write8bits(value)
else:
self._write4bits(value >> 4)
self._write4bits(value)
# Record the time for the tail-end of the last send event
if self.compat_mode:
self.last_send_event = now()
def _send_data(self, value):
"""Send data to the display. """
self._send(value, c.RS_DATA)
def _send_instruction(self, value):
"""Send instruction to the display. """
self._send(value, c.RS_INSTRUCTION)
def _write4bits(self, value):
"""Write 4 bits of data into the data bus."""
for i in range(4):
bit = (value >> i) & 0x01
GPIO.output(self.pins[i + 7], bit)
self._pulse_enable()
def _write8bits(self, value):
"""Write 8 bits of data into the data bus."""
for i in range(8):
bit = (value >> i) & 0x01
GPIO.output(self.pins[i + 3], bit)
self._pulse_enable()
def _pulse_enable(self):
"""Pulse the `enable` flag to process data."""
GPIO.output(self.pins.e, 0)
c.usleep(1)
GPIO.output(self.pins.e, 1)
c.usleep(1)
GPIO.output(self.pins.e, 0)
c.usleep(100) # commands need > 37us to settle
def _wait(self):
"""Rate limit the number of send events."""
end = self.last_send_event + self.compat_mode_wait_time
sleep_duration = end - now()
if sleep_duration > 0:
sleep(sleep_duration)
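# Minimal wiring sketch (BOARD pin numbers below are only an example and must
# match your own wiring; write_string() is inherited from BaseCharLCD):
# lcd = CharLCD(numbering_mode=GPIO.BOARD, pin_rs=15, pin_e=16,
# pins_data=[21, 22, 23, 24], cols=20, rows=4)
# lcd.write_string('Hello world')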
|
the-stack_0_19348 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Code are based on
# https://github.com/fmassa/vision/blob/voc_dataset/torchvision/datasets/voc.py
# Copyright (c) Francisco Massa.
# Copyright (c) Ellis Brown, Max deGroot.
# Copyright (c) Megvii, Inc. and its affiliates.
from loguru import logger
import cv2
import numpy as np
from yolox.evaluators.voc_eval import voc_eval
import os
import os.path
import pickle
import xml.etree.ElementTree as ET
from .datasets_wrapper import Dataset
from .voc_classes import VOC_CLASSES
class AnnotationTransform(object):
"""Transforms a VOC annotation into a Tensor of bbox coords and label index
Initilized with a dictionary lookup of classnames to indexes
Arguments:
class_to_ind (dict, optional): dictionary lookup of classnames -> indexes
(default: alphabetic indexing of VOC's 20 classes)
keep_difficult (bool, optional): keep difficult instances or not
(default: False)
height (int): height
width (int): width
"""
def __init__(self, class_to_ind=None, keep_difficult=True):
self.class_to_ind = class_to_ind or dict(
zip(VOC_CLASSES, range(len(VOC_CLASSES)))
)
self.keep_difficult = keep_difficult
def __call__(self, target):
"""
Arguments:
target (annotation) : the target annotation to be made usable
will be an ET.Element
Returns:
a list containing lists of bounding boxes [bbox coords, class name]
"""
res = np.empty((0, 5))
for obj in target.iter("object"):
difficult = obj.find("difficult")
if difficult is not None:
difficult = int(difficult.text) == 1
else:
difficult = False
if not self.keep_difficult and difficult:
continue
name = obj.find("name").text.strip()
bbox = obj.find("bndbox")
pts = ["xmin", "ymin", "xmax", "ymax"]
bndbox = []
for i, pt in enumerate(pts):
cur_pt = int(bbox.find(pt).text) - 1
# scale height or width
# cur_pt = cur_pt / width if i % 2 == 0 else cur_pt / height
bndbox.append(cur_pt)
label_idx = self.class_to_ind[name]
bndbox.append(label_idx)
res = np.vstack((res, bndbox)) # [xmin, ymin, xmax, ymax, label_ind]
# img_id = target.find('filename').text[:-4]
width = int(target.find("size").find("width").text)
height = int(target.find("size").find("height").text)
img_info = (height, width)
return res, img_info
class VOCDetection(Dataset):
"""
VOC Detection Dataset Object
input is image, target is annotation
Args:
root (string): filepath to VOCdevkit folder.
image_set (string): imageset to use (eg. 'train', 'val', 'test')
transform (callable, optional): transformation to perform on the
input image
target_transform (callable, optional): transformation to perform on the
target `annotation`
(eg: take in caption string, return tensor of word indices)
dataset_name (string, optional): which dataset to load
(default: 'VOC2007')
"""
def __init__(
self,
data_dir,
image_sets=[("2007", "trainval"), ("2012", "trainval")],
img_size=(416, 416),
preproc=None,
target_transform=AnnotationTransform(),
dataset_name="VOC0712",
cache=False,
):
super().__init__(img_size)
self.root = data_dir
self.image_set = image_sets
self.img_size = img_size
self.preproc = preproc
self.target_transform = target_transform
self.name = dataset_name
self._annopath = os.path.join("%s", "Annotations", "%s.xml")
self._imgpath = os.path.join("%s", "JPEGImages", "%s.jpg")
self._classes = VOC_CLASSES
self.ids = list()
for (year, name) in image_sets:
self._year = year
rootpath = os.path.join(self.root, "VOC" + year)
for line in open(
os.path.join(rootpath, "ImageSets", "Main", name + ".txt")
):
self.ids.append((rootpath, line.strip()))
self.annotations = self._load_coco_annotations()
self.imgs = None
if cache:
self._cache_images()
def __len__(self):
return len(self.ids)
def _load_coco_annotations(self):
return [self.load_anno_from_ids(_ids) for _ids in range(len(self.ids))]
def _cache_images(self):
logger.warning(
"\n********************************************************************************\n"
"You are using cached images in RAM to accelerate training.\n"
"This requires large system RAM.\n"
"Make sure you have 60G+ RAM and 19G available disk space for training VOC.\n"
"********************************************************************************\n"
)
max_h = self.img_size[0]
max_w = self.img_size[1]
cache_file = self.root + "/img_resized_cache_" + self.name + ".array"
if not os.path.exists(cache_file):
logger.info(
"Caching images for the frist time. This might take about 3 minutes for VOC"
)
self.imgs = np.memmap(
cache_file,
shape=(len(self.ids), max_h, max_w, 3),
dtype=np.uint8,
mode="w+",
)
from tqdm import tqdm
from multiprocessing.pool import ThreadPool
NUM_THREADs = min(8, os.cpu_count())
loaded_images = ThreadPool(NUM_THREADs).imap(
lambda x: self.load_resized_img(x),
range(len(self.annotations)),
)
pbar = tqdm(enumerate(loaded_images), total=len(self.annotations))
for k, out in pbar:
self.imgs[k][: out.shape[0], : out.shape[1], :] = out.copy()
self.imgs.flush()
pbar.close()
else:
logger.warning(
"You are using cached imgs! Make sure your dataset is not changed!!"
)
logger.info("Loading cached imgs...")
self.imgs = np.memmap(
cache_file,
shape=(len(self.ids), max_h, max_w, 3),
dtype=np.uint8,
mode="r+",
)
def load_anno_from_ids(self, index):
img_id = self.ids[index]
target = ET.parse(self._annopath % img_id).getroot()
assert self.target_transform is not None
res, img_info = self.target_transform(target)
height, width = img_info
r = min(self.img_size[0] / height, self.img_size[1] / width)
res[:, :4] *= r
resized_info = (int(height * r), int(width * r))
return (res, img_info, resized_info)
def load_anno(self, index):
return self.annotations[index][0]
def load_resized_img(self, index):
img = self.load_image(index)
r = min(self.img_size[0] / img.shape[0], self.img_size[1] / img.shape[1])
resized_img = cv2.resize(
img,
(int(img.shape[1] * r), int(img.shape[0] * r)),
interpolation=cv2.INTER_LINEAR,
).astype(np.uint8)
return resized_img
def load_image(self, index):
img_id = self.ids[index]
img = cv2.imread(self._imgpath % img_id, cv2.IMREAD_COLOR)
assert img is not None
return img
def pull_item(self, index):
"""Returns the original image and target at an index for mixup
Note: not using self.__getitem__(), as any transformations passed in
could mess up this functionality.
Argument:
index (int): index of img to show
Return:
img, target
"""
if self.imgs is not None:
target, img_info, resized_info = self.annotations[index]
pad_img = self.imgs[index]
img = pad_img[: resized_info[0], : resized_info[1], :].copy()
else:
img = self.load_resized_img(index)
target, img_info, _ = self.annotations[index]
return img, target, img_info, index
@Dataset.mosaic_getitem
def __getitem__(self, index):
img, target, img_info, img_id = self.pull_item(index)
if self.preproc is not None:
img, target = self.preproc(img, target, self.input_dim)
return img, target, img_info, img_id
def evaluate_detections(self, all_boxes, output_dir=None):
"""
all_boxes is a list of length number-of-classes.
Each list element is a list of length number-of-images.
Each of those list elements is either an empty list []
or a numpy array of detection.
all_boxes[class][image] = [] or np.array of shape #dets x 5
"""
self._write_voc_results_file(all_boxes)
IouTh = np.linspace(
0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True
)
mAPs = []
for iou in IouTh:
mAP = self._do_python_eval(output_dir, iou)
mAPs.append(mAP)
print("--------------------------------------------------------------")
print("map_5095:", np.mean(mAPs))
print("map_50:", mAPs[0])
print("--------------------------------------------------------------")
return np.mean(mAPs), mAPs[0]
def _get_voc_results_file_template(self):
filename = "comp4_det_test" + "_{:s}.txt"
filedir = os.path.join(self.root, "results", "VOC" + self._year, "Main")
if not os.path.exists(filedir):
os.makedirs(filedir)
path = os.path.join(filedir, filename)
return path
def _write_voc_results_file(self, all_boxes):
for cls_ind, cls in enumerate(VOC_CLASSES):
cls_ind = cls_ind
if cls == "__background__":
continue
print("Writing {} VOC results file".format(cls))
filename = self._get_voc_results_file_template().format(cls)
with open(filename, "wt") as f:
for im_ind, index in enumerate(self.ids):
index = index[1]
dets = all_boxes[cls_ind][im_ind]
if len(dets) == 0:
continue
for k in range(dets.shape[0]):
f.write(
"{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n".format(
index,
dets[k, -1],
dets[k, 0] + 1,
dets[k, 1] + 1,
dets[k, 2] + 1,
dets[k, 3] + 1,
)
)
def _do_python_eval(self, output_dir="output", iou=0.5):
rootpath = os.path.join(self.root, "VOC" + self._year)
name = self.image_set[0][1]
annopath = os.path.join(rootpath, "Annotations", "{:s}.xml")
imagesetfile = os.path.join(rootpath, "ImageSets", "Main", name + ".txt")
cachedir = os.path.join(
self.root, "annotations_cache", "VOC" + self._year, name
)
if not os.path.exists(cachedir):
os.makedirs(cachedir)
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = True if int(self._year) < 2010 else False
print("Eval IoU : {:.2f}".format(iou))
if output_dir is not None and not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(VOC_CLASSES):
if cls == "__background__":
continue
filename = self._get_voc_results_file_template().format(cls)
rec, prec, ap = voc_eval(
filename,
annopath,
imagesetfile,
cls,
cachedir,
ovthresh=iou,
use_07_metric=use_07_metric,
)
aps += [ap]
if iou == 0.5:
print("AP for {} = {:.4f}".format(cls, ap))
if output_dir is not None:
with open(os.path.join(output_dir, cls + "_pr.pkl"), "wb") as f:
pickle.dump({"rec": rec, "prec": prec, "ap": ap}, f)
if iou == 0.5:
print("Mean AP = {:.4f}".format(np.mean(aps)))
print("~~~~~~~~")
print("Results:")
for ap in aps:
print("{:.3f}".format(ap))
print("{:.3f}".format(np.mean(aps)))
print("~~~~~~~~")
print("")
print("--------------------------------------------------------------")
print("Results computed with the **unofficial** Python eval code.")
print("Results should be very close to the official MATLAB eval code.")
print("Recompute with `./tools/reval.py --matlab ...` for your paper.")
print("-- Thanks, The Management")
print("--------------------------------------------------------------")
return np.mean(aps)
|
the-stack_0_19349 | import os
import shutil
import tkinter as tk
from ftplib import FTP
import cftime
import cartopy.crs as ccrs
import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
from netCDF4 import Dataset
def getdata():
global file_dir, file_list, window, var, forma
'''
Notes:
1. Data source: the China Argo Real-time Data Center website (also the Hangzhou Global Ocean Argo System Field Scientific Observation and Research Station of the Ministry of Natural Resources).
2. Copyright: China Argo Real-time Data Center, Second Institute of Oceanography, Ministry of Natural Resources.
3. The author accepts no legal liability for any losses or adverse consequences arising from the use of this module.
4. This module downloads data over FTP using anonymous login.
5. Known bug: depending on the bandwidth of the host and the server, downloads may occasionally fail; if '下载完毕。' ("Download finished.") is not shown, treat the download as failed.
WRITE BY YuHanxue in 2021.12.1 in OUC
Contact email: [email protected]
'''
window.destroy()
window = tk.Tk()
window.geometry("500x500")
window.iconbitmap(r".\lib\ico\IDisk HD ALT.ico")
window.title('FTP模块')
tk.Label(window, text='已启动FTP下载模块', font=('Arial', 12)).place(x=0, y=0)
ftpserver = 'data.argo.org.cn'
ftpath = '/pub/ARGO/BOA_Argo/NetCDF'
localpath = './data/'
ftp = FTP()
try:
ftp.connect(ftpserver, 21)
ftp.login()
ftp.cwd(ftpath)
except:
raise IOError('FTP数据连接失败,请检查您的网络环境')
else:
tk.Label(window, text=f'{ftpserver}欢迎信息:{ftp.getwelcome()}', font=(
'Arial', 12)).place(x=0, y=20)
tk.Label(window, text='FTP连接成功', font=('Arial', 12)).place(x=0, y=40)
tk.Label(window, text=f'成功进入FTP服务器:{ftp.pwd()}', font=(
'Arial', 12)).place(x=0, y=60)
file_list = list(ftp.nlst())
for i in range(13):
file_list.pop()
file_sta = (file_list[0]).split('_')
file_end = (file_list[-1]).split('_')
tk.Label(window, text=f'NC文件记录时间从{file_sta[2]:4}年{file_sta[3][0:2]:2}月\
到{file_end[2]:4}年{file_end[3][0:2]:2}月', font=('Arial', 12)).place(x=0, y=80)
# Download the data
if not os.path.exists(localpath):
os.makedirs(localpath)
tk.Label(window, text='请问需要单个数据还是批量数据?',
font=('Arial', 12)).place(x=0, y=100)
print()
def single():
global file_dir, file_list, window, var, forma
def get():
global file_dir, file_list, window, var, forma
year = e_year.get()
mon = e_mon.get()
filename = 'BOA_Argo_'+str(year)+'_'+str(mon).zfill(2)+'.nc'
bufsize = 1024
path = os.path.join(localpath, filename)
with open(path, 'wb') as fid:
tk.Label(window, text='正在下载:').grid(row=3, column=1)
window.update()
ftp.retrbinary('RETR {0}'.format(filename), fid.write, bufsize)
tk.Label(window, text='下载完毕。').grid(row=4, column=1)
tk.Button(window, text='进入可视化', command=printfil).grid(
row=5, column=1)
window.destroy()
window = tk.Tk()
window.geometry("300x500")
window.iconbitmap(r".\lib\ico\IDisk HD ALT.ico")
window.title('ARGO数据单项下载')
l_year = tk.Label(window, text='年:(2004-2021)')
l_year.grid(row=0)
e_year = tk.Entry(window)
e_year.grid(row=0, column=1)
l_mon = tk.Label(window, text='月:(1-12)')
l_mon.grid(row=1)
e_mon = tk.Entry(window)
e_mon.grid(row=1, column=1)
b_sure = tk.Button(window, text='确定', command=get)
b_sure.grid(row=2, column=1)
def batch():
global file_dir, file_list, window, var, forma
def get():
global file_dir, file_list, window, var, forma
tk.Label(window, text='正在下载,请不要退出').grid(row=6)
window.update()
year1 = e_year.get()
mon1 = e_mon.get()
year2 = e_year2.get()
mon2 = e_mon2.get()
file_down_start = 'BOA_Argo_' + \
str(year1)+'_'+str(mon1).zfill(2)+'.nc'
file_down_start_index = file_list.index(file_down_start)
file_down_end = 'BOA_Argo_'+str(year2)+'_'+str(mon2).zfill(2)+'.nc'
file_down_end_index = file_list.index(file_down_end)
i = 7
for filename in file_list[file_down_start_index:file_down_end_index+1]:
bufsize = 1024
path = os.path.join(localpath, filename)
with open(path, 'wb') as fid:
tk.Label(window, text=f'正在下载:{filename}').grid(row=i)
window.update()
ftp.retrbinary('RETR {0}'.format(
filename), fid.write, bufsize)
tk.Label(window, text=f'{filename}文件下载结束').grid(row=i+1)
window.update()
i += 2
if filename == file_list[file_down_end_index]:
window.destroy()
window = tk.Tk()
window.geometry("300x500")
window.iconbitmap(r".\lib\ico\IDisk HD ALT.ico")
window.title('ARGO数据结束下载')
tk.Button(window, text='进入可视化', command=printfil).pack()
window.destroy()
window = tk.Tk()
window.geometry("300x500")
window.iconbitmap(r".\lib\ico\IDisk HD ALT.ico")
window.title('ARGO数据批量下载')
l_year = tk.Label(window, text='起始年份:(2004-2021)')
l_year.grid(row=0)
e_year = tk.Entry(window)
e_year.grid(row=0, column=1)
l_mon = tk.Label(window, text='起始月份:(1-12)')
l_mon.grid(row=1)
e_mon = tk.Entry(window)
e_mon.grid(row=1, column=1)
l_year2 = tk.Label(window, text='起始年份:(2004-2021)')
l_year2.grid(row=2)
e_year2 = tk.Entry(window)
e_year2.grid(row=2, column=1)
l_mon2 = tk.Label(window, text='起始月份:(1-12)')
l_mon2.grid(row=3)
e_mon2 = tk.Entry(window)
e_mon2.grid(row=3, column=1)
b_sure = tk.Button(window, text='确定', command=get)
b_sure.grid(row=4, column=1)
b = tk.Button(window, text='单个数据', command=single, width=35)
c = tk.Button(window, text='批量数据', command=batch, width=35)
b.place(x=0, y=120)
c.place(x=250, y=120)
window.mainloop()
def ftp():
global file_dir, file_list, window, var, forma
file_dir = './data'
if os.path.exists(file_dir):
shutil.rmtree('./data')
os.makedirs('./data')
getdata()
def demo():
global file_dir, file_list, window, var, forma
file_dir = './demo_data'
printfil()
def selfdata():
global file_dir, file_list, window, var, forma
file_dir = './self_data'
if not os.path.exists(file_dir):
os.makedirs(file_dir)
window.destroy()
window = tk.Tk()
window.geometry("300x500")
window.iconbitmap(r".\lib\ico\IDisk HD ALT.ico")
window.title('自定义文件')
tk.Label(window, text='请将文件放入self_data文件夹内', font=('Arial', 12)).pack()
tk.Label(window, text='文件必须是BOA_Argo_yyyy_mm.nc格式,y指年份,m指月份',
font=('Arial', 12)).pack()
tk.Label(window, text='放置结束请按结束键', font=('Arial', 12)).pack()
tk.Button(window, text='结束放置', command=printfil).pack()
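# List the files found in the chosen directory and ask the user to confirm before visualization.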
def printfil():
global file_dir, file_list, window, var, forma
file_list = list(os.listdir(file_dir))
for i in range(len(file_list)):
file_list[i] = file_dir+'/'+file_list[i]
window.destroy()
window = tk.Tk()
window.geometry("300x500")
window.title('文件确认')
window.iconbitmap(r".\lib\ico\IDisk HD ALT.ico")
tk.Label(window, text='您将处理以下文件:', font=('Arial', 12)).pack()
for i in file_list:
tk.Label(window, text=i, font=('Arial', 12)).pack()
tk.Button(window, text='进入可视化', command=choose).pack()
def choose():
global file_dir, file_list, window, var, forma
window.destroy()
window = tk.Tk()
window.title('请选择温度或盐度')
window.geometry("300x500")
window.iconbitmap(r".\lib\ico\IDisk HD ALT.ico")
la1 = tk.Label(window, text='请选择可视化对象', font=('Arial', 12))
la1.place(y=0)
b = tk.Button(window, text='温度', command=temp, width=20)
c = tk.Button(window, text='盐度', command=salt, width=20)
b.place(x=0, y=20)
c.place(x=150, y=20)
def salt():
global file_dir, file_list, window, var, forma
window.destroy()
window = tk.Tk()
window.title('已选择盐度')
window.geometry("300x500")
window.iconbitmap(r".\lib\ico\IDisk HD ALT.ico")
var = 'salt'
la1 = tk.Label(window, text='请选择可视化格式', font=('Arial', 12))
la1.place(y=0)
b = tk.Button(window, text='GIF 动图', command=forma_gif, width=20)
c = tk.Button(window, text='JPG 图片', command=forma_jpg, width=20)
b.place(x=0, y=20)
c.place(x=150, y=20)
def temp():
global file_dir, file_list, window, var, forma
window.destroy()
window = tk.Tk()
window.title('已选择温度')
window.geometry("300x500")
window.iconbitmap(r".\lib\ico\IDisk HD ALT.ico")
var = 'temp'
la1 = tk.Label(window, text='请选择可视化格式', font=('Arial', 12))
la1.place(y=0)
b = tk.Button(window, text='GIF 动图', command=forma_gif, width=20)
c = tk.Button(window, text='JPG 图片', command=forma_jpg, width=20)
b.place(x=0, y=20)
c.place(x=150, y=20)
def forma_gif():
global file_dir, file_list, window, var, forma
forma = gifmake
window.destroy()
window = tk.Tk()
window.title('GIF制作')
window.geometry("300x500")
window.iconbitmap(r".\lib\ico\IDisk HD ALT.ico")
for fil in file_list:
file_name = var+'_of_'+fil.split('/')[-1]
data = Dataset(fil)
lon = data.variables['lon']
lat = data.variables['lat']
data1 = data.variables[var]
lat = slice(np.min(lat), np.max(lat)+lat[1]-lat[0], lat[1]-lat[0])
lon = slice(np.min(lon), np.max(lon)+lon[1]-lon[0], lon[1]-lon[0])
Lat, Lon = np.mgrid[lat, lon]
tk.Label(
window, text=f'正在可视化{file_name[:-3]}', font=('Arial', 12)).pack()
window.update()
forma(Lon, Lat, data1, file_name[:-3])
end()
def forma_jpg():
global file_dir, file_list, window, var, forma
forma = picmake
window.destroy()
window = tk.Tk()
window.title('JPG制作')
window.geometry("300x500")
window.iconbitmap(r".\lib\ico\IDisk HD ALT.ico")
for fil in file_list:
file_name = var+'_of_'+fil.split('/')[-1]
data = Dataset(fil)
lon = data.variables['lon']
lat = data.variables['lat']
data1 = data.variables[var]
lat = slice(np.min(lat), np.max(lat)+lat[1]-lat[0], lat[1]-lat[0])
lon = slice(np.min(lon), np.max(lon)+lon[1]-lon[0], lon[1]-lon[0])
Lat, Lon = np.mgrid[lat, lon]
tk.Label(window, text=f'正在可视化{file_name}', font=('Arial', 12)).pack()
window.update()
forma(Lon, Lat, data1, file_name[:-3])
end()
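# Save one JPG per depth level (filled contours plus contour lines on a PlateCarree map) under ./pic.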
def picmake(Lon, Lat, data1, name):
global file_dir, file_list, window, var, forma
for i in range(np.shape(data1)[1]):
plt.cla()
plt.figure(figsize=(20, 10))
ax = plt.axes(projection=ccrs.PlateCarree(central_longitude=180))
ax.coastlines()
data_drew = data1[0, i, :, :]
plt.contour(Lon, Lat, data_drew, 16, alpha=0.75, linewidths=0.5,
colors='black', transform=ccrs.PlateCarree(central_longitude=0))
c = plt.contourf(Lon, Lat, data_drew, 16,
transform=ccrs.PlateCarree(central_longitude=0))
plt.colorbar(c)
plt.title(f'depth={i}', fontsize='xx-large')
name1 = './pic/'+name+f'_depth={i}.jpg'
plt.savefig(name1, dpi=200)
plt.close()
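# Build an animated GIF that steps through the depth levels of a single file and save it under ./gif.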
def gifmake(Lon, Lat, data1, name):
global file_dir, file_list, window, var, forma
name = './gif/'+name+'.gif'
fig = plt.figure(figsize=(20, 10))
def updatefig(num):
plt.cla()
ax = plt.axes(projection=ccrs.PlateCarree(central_longitude=180))
ax.coastlines()
data_drew = data1[0, num, :, :]
ax.contourf(Lon, Lat, data_drew, 16,
transform=ccrs.PlateCarree(central_longitude=0))
ax.contour(Lon, Lat, data_drew, 16, linewidths=0.5, alpha=0.75,
colors='black', transform=ccrs.PlateCarree(central_longitude=0))
plt.title(f'depth={num}', fontsize='xx-large')
return ax
ani = animation.FuncAnimation(
fig, updatefig, frames=range(np.shape(data1)[1]))
ani.save(name, fps=15)
def end():
global file_dir, file_list, window, var, forma
window.destroy()
window = tk.Tk()
window.title('结束')
window.iconbitmap(r".\lib\ico\IDisk HD ALT.ico")
if forma == picmake:
tk.Label(window, text=f'制作完毕,请于pic文件夹内查看', font=('Arial', 12)).pack()
elif forma == gifmake:
tk.Label(window, text=f'制作完毕,请于gif文件夹内查看', font=('Arial', 12)).pack()
tk.Button(window, text='结束程序', command=endgui).pack()
def endgui():
window.destroy()
global file_dir, file_list, ishit, window
window = tk.Tk()
window.title('ARGO数据可视化')
window.geometry("300x500")
window.iconbitmap(r".\lib\ico\IDisk HD ALT.ico")
la1 = tk.Label(window, text='请选择数据来源', font=('Arial', 12))
la1.place(y=0)
ishit = 0
b = tk.Button(window, text='ftp', command=ftp, width=13)
c = tk.Button(window, text='demo', command=demo, width=13)
d = tk.Button(window, text='self', command=selfdata, width=13)
#b.grid(column=10, row=10)
b.place(x=0, y=20)
c.place(x=100, y=20)
d.place(x=200, y=20)
window.mainloop()
|
the-stack_0_19352 | """:mod:`wand.version` --- Version data
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can find the current version in the command line interface:
.. sourcecode:: console
$ python -m wand.version
0.0.0
$ python -m wand.version --verbose
Wand 0.0.0
ImageMagick 6.7.7-6 2012-06-03 Q16 http://www.imagemagick.org
$ python -m wand.version --config | grep CC | cut -d : -f 2
gcc -std=gnu99 -std=gnu99
$ python -m wand.version --fonts | grep Helvetica
Helvetica
Helvetica-Bold
Helvetica-Light
Helvetica-Narrow
Helvetica-Oblique
$ python -m wand.version --formats | grep CMYK
CMYK
CMYKA
.. versionadded:: 0.2.0
The command line interface.
.. versionadded:: 0.2.2
The ``--verbose``/``-v`` option which also prints ImageMagick library
version for CLI.
.. versionadded:: 0.4.1
The ``--fonts``, ``--formats``, & ``--config`` option allows printing
additional information about ImageMagick library.
"""
from __future__ import print_function
import ctypes
import datetime
import re
import sys
try:
from .api import libmagick, library
except ImportError: # pragma: no cover
libmagick = None
from .compat import binary, string_type, text
__all__ = ('VERSION', 'VERSION_INFO', 'MAGICK_VERSION',
'MAGICK_VERSION_DELEGATES', 'MAGICK_VERSION_FEATURES',
'MAGICK_VERSION_INFO', 'MAGICK_VERSION_NUMBER',
'MAGICK_RELEASE_DATE', 'MAGICK_RELEASE_DATE_STRING', 'MAGICK_HDRI',
'QUANTUM_DEPTH', 'QUANTUM_RANGE', 'QUANTUM_SCALE',
'configure_options', 'fonts', 'formats')
#: (:class:`tuple`) The version tuple e.g. ``(0, 1, 2)``.
#:
#: .. versionchanged:: 0.1.9
#: Becomes :class:`tuple`. (It was string before.)
VERSION_INFO = (0, 6, 8)
#: (:class:`basestring`) The version string e.g. ``'0.1.2'``.
#:
#: .. versionchanged:: 0.1.9
#: Becomes string. (It was :class:`tuple` before.)
VERSION = '{0}.{1}.{2}'.format(*VERSION_INFO)
if libmagick:
c_magick_version = ctypes.c_size_t()
#: (:class:`basestring`) The version string of the linked ImageMagick
#: library. The exactly same string to the result of
#: :c:func:`GetMagickVersion` function.
#:
#: Example::
#:
#: 'ImageMagick 6.7.7-6 2012-06-03 Q16 http://www.imagemagick.org'
#:
#: .. versionadded:: 0.2.1
MAGICK_VERSION = text(
libmagick.GetMagickVersion(ctypes.byref(c_magick_version))
)
#: (:class:`numbers.Integral`) The version number of the linked
#: ImageMagick library.
#:
#: .. versionadded:: 0.2.1
MAGICK_VERSION_NUMBER = c_magick_version.value
_match = re.match(r'^ImageMagick\s+(\d+)\.(\d+)\.(\d+)(?:-(\d+))?',
MAGICK_VERSION)
#: (:class:`basestring`) A string of all delegates enabled.
#: This value is identical to what is returned by
#: :c:func:`GetMagickDelegates`
#:
#: Set to empty string if the system uses an older version of
#: ImageMagick-6, or does not support :c:func:`GetMagickDelegates`.
#:
#: .. versionadded:: 0.5.0
if libmagick.GetMagickDelegates: # pragma: no cover
MAGICK_VERSION_DELEGATES = text(libmagick.GetMagickDelegates())
else: # pragma: no cover
MAGICK_VERSION_DELEGATES = ""
#: (:class:`basestring`) A string of all features enabled.
#: This value is identical to what is returned by
#: :c:func:`GetMagickFeatures`
#:
#: .. versionadded:: 0.5.0
MAGICK_VERSION_FEATURES = text(libmagick.GetMagickFeatures())
#: (:class:`tuple`) The version tuple e.g. ``(6, 7, 7, 6)`` of
#: :const:`MAGICK_VERSION`.
#:
#: .. versionadded:: 0.2.1
MAGICK_VERSION_INFO = tuple(int(v or 0) for v in _match.groups())
#: (:class:`basestring`) The date string e.g. ``'2012-06-03'`` of
#: :const:`MAGICK_RELEASE_DATE_STRING`. This value is the exactly same
#: string to the result of :c:func:`GetMagickReleaseDate` function.
#:
#: .. versionadded:: 0.2.1
MAGICK_RELEASE_DATE_STRING = text(libmagick.GetMagickReleaseDate())
if MAGICK_RELEASE_DATE_STRING:
_match = re.match(r'^(\d{4})-?(\d\d)-?(\d\d)$',
MAGICK_RELEASE_DATE_STRING)
#: (:class:`datetime.date`) The release date of the linked ImageMagick
#: library. Equivalent to the result of :c:func:`GetMagickReleaseDate`
#: function.
#:
#: .. versionadded:: 0.2.1
MAGICK_RELEASE_DATE = datetime.date(*map(int, _match.groups()))
c_quantum_depth = ctypes.c_size_t()
libmagick.GetMagickQuantumDepth(ctypes.byref(c_quantum_depth))
#: (:class:`numbers.Integral`) The quantum depth configuration of
#: the linked ImageMagick library. One of 8, 16, 32, or 64.
#:
#: .. versionadded:: 0.3.0
QUANTUM_DEPTH = c_quantum_depth.value
c_quantum_range = ctypes.c_size_t()
libmagick.GetMagickQuantumRange(ctypes.byref(c_quantum_range))
#: (:class:`numbers.Integral`) The quantum range configuration of
#: the linked ImageMagick library.
#:
#: .. versionadded:: 0.5.0
QUANTUM_RANGE = c_quantum_range.value
#: (:class:`numbers.Real`) The quantum scale of the linked ImageMagick
#: library. This is calculated as `1.0 / QUANTUM_RANGE`.
#:
#: .. versionadded:: 0.6.8
QUANTUM_SCALE = 1.0 / float(QUANTUM_RANGE)
#: (:class:`bool`) True if ImageMagick is compiled for High Dynamic
#: Range Image.
MAGICK_HDRI = 'HDRI' in MAGICK_VERSION_FEATURES
del c_magick_version, _match, c_quantum_depth, c_quantum_range
def configure_options(pattern='*'):
"""
Queries ImageMagick library for configurations options given at
compile-time.
Example: Find where the ImageMagick documents are installed::
>>> from wand.version import configure_options
>>> configure_options('DOC*')
{'DOCUMENTATION_PATH': '/usr/local/share/doc/ImageMagick-6'}
:param pattern: A term to filter queries against. Supports wildcard '*'
characters. Default patterns '*' for all options.
:type pattern: :class:`basestring`
:returns: Directory of configuration options matching given pattern
:rtype: :class:`collections.defaultdict`
"""
if not isinstance(pattern, string_type):
raise TypeError('pattern must be a string, not ' + repr(pattern))
# We must force init environment to load user config paths.
library.MagickWandGenesis()
pattern_p = ctypes.create_string_buffer(binary(pattern))
config_count = ctypes.c_size_t(0)
configs = {}
configs_p = library.MagickQueryConfigureOptions(pattern_p,
ctypes.byref(config_count))
for cursor in range(config_count.value):
config = ctypes.string_at(configs_p[cursor])
val_p = library.MagickQueryConfigureOption(config)
if val_p:
configs[text(config)] = text(ctypes.string_at(val_p))
val_p = library.MagickRelinquishMemory(val_p)
if configs_p:
configs_p = library.MagickRelinquishMemory(configs_p)
return configs
def fonts(pattern='*'):
"""
Queries ImageMagick library for available fonts.
Available fonts can be configured by defining `types.xml`,
`type-ghostscript.xml`, or `type-windows.xml`.
Use :func:`wand.version.configure_options` to locate system search path,
and `resources <http://www.imagemagick.org/script/resources.php>`_
article for defining xml file.
Example: List all bold Helvetica fonts::
>>> from wand.version import fonts
>>> fonts('*Helvetica*Bold*')
['Helvetica-Bold', 'Helvetica-Bold-Oblique', 'Helvetica-BoldOblique',
'Helvetica-Narrow-Bold', 'Helvetica-Narrow-BoldOblique']
:param pattern: A term to filter queries against. Supports wildcard '*'
characters. Default patterns '*' for all options.
:type pattern: :class:`basestring`
:returns: Sequence of matching fonts
:rtype: :class:`collections.Sequence`
"""
if not isinstance(pattern, string_type):
raise TypeError('pattern must be a string, not ' + repr(pattern))
# We must force init environment to load user config paths.
library.MagickWandGenesis()
pattern_p = ctypes.create_string_buffer(binary(pattern))
number_fonts = ctypes.c_size_t(0)
fonts = []
fonts_p = library.MagickQueryFonts(pattern_p,
ctypes.byref(number_fonts))
for cursor in range(number_fonts.value):
font = ctypes.string_at(fonts_p[cursor])
fonts.append(text(font))
if fonts_p:
fonts_p = library.MagickRelinquishMemory(fonts_p)
return fonts
def formats(pattern='*'):
"""
Queries ImageMagick library for supported formats.
Example: List supported PNG formats::
>>> from wand.version import formats
>>> formats('PNG*')
['PNG', 'PNG00', 'PNG8', 'PNG24', 'PNG32', 'PNG48', 'PNG64']
:param pattern: A term to filter formats against. Supports wildcards '*'
characters. Default pattern '*' for all formats.
:type pattern: :class:`basestring`
:returns: Sequence of matching formats
:rtype: :class:`collections.Sequence`
"""
if not isinstance(pattern, string_type):
raise TypeError('pattern must be a string, not ' + repr(pattern))
# We must force init environment to load user config paths.
library.MagickWandGenesis()
pattern_p = ctypes.create_string_buffer(binary(pattern))
number_formats = ctypes.c_size_t(0)
formats = []
formats_p = library.MagickQueryFormats(pattern_p,
ctypes.byref(number_formats))
for cursor in range(number_formats.value):
value = ctypes.string_at(formats_p[cursor])
formats.append(text(value))
if formats_p:
formats_p = library.MagickRelinquishMemory(formats_p)
return formats
if __doc__ is not None:
__doc__ = __doc__.replace('0.0.0', VERSION)
del libmagick
if __name__ == '__main__': # pragma: no cover
options = frozenset(sys.argv[1:])
if '-v' in options or '--verbose' in options:
print('Wand', VERSION)
try:
print(MAGICK_VERSION)
except NameError:
pass
elif '--fonts' in options:
for font in fonts():
print(font)
elif '--formats' in options:
for supported_format in formats():
print(supported_format)
elif '--config' in options:
config_options = configure_options()
for key in config_options:
print('{:24s}: {}'.format(key, config_options[key]))
else:
print(VERSION)
|
the-stack_0_19354 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
import inspect
import itertools
import logging
import time
from types import NoneType
import types
from google.appengine.ext import db, ndb
from common.mcfw.cache import set_cache_key
from common.mcfw.consts import MISSING
from common.mcfw.properties import get_members, simple_types, object_factory, long_property, unicode_property, typed_property
class ErrorResponse(object):
status_code = long_property('1')
error = unicode_property('2')
data = typed_property('3', dict)
def __init__(self, rest_exception):
"""
Args:
rest_exception (mcfw.exceptions.HttpException):
"""
self.status_code = rest_exception.http_code
self.error = u'%s' % rest_exception.error
self.data = rest_exception.data
class MissingArgumentException(Exception):
def __init__(self, name, func=None):
Exception.__init__(self, "%s is a required argument%s!" % (
name, (' in function %s' % func.func_name) if func else ''))
self.name = name
def log_access(call=True, response=True):
def wrap(f):
def logged(*args, **kwargs):
if call:
arg_str = ""
for i, arg in enumerate(args):
arg_str += " %s: %s\n" % (i, arg)
kwarg_str = ""
for kw, arg in kwargs.iteritems():
kwarg_str += " %s: %s\n" % (kw, arg)
logging.debug(u"%s.%s\nargs:\n%skwargs:\n%s" % (f.__module__, f.__name__, arg_str, kwarg_str))
start = time.time()
try:
result = f(*args, **kwargs)
if response:
end = time.time()
logging.debug(
u"%s.%s finished in %s seconds returning %s" % (f.__module__, f.__name__, end - start, result))
return result
except:
if response:
end = time.time()
logging.exception(u"%s.%s failed in %s seconds" % (f.__module__, f.__name__, end - start))
raise
set_cache_key(logged, f)
logged.__name__ = f.__name__
logged.__module__ = f.__module__
if hasattr(f, u"meta"):
logged.meta.update(f.meta)
return logged
return wrap
def arguments(**kwarg_types):
""" The arguments decorator function describes & validates the parameters of the function."""
for value in kwarg_types.itervalues():
_validate_type_spec(value)
def wrap(f):
# validate argspec
f_args = inspect.getargspec(f)
f_args = inspect.ArgSpec([a for a in f_args[0] if a not in ('self', 'cls')], f_args[1], f_args[2], f_args[3])
f_arg_count = len(f_args[0])
f_defaults = f_args[3]
if not f_defaults:
f_defaults = []
f_arg_defaults_count = len(f_defaults)
f_arg_no_defaults_count = f_arg_count - f_arg_defaults_count
f_arg_defaults = {
f_args[0][i]: f_defaults[i - f_arg_no_defaults_count] if i >= f_arg_no_defaults_count else MISSING
for i in xrange(f_arg_count)}
f_pure_default_args_dict = {f_args[0][i]: f_defaults[i - f_arg_no_defaults_count]
for i in xrange(f_arg_no_defaults_count, f_arg_count)}
if f_arg_count != len(kwarg_types):
raise ValueError(f.func_name + " does not contain the expected arguments!")
unknown_args = [arg for arg in f_args[0] if arg not in kwarg_types]
if unknown_args:
raise ValueError("No type information is supplied for %s!" % ", ".join(unknown_args))
def typechecked_f(*args, **kwargs):
arg_length = len(args)
if arg_length > f_arg_count:
raise ValueError("%s() takes %s arguments (%s given)" % (f.__name__, f_arg_count, arg_length))
for i in xrange(arg_length):
kwargs[f_args[0][i]] = args[i]
# accept MISSING as magical value or not
accept_missing = u'accept_missing' in kwargs
if accept_missing:
kwargs.pop(u'accept_missing')
# apply default value if available
for arg in kwarg_types:
value = kwargs.get(arg, f_arg_defaults[arg])
if value is MISSING:
value = f_arg_defaults.get(arg, MISSING)
kwargs[arg] = value
# validate number of arguments
if not len(kwargs) == len(kwarg_types):
raise ValueError("kwarg mismatch\nExpected:%s\nGot:%s" % (kwarg_types, kwargs))
# validate supplied arguments
unknown_args = [arg for arg in kwargs if arg not in kwarg_types]
if unknown_args:
raise ValueError("Unknown argument(s) %s supplied!" % ", ".join(unknown_args))
# validate argument values
for arg in kwargs:
_check_type(arg, kwarg_types[arg], kwargs[arg], accept_missing=accept_missing, func=f)
return f(**kwargs)
set_cache_key(typechecked_f, f)
typechecked_f.__name__ = f.__name__
typechecked_f.__module__ = f.__module__
typechecked_f.meta[u"fargs"] = f_args
typechecked_f.meta[u"kwarg_types"] = kwarg_types
typechecked_f.meta[u"pure_default_args_dict"] = f_pure_default_args_dict
if hasattr(f, u"meta"):
typechecked_f.meta.update(f.meta)
return typechecked_f
return wrap
def returns(type_=NoneType):
""" The retunrs decorator function describes & validates the result of the function."""
_validate_type_spec(type_)
def wrap(f):
def typechecked_return(*args, **kwargs):
result = f(*args, **kwargs)
return _check_type(u"Result", type_, result, func=f)
set_cache_key(typechecked_return, f)
typechecked_return.__name__ = f.__name__
typechecked_return.__module__ = f.__module__
typechecked_return.meta[u"return_type"] = type_
if hasattr(f, u"meta"):
typechecked_return.meta.update(f.meta)
return typechecked_return
return wrap
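# Illustrative usage sketch (added for clarity; the names below are hypothetical):
# stacking @returns on top of @arguments yields a function whose argument types and
# return type are both validated at call time.
#   @returns(unicode)
#   @arguments(name=unicode, age=(int, long))
#   def describe(name, age):
#       return u'%s is %s' % (name, age)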
def run(function, args, kwargs):
kwargs['accept_missing'] = None
result = function(*args, **kwargs)
type_, islist = _get_return_type_details(function)
return serialize_value(result, type_, islist, skip_missing=True)
def parse_parameters(function, parameters):
kwarg_types = get_parameter_types(function)
return get_parameters(parameters, kwarg_types)
def parse_complex_value(type_, value, islist):
if value is None:
return None
parser = _get_complex_parser(type_)
if islist:
return map(parser, value)
else:
return parser(value)
def check_function_metadata(function):
if "kwarg_types" not in function.meta or "return_type" not in function.meta:
raise ValueError("Can not execute function. Too little meta information is available!")
def get_parameter_types(function):
return function.meta["kwarg_types"]
def get_parameters(parameters, kwarg_types):
return {name: parse_parameter(name, type_, parameters[name]) if name in parameters else MISSING
for name, type_ in kwarg_types.iteritems()}
def get_type_details(type_, value=MISSING):
if isinstance(type_, tuple):
# The value can have multiple types.
if value is not MISSING:
# We must find the type by comparing the possible types with the real type of <value>
value_is_list = isinstance(value, list)
if value_is_list:
if not value:
return unicode, True # The type doesn't matter, the list is empty
value = value[0]
for t in type_:
is_list = isinstance(t, list)
if is_list != value_is_list:
continue
if is_list:
t = t[0]
if t in (str, unicode):
type_to_check = (str, unicode)
elif t in (int, long):
type_to_check = (int, long)
else:
type_to_check = t
if isinstance(value, type_to_check):
return type(value), is_list
# Weird... type not found and @arguments didn't raise... The serialization will probably fail.
is_list = isinstance(type_, list)
if is_list:
type_ = type_[0]
return type_, is_list
def serialize_complex_value(value, type_, islist, skip_missing=False):
if type_ == dict:
return value
def optimal_serializer(val):
if not isinstance(type_, object_factory) and isinstance(val, type_):
serializer = _get_complex_serializer(val.__class__)
else:
serializer = _get_complex_serializer(type_)
return serializer(val, skip_missing)
if value is None:
return None
if islist:
try:
return map(optimal_serializer, value)
except:
logging.warn("value for type %s was %s", type_, value)
raise
else:
return optimal_serializer(value)
def serialize_value(value, type_, islist, skip_missing=False):
if value is None \
or type_ in simple_types \
or (isinstance(type_, tuple) and all(t in simple_types for t in type_)):
return value
else:
return serialize_complex_value(value, type_, islist, skip_missing)
def parse_parameter(name, type_, value):
raw_type, is_list = get_type_details(type_, value)
if isinstance(value, list) != is_list:
raise ValueError("list expected for parameter %s and got %s or vice versa!" % (name, value))
if isinstance(value, list):
return map(lambda x: _parse_value(name, raw_type, x), value)
else:
return _parse_value(name, raw_type, value)
def _validate_type_spec(type_):
if isinstance(type_, list) and len(type_) != 1:
raise ValueError("Illegal type specification!")
DICT_KEY_ITERATOR_TYPE = type(dict().iterkeys())
def _check_type(name, type_, value, accept_missing=False, func=None):
if value == MISSING:
if accept_missing:
return value
else:
raise MissingArgumentException(name, func)
checktype = (str, unicode) if type_ in (str, unicode) else type_
checktype = (int, long) if checktype in (int, long) else checktype
if value is None and (isinstance(checktype, list) or type_ not in (int, long, float, bool)):
return value
if isinstance(type_, tuple):
# multiple types are allowed. checking if value is one of the them.
errors = []
for t in type_:
try:
return _check_type(name, t, value, accept_missing, func)
except (ValueError, TypeError) as e:
errors.append(e)
continue
logging.debug('\n\n'.join(map(str, errors)))
raise ValueError("%s is not of expected type %s! Its type is %s:\n%s" % (name, str(type_), type(value), value))
if isinstance(checktype, list) and isinstance(value, list):
checktype = (str, unicode) if checktype[0] in (str, unicode) else checktype[0]
for i, x in enumerate(value):
t = checktype.get_subtype(x) if isinstance(checktype, object_factory) else checktype
if not isinstance(x, t):
raise ValueError(
"%s: Not all items were of expected type %s. Encountered an item at index %s with type %s: %s."
% (name, str(checktype), i, type(x), x))
elif isinstance(checktype, list) and isinstance(value, (
types.GeneratorType, db.Query, ndb.Query, db._QueryIterator, itertools.chain, DICT_KEY_ITERATOR_TYPE)):
checktype = (str, unicode) if checktype[0] in (str, unicode) else checktype[0]
def checkStreaming():
for o in value:
if not isinstance(o, checktype):
raise ValueError(
"%s: Not all items were of expected type %s. Encountered an item with type %s: %s."
% (name, str(checktype), type(o), o))
yield o
return checkStreaming()
elif checktype == type and isinstance(value, list):
if len(value) != 1:
raise ValueError("%s: unexpected type count (%s)" % (name, len(value)))
def check(t, i):
if not isinstance(t, type):
raise ValueError(
"%s: Not all items were of expected type %s. Encountered an item at index %s with type %s: %s."
                    % (name, str(checktype), i, type(t), t))
if isinstance(value[0], tuple):
for i, t in enumerate(value[0]):
check(t, i)
else:
check(value[0], 0)
else:
if isinstance(checktype, object_factory):
checktype = checktype.get_subtype(value)
try:
if not isinstance(value, checktype):
raise ValueError(
"%s is not of expected type %s! Its type is %s:\n%s" % (name, str(checktype), type(value), value))
except TypeError as e:
raise TypeError("%s\nvalue: %s\nchecktype: %s" % (e.message, value, checktype))
return value
_complexParserCache = {}
def _get_complex_parser(type_):
if type_ is dict:
return lambda x: x
if type_ not in _complexParserCache:
def parse(value):
t = type_.get_subtype(value) if isinstance(type_, object_factory) else type_
inst = t()
complex_members, simple_members = get_members(t)
for name, prop in simple_members:
setattr(inst, name, value[name] if name in value else getattr(t, name).default)
for name, prop in complex_members:
setattr(inst, name, parse_complex_value(
prop.get_subtype(inst) if (prop.subtype_attr_name and prop.subtype_mapping) else prop.type,
value[name], prop.list) if name in value else MISSING)
return inst
_complexParserCache[type_] = parse
return parse
else:
return _complexParserCache[type_]
_value_types = {int, long, float, bool, NoneType}
def _parse_value(name, type_, value):
def raize():
raise ValueError("Incorrect type received for parameter '%s'. Expected %s and got %s (%s)."
% (name, type_, type(value), value))
istuple = isinstance(type_, tuple)
if (istuple and set(type_).issubset(_value_types)) or type_ in _value_types:
if not isinstance(value, type_):
raize()
return value
elif istuple:
for tt in type_:
try:
return _parse_value(name, tt, value)
except ValueError:
pass
raize()
elif value is None:
return None
elif type_ == unicode:
if not isinstance(value, (str, unicode)):
raize()
return value if isinstance(value, unicode) else unicode(value)
elif type_ == str:
if not isinstance(value, (str, unicode)):
raize()
return value
elif not isinstance(value, dict):
raize()
return parse_complex_value(type_, value, False)
_complex_serializer_cache = {}
def _get_complex_serializer(type_):
if type_ not in _complex_serializer_cache:
def serializer(value, skip_missing):
t = type_.get_subtype(value) if isinstance(type_, object_factory) else type_
complex_members, simple_members = get_members(t)
result = {name: getattr(value, name) for (name, _) in simple_members
if not skip_missing or getattr(value, name) is not MISSING}
def _serialize(name, prop):
attr = getattr(value, name)
real_type = prop.get_subtype(value) if (prop.subtype_attr_name and prop.subtype_mapping) else prop.type
serialized_value = serialize_complex_value(attr, real_type, prop.list, skip_missing)
return serialized_value
for (name, prop) in complex_members:
if not skip_missing or getattr(value, name) is not MISSING:
result[name] = _serialize(name, prop)
return result
_complex_serializer_cache[type_] = serializer
return serializer
else:
return _complex_serializer_cache[type_]
def _get_return_type_details(function):
return get_type_details(function.meta["return_type"])
|
the-stack_0_19355 | """
Tree
is either empty or consists of a root and zero or more subtrees, each of which is also a tree. The root
of each subtree is connected to the root of the parent tree by an edge.
*
left right
/ \
* *
left right left right
\
*
left right
Find the successor/predecessor node of a node in a tree
Successor:
The BinaryTree has left/right attributes which reference the roots of its left/right subtrees (its successors).
Predecessor (see ./parse_tree.py):
To find the predecessor of a node, keep a stack and push the current node before descending to a child node.
The node at the top of the stack is then the predecessor of the current node.
"""
class BinaryTree(object):
def __init__(self, rootobj=None):
self.key = rootobj
self.left = None
self.right = None
def __repr__(self, i: int = 0):
"""Print BinaryTree in a structured way"""
s = ''
s += ' ' * (i-1) * bool(i) + ':..' * bool(i) + str(self.key) + '\n'
if self.left is not None:
s += self.left.__repr__(i + 1)
if self.right is not None:
s += self.right.__repr__(i + 1)
return s
def insertLeft(self, newobj):
"""Insert a subtree as the left child"""
tmp = BinaryTree(newobj)
if self.left is None:
self.left = tmp
else:
tmp.left = self.left
self.left = tmp
return self
def insertRight(self, newobj):
"""Insert a subtree as the right child"""
tmp = BinaryTree(newobj)
if self.right is None:
self.right = tmp
else:
tmp.right = self.right
self.right = tmp
return self
if __name__ == '__main__':
t = BinaryTree('a')
t.insertLeft('b')
t.insertRight('c')
t.left.insertLeft('e')
t.left.insertRight('f')
print(t)
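    # Added sketch of the stack idea from the module docstring: pushing a node before
    # descending means the top of the stack is always the predecessor (parent) of the
    # node currently being visited.  This walk prints the keys in preorder.
    stack, current = [], t
    order = []
    while stack or current is not None:
        if current is not None:
            order.append(current.key)
            stack.append(current)        # current is the predecessor of its left child
            current = current.left
        else:
            current = stack.pop().right  # back up to the predecessor, then take its right branch
    print(order)  # ['a', 'b', 'e', 'f', 'c']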
|
the-stack_0_19357 | import requests, json, io, datetime, pathlib, sys, time, os, csv
from io import StringIO
import county_report, state_report
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
STATE_ABBR = 'SC'
STATE = 'South Carolina'
URL = 'https://www.arcgis.com/home/webmap/viewer.html?url=https://services2.arcgis.com/XZg2efAbaieYAXmu/ArcGIS/rest/services/COVID19_SharingView/FeatureServer/0&source=sd'
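# Yield, for each row in the given collection of table rows, the text of its <td> cells.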
def get_row_data(table):
for row in table:
yield [td.text for td in row.find_elements_by_xpath(".//td")]
def scraper():
counties = []
# You will need a WebDriver for Edge. See https://developer.microsoft.com/en-us/microsoft-edge/tools/webdriver/
try:
browser = webdriver.Edge("msedgedriver.exe")
browser.get(URL)
time.sleep(1)
show_table_link = WebDriverWait(browser, 30).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[1]/div[3]/div[3]/div/div/div[3]/div[2]/div/div[1]/div[2]/div[1]/div[1]/div/div[2]/span')))
show_table_link.click()
time.sleep(1)
county_div = WebDriverWait(browser, 30).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[1]/div[1]/div[3]/div[5]/div[4]/div[1]/div/div/div/div[1]/div/div/div[2]/div/div[2]/div')))
county_div_rows = county_div.find_elements_by_xpath('.//div[@role="row"]')
# SC puts its county level data into lots of <div> elements, with one <div> per county. Each <div> has its own single-row <table> that contains the county data. Thus, we
# have some extra stuff to do to make this work right.
for div_row in county_div_rows:
county_table = div_row.find_element_by_xpath('.//table')
htmlRows = county_table.find_elements_by_xpath(".//tr")
rows = get_row_data(htmlRows)
for row in rows:
county_name = row[0]
if county_name == 'Unknown':
continue
confirmed = int(row[3].replace(',', ''))
deaths = int(row[4].replace(',', ''))
county = county_report.CountyReport(STATE, county_name, confirmed, deaths, -1, -1, datetime.datetime.now())
counties.append(county)
except:
print("Unexpected error:", sys.exc_info()[0])
browser.quit()
# print the number of counties we processed
print(' ', STATE_ABBR, ':', len(counties), ' counties processed OK')
# build the state-level report object that will include all of the counties
stateReport = state_report.StateReport(STATE, STATE_ABBR, counties, datetime.datetime.now())
# return the state-level report
return stateReport |
the-stack_0_19359 | import os
import bpy
from tests import utils
from io_scene_xray import plugin_prefs
class TestLevel(utils.XRayTestCase):
def test_default(self):
if bpy.app.version >= (2, 80, 0):
prefs = plugin_prefs.get_preferences()
prefs.gamemtl_file = os.path.join(self.relpath(), 'gamemtl.xr')
# Import
bpy.ops.xray_import.level(filepath=os.path.join(
self.relpath(), 'test_fmt_level', 'level'
))
# Export
level_obj = bpy.data.objects['test_fmt_level']
level_obj.select_set(True)
directory = self.outpath('test_fmt_level_export')
if not os.path.exists(directory):
os.makedirs(directory)
bpy.context.view_layer.objects.active = level_obj
bpy.ops.xray_export.level(directory=directory)
# Assert
self.assertReportsNotContains('WARNING')
|
the-stack_0_19360 | from math import inf
def max_sub_array_sum(array:list):
'''
Takes an array and returns the maximum possible sum of a sub-array and
returns the sum and the indices of sub-array whose sum is that max sum
'''
def conquer(left:int, mid:int, right:int) -> tuple:
left_sum, right_sum = -inf, -inf
max_left, max_right = mid, mid
s = 0
for i in range(mid, left - 1, -1):
s += array[i]
if s >= left_sum:
left_sum = s
max_left = i
s = 0
for i in range(mid + 1, right + 1):
s += array[i]
if s >= right_sum:
right_sum = s
max_right = i
return left_sum + right_sum, max_left, max_right
def divide(left:int, right:int) -> tuple:
if left == right:
return array[left], left, left
else:
mid = (left + right) // 2
left_sum = divide(left, mid)
right_sum = divide(mid + 1, right)
mid_sum = conquer(left, mid, right)
returned_sum = left_sum
if returned_sum[0] < right_sum[0]:
returned_sum = right_sum
if returned_sum[0] < mid_sum[0]:
returned_sum = mid_sum
return returned_sum
return divide(0, len(array) - 1)
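if __name__ == '__main__':
    # Quick sanity check (added example): the classic test array below has a maximum
    # contiguous sum of 6, achieved by the slice [4, -1, 2, 1] spanning indices 3..6.
    best_sum, start, end = max_sub_array_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4])
    print(best_sum, start, end)  # expected: 6 3 6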
|
the-stack_0_19361 | import torch
import torch.nn as nn
import hourglasses
import modules.vae as V
import modules.landmark_projection as lp
import torch.nn.functional as F
import vgg
from torchvision.transforms import Normalize
from time import process_time
# # # # # # # # # # DISENTANGLING CONTENT AND STYLE VIA UNSUPERVISED GEOMETRY DISTILLATION # # # # # # # # # #
class GeomVAE(nn.Module):
"""VAE of DISENTANGLING CONTENT AND STYLE VIA UNSUPERVISED GEOMETRY DISTILLATION"""
def __init__(self, in_channels=3, landmarks=30, sigma=2):
super(GeomVAE, self).__init__()
num_channels = 16 # from hourglass paper
# Structure Branch
self.structure_branch = hourglasses.StackedHourGlass(in_channels=in_channels, nChannels=num_channels, nStack=1,
nModules=2, numReductions=4, nJoints=landmarks)
# self.structure_branch = hg2.Hourglass()
self.project_y = lp.HeatmapProjection(sigma=sigma, landmarks=landmarks)
self.reduce_y = False # reduce y to 1 channel after extraction
if self.reduce_y:
landmarks = 1
self.y_norm = nn.InstanceNorm2d(landmarks)
self.encode_structure = EncoderVAE(in_channels=landmarks, need_skips=True)
# Style Branch
self.encode_style = EncoderVAE(in_channels=in_channels+landmarks, need_skips=False)
# Decoder
self.skip_mode = 'cat'
# self.skip_mode = 'add'
decoder_in = 128
self.decoder = DecoderVAE(in_channels=decoder_in, out_channels=in_channels, skip_mode=self.skip_mode)
# batch_norm = True
# self.vgg_extractor = ExtractorVGG(features=vgg.make_layers(vgg.cfgs['E'], batch_norm=batch_norm),
# arch='vgg19', batch_norm=batch_norm).eval()
self.L1_loss = nn.L1Loss()
def forward(self, x):
# # Structure y through hourglass
t = [process_time()]
y = self.structure_branch(x)
y = y[0]
t.append(process_time())
y = F.interpolate(y, size=(256, 256), mode='nearest')
# y = F.interpolate(y, size=(256, 256), mode='bilinear', align_corners=True)
y, prior_loss = self.project_y(y)
# y = y.detach()
t.append(process_time())
if self.reduce_y:
y = y.sum(1, keepdim=True)
# y, _ = y.max(dim=1)
# y = y.unsqueeze(1)
y = self.y_norm(y)
# y = self.y_norm(y)
# print(y.max(), y.min())
z_structure, skips = self.encode_structure(y)
# # Style branch
# # x_y = torch.cat((x, y), dim=1) # concatenate x with structure, y, to encode style p(z|x, y)
t.append(process_time())
z_style = self.encode_style(torch.cat((x, y), dim=1))
# # By modeling the two distributions as Gaussian with identity covariances,
# # the KL Loss is simply equal to the Euclidean distance between their means
t.append(process_time())
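        # (For N(mu1, I) and N(mu2, I), KL(p||q) = 0.5 * ||mu1 - mu2||^2, which is what is computed below.)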
kl_loss = 0.5 * torch.pow(z_style.squeeze() - z_structure.squeeze(), 2).sum(-1)
kl_loss = kl_loss.mean()
t.append(process_time())
z_style = self.reparameterize(z_style)
if self.skip_mode == 'cat':
z = torch.cat((z_style, z_structure), dim=1)
else:
z = z_style + z_structure # fuse features, essentially the first skip layers
x_out = self.decoder(z, skips=skips)
t.append(process_time())
rc_loss = self.reconstruction_loss(x, x_out)
t.append(process_time())
# delta_t = []
# for i in range(1, len(t)):
# delta_t.append(t[i] - t[i-1])
# print(delta_t)
return x_out, y, (rc_loss, prior_loss, kl_loss)
@staticmethod
def reparameterize(mu, logvar=None):
if logvar is None:
            # in the paper, they just estimate mu, and use an identity matrix as sigma
logvar = torch.ones_like(mu)
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return mu + eps * std
def reconstruction_loss(self, x_in, x_out):
"""
        Calculate the reconstruction loss of the whole model using combined losses for L1 loss between the image pair,
and loss between the image pairs' features from the l^th layer of VGG-19.
**Authors note it would also be possible to add an adversarial loss too
:param x_in: Original Image
:param x_out: Reconstructed Image
:param lam: Weighting factor for feature map losses
:return:
"""
# self.vgg_extractor.eval()
# calculate L1(?) loss between x_in, x_out
x_loss = self.L1_loss(x_in, x_out)
return x_loss
class EncoderVAE(nn.Module):
def __init__(self, in_channels, need_skips=False):
super(EncoderVAE, self).__init__()
self.need_skips = need_skips
# self.layer_0 = nn.Sequential(nn.InstanceNorm2d(in_channels),
# nn.Conv2d(in_channels, 64, kernel_size=4, stride=2, padding=1, bias=True))
self.layer_0 = nn.Conv2d(in_channels, 64, kernel_size=4, stride=2, padding=1, bias=True)
main_layers = []
# Arch first layer is 64-128, rest are 128-128 channels
arch = [(64, 128, 4, 2)] + [(128, 128, 4, 2)] * 5 + [(128, 128, 4, 2, 1, True)]
# final layer maps to a vector so can't instant norm
for layer in arch:
main_layers.append(V.LeakyBlock(*layer))
self.main_layers = nn.ModuleList(main_layers)
self.layer_mu = nn.Conv2d(128, 128, kernel_size=1, stride=1)
def forward(self, x):
x = self.layer_0(x)
skips = [x.clone()]
for i, layer in enumerate(self.main_layers):
x = layer(x)
if self.need_skips:
skips.append(x.clone())
assert not torch.equal(x[0, :, :, :], x[1, :, :, :])
x = self.layer_mu(x)
if self.need_skips:
return x, skips
else:
return x
class DecoderVAE(nn.Module):
"""
Using upsample->skip->conv instead of skip->deconv
"""
def __init__(self, in_channels, out_channels, skip_mode='cat'):
super(DecoderVAE, self).__init__()
decoder_channels = 128
if skip_mode == 'cat':
in_channels *= 2
arch = [(decoder_channels*2, decoder_channels, 4, 2)] * 6 + [(decoder_channels*2, 64, 4, 2)]
final_channels = 128
elif skip_mode == 'add':
arch = [(decoder_channels, decoder_channels, 4, 2)] * 6 + [(decoder_channels, 64, 4, 2)]
final_channels = 64
else:
print('Invalid skip_mode')
raise NotImplementedError
# opposite to encoder so layer_mu is first, then layer_9 at the end
self.conv_0 = nn.Conv2d(in_channels, decoder_channels, kernel_size=1, stride=1, padding=0)
main_layers = []
for layer in arch:
main_layers.append(V.DeconvBlock(*layer))
self.main_layers = nn.ModuleList(main_layers)
self.tanh = nn.Tanh()
self.relu = nn.ReLU()
self.layer_end = nn.ConvTranspose2d(final_channels, out_channels, kernel_size=4, stride=2, padding=1, bias=True)
self.skip_mode = skip_mode
def forward(self, x, skips=None):
x = self.conv_0(x)
skips = skips[::-1]
for layer_id, layer in enumerate(self.main_layers):
if skips is not None:
x = self.skip_layer(x, skips[layer_id])
x = layer(x)
x = self.skip_layer(x, skips[-1])
x = self.tanh(self.layer_end(self.relu(x)))
return x
def skip_layer(self, x, skip_x):
if self.skip_mode == 'cat':
return torch.cat((x, skip_x), dim=1)
else:
return x + skip_x
class ExtractorVGG(vgg.VGG):
def __init__(self, features, arch, batch_norm):
super(ExtractorVGG, self).__init__(features)
# check for batch norm
if batch_norm:
arch = 'vgg19_bn'
self.load_weights(arch)
del self.classifier
# # extract features before every Maxpool layer
# if not batch_norm:
# self.extract_ids = [3, 8, 17, 26, 35]
# else:
# self.extract_ids = [5, 12, 25, 38, 51]
# extract features after every Maxpool layer
if not batch_norm:
self.extract_ids = [4, 9, 18, 27, 36]
else:
self.extract_ids = [6, 13, 26, 39, 52]
self.loss = nn.L1Loss()
def _forward(self, x):
# normalize images
# x = F.interpolate(x, size=(224, 224), mode='bilinear', align_corners=False)
x = self.normalize(x)
outputs = []
for i, layer in enumerate(self.features):
x = layer(x)
if i in self.extract_ids:
outputs.append(x)
return tuple(outputs)
def load_weights(self, arch, progress=True):
state_dict = vgg.load_state_dict_from_url(vgg.model_urls[arch],
progress=progress)
state_dict = {k: v for k, v in state_dict.items() if k in self.state_dict()}
self.load_state_dict(state_dict)
def forward(self, x_in, x_out, lam=1.):
# def reconstruction_loss(self, x_in, x_out, lam=1.):
"""
Calculate the reconstruction loss of the whole model using combined losses for L1 loss between the image apir,
and loss between the image pairs' features from the l^th layer of VGG-19.
**Authors note it would also be possible to add an adversarial loss too
:param x_in: Original Image
:param x_out: Reconstructed Image
:param lam: Weighting factor for feature map losses
:return:
"""
# calculate L1(?) losses between l-th vgg features f_in, f_out for all l
x_loss = 0.
# concat inputs
batch_size = x_in.size(0)
x = torch.cat((x_in, x_out), dim=0)
fmaps = self._forward(x)
for layer_id in range(len(fmaps)):
x_loss += lam * self.loss(fmaps[layer_id][:batch_size, :, :, :], fmaps[layer_id][batch_size:, :, :, :])
return x_loss
@staticmethod
def normalize(x):
# Normalize with imagenet mean and std
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
x_new = x.clone()
for channel in range(3):
            x_new[:, channel, :, :] = (x[:, channel, :, :] - mean[channel]) / std[channel]
return x_new
|
the-stack_0_19362 | # Copyright 2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import lzma
import os
import sys
import shutil
import subprocess
import pickle
import hashlib
import tarfile, zipfile
import tempfile
from glob import glob
from mesonbuild.environment import detect_ninja
from mesonbuild.mesonlib import windows_proof_rmtree
from mesonbuild import mlog
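# Write a companion '<archive>.sha256sum' file holding the digest and base name of the archive.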
def create_hash(fname):
hashname = fname + '.sha256sum'
m = hashlib.sha256()
m.update(open(fname, 'rb').read())
with open(hashname, 'w') as f:
f.write('%s %s\n' % (m.hexdigest(), os.path.basename(fname)))
def create_zip(zipfilename, packaging_dir):
prefix = os.path.dirname(packaging_dir)
removelen = len(prefix) + 1
with zipfile.ZipFile(zipfilename,
'w',
compression=zipfile.ZIP_DEFLATED,
allowZip64=True) as zf:
zf.write(packaging_dir, packaging_dir[removelen:])
for root, dirs, files in os.walk(packaging_dir):
for d in dirs:
dname = os.path.join(root, d)
zf.write(dname, dname[removelen:])
for f in files:
fname = os.path.join(root, f)
zf.write(fname, fname[removelen:])
def del_gitfiles(dirname):
for f in glob(os.path.join(dirname, '.git*')):
if os.path.isdir(f) and not os.path.islink(f):
windows_proof_rmtree(f)
else:
os.unlink(f)
def process_submodules(dirname):
module_file = os.path.join(dirname, '.gitmodules')
if not os.path.exists(module_file):
return
subprocess.check_call(['git', 'submodule', 'update', '--init', '--recursive'], cwd=dirname)
for line in open(module_file):
line = line.strip()
if '=' not in line:
continue
k, v = line.split('=', 1)
k = k.strip()
v = v.strip()
if k != 'path':
continue
del_gitfiles(os.path.join(dirname, v))
def run_dist_scripts(dist_root, dist_scripts):
assert(os.path.isabs(dist_root))
env = os.environ.copy()
env['MESON_DIST_ROOT'] = dist_root
for d in dist_scripts:
script = d['exe']
args = d['args']
name = ' '.join(script + args)
print('Running custom dist script {!r}'.format(name))
try:
rc = subprocess.call(script + args, env=env)
if rc != 0:
sys.exit('Dist script errored out')
except OSError:
print('Failed to run dist script {!r}'.format(name))
sys.exit(1)
def git_have_dirty_index(src_root):
'''Check whether there are uncommitted changes in git'''
ret = subprocess.call(['git', '-C', src_root, 'diff-index', '--quiet', 'HEAD'])
return ret == 1
def create_dist_git(dist_name, src_root, bld_root, dist_sub, dist_scripts):
if git_have_dirty_index(src_root):
mlog.warning('Repository has uncommitted changes that will not be included in the dist tarball')
distdir = os.path.join(dist_sub, dist_name)
if os.path.exists(distdir):
shutil.rmtree(distdir)
os.makedirs(distdir)
subprocess.check_call(['git', 'clone', '--shared', src_root, distdir])
process_submodules(distdir)
del_gitfiles(distdir)
run_dist_scripts(distdir, dist_scripts)
xzname = distdir + '.tar.xz'
# Should use shutil but it got xz support only in 3.5.
with tarfile.open(xzname, 'w:xz') as tf:
tf.add(distdir, dist_name)
# Create only .tar.xz for now.
# zipname = distdir + '.zip'
# create_zip(zipname, distdir)
shutil.rmtree(distdir)
return (xzname, )
def hg_have_dirty_index(src_root):
'''Check whether there are uncommitted changes in hg'''
out = subprocess.check_output(['hg', '-R', src_root, 'summary'])
return b'commit: (clean)' not in out
def create_dist_hg(dist_name, src_root, bld_root, dist_sub, dist_scripts):
if hg_have_dirty_index(src_root):
mlog.warning('Repository has uncommitted changes that will not be included in the dist tarball')
os.makedirs(dist_sub, exist_ok=True)
tarname = os.path.join(dist_sub, dist_name + '.tar')
xzname = tarname + '.xz'
subprocess.check_call(['hg', 'archive', '-R', src_root, '-S', '-t', 'tar', tarname])
if dist_scripts:
mlog.warning('dist scripts are not supported in Mercurial projects')
with lzma.open(xzname, 'wb') as xf, open(tarname, 'rb') as tf:
shutil.copyfileobj(tf, xf)
os.unlink(tarname)
# Create only .tar.xz for now.
# zipname = os.path.join(dist_sub, dist_name + '.zip')
# subprocess.check_call(['hg', 'archive', '-R', src_root, '-S', '-t', 'zip', zipname])
return (xzname, )
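# Smoke-test the generated tarball: unpack it, configure with Meson, build with ninja, run the
# test suite and do a DESTDIR install, cleaning up the scratch directories afterwards.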
def check_dist(packagename, meson_command, privdir):
print('Testing distribution package %s' % packagename)
unpackdir = os.path.join(privdir, 'dist-unpack')
builddir = os.path.join(privdir, 'dist-build')
installdir = os.path.join(privdir, 'dist-install')
for p in (unpackdir, builddir, installdir):
if os.path.exists(p):
shutil.rmtree(p)
os.mkdir(p)
ninja_bin = detect_ninja()
try:
tf = tarfile.open(packagename)
tf.extractall(unpackdir)
srcdir = glob(os.path.join(unpackdir, '*'))[0]
if subprocess.call(meson_command + ['--backend=ninja', srcdir, builddir]) != 0:
print('Running Meson on distribution package failed')
return 1
if subprocess.call([ninja_bin], cwd=builddir) != 0:
print('Compiling the distribution package failed')
return 1
if subprocess.call([ninja_bin, 'test'], cwd=builddir) != 0:
print('Running unit tests on the distribution package failed')
return 1
myenv = os.environ.copy()
myenv['DESTDIR'] = installdir
if subprocess.call([ninja_bin, 'install'], cwd=builddir, env=myenv) != 0:
print('Installing the distribution package failed')
return 1
finally:
shutil.rmtree(unpackdir)
shutil.rmtree(builddir)
shutil.rmtree(installdir)
print('Distribution package %s tested' % packagename)
return 0
def run(args):
src_root = args[0]
bld_root = args[1]
meson_command = args[2:]
priv_dir = os.path.join(bld_root, 'meson-private')
dist_sub = os.path.join(bld_root, 'meson-dist')
buildfile = os.path.join(priv_dir, 'build.dat')
build = pickle.load(open(buildfile, 'rb'))
dist_name = build.project_name + '-' + build.project_version
_git = os.path.join(src_root, '.git')
if os.path.isdir(_git) or os.path.isfile(_git):
names = create_dist_git(dist_name, src_root, bld_root, dist_sub, build.dist_scripts)
elif os.path.isdir(os.path.join(src_root, '.hg')):
names = create_dist_hg(dist_name, src_root, bld_root, dist_sub, build.dist_scripts)
else:
print('Dist currently only works with Git or Mercurial repos')
return 1
if names is None:
return 1
error_count = 0
for name in names:
rc = check_dist(name, meson_command, priv_dir) # Check only one.
if rc == 0:
create_hash(name)
error_count += rc
return 1 if error_count else 0
|
the-stack_0_19366 | from pathlib import Path
from typing import List
from blspy import AugSchemeMPL
from peas.util.ints import uint32
from peas.util.keychain import Keychain
from peas.util.validate_alert import create_alert_file, create_not_ready_alert_file, validate_alert_file
bitcoin_hash = None
bram_message = None
status = None
while True:
status_input = input("What is the status of this alert? (ready/not ready)").lower()
if status_input == "ready":
status = True
break
elif status_input == "not ready":
status = False
break
else:
print("Unknown input")
keychain: Keychain = Keychain()
print("\n___________ SELECT KEY ____________")
private_keys = keychain.get_all_private_keys()
if len(private_keys) == 0:
print("There are no saved private keys.")
quit()
print("Showing all private keys:")
for sk, seed in private_keys:
print("\nFingerprint:", sk.get_g1().get_fingerprint())
selected_key = None
while True:
user_input = input("\nEnter fingerprint of the key you want to use, or enter Q to quit: ").lower()
if user_input == "q":
quit()
for sk, seed in private_keys:
fingerprint = sk.get_g1().get_fingerprint()
pub = sk.get_g1()
if int(user_input) == fingerprint:
print(f"Selected: {fingerprint}")
selected_key = sk
break
if selected_key is not None:
break
print("\n___________ HD PATH ____________")
while True:
hd_path = input("Enter the HD path in the form 'm/12381/8444/n/n', or enter Q to quit: ").lower()
if hd_path == "q":
quit()
verify = input(f"Is this correct path: {hd_path}? (y/n) ").lower()
if verify == "y":
break
k = Keychain()
private_keys = k.get_all_private_keys()
path: List[uint32] = [uint32(int(i)) for i in hd_path.split("/") if i != "m"]
# Derive HD key using path from input
for c in path:
selected_key = AugSchemeMPL.derive_child_sk(selected_key, c)
print("Public key:", selected_key.get_g1())
# get file path
file_path = None
while True:
file_path = input("Enter the path where you want to save signed alert file, or q to quit: ")
if file_path == "q" or file_path == "Q":
quit()
file_path = file_path.strip()
y_n = input(f"Is this correct path (y/n)?: {file_path} ").lower()
if y_n == "y":
break
f_path: Path = Path(file_path)
if status is True:
print("")
print("___________ BITCOIN BLOCK HASH ____________")
while True:
bitcoin_hash = input("Insert Bitcoin block hash: ")
print(f"Bitcoin block hash = {bitcoin_hash}")
y_n = input("Does this look good (y/n): ").lower()
if y_n == "y":
break
print("")
print("___________ BRAM MESSAGE ____________")
while True:
bram_message = input("Insert message from Bram: ")
print(f"Bram message = {bram_message}")
y_n = input("Does this look good (y/n): ").lower()
if y_n == "y":
break
genesis_challenge_preimage = f"bitcoin_hash:{bitcoin_hash},bram_message:{bram_message}"
create_alert_file(f_path, selected_key, genesis_challenge_preimage)
print(f"Alert written to file {f_path}")
pubkey = f"{bytes(selected_key.get_g1()).hex()}"
validated = validate_alert_file(f_path, pubkey)
if validated:
print(f"Signature has passed validation for pubkey: {pubkey}")
else:
print(f"Signature has failed validation for pubkey: {pubkey}")
assert False
else:
create_not_ready_alert_file(f_path, selected_key)
print(f"Alert written to file {f_path}")
|
the-stack_0_19367 | # !/usr/bin/python2
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from resource_management.core.logger import Logger
from resource_management.libraries.functions import component_version
from resource_management.libraries.script import Script
from unittest import TestCase
Logger.initialize_logger()
class TestComponentVersionMapping(TestCase):
def test_get_component_versions(self):
"""
Tests that the component version map can be parsed
:return:
"""
command_json = TestComponentVersionMapping._get_component_version_mappings()
Script.config = command_json
version = component_version.get_component_repository_version(service_name="HDFS",
component_name="DATANODE")
self.assertEqual(version, "2.5.0.0-1234")
version = component_version.get_component_repository_version(service_name = "ZOOKEEPER",
component_name = "ZOOKEEPER_SERVER")
self.assertEqual(version, "2.6.0.0-9999")
def test_get_component_version_by_service_name(self):
"""
Tests that the component version map can be parsed using only the service name
:return:
"""
command_json = TestComponentVersionMapping._get_component_version_mappings()
Script.config = command_json
version = component_version.get_component_repository_version(service_name="HDFS")
self.assertEqual(version, "2.5.0.0-1234")
version = component_version.get_component_repository_version(service_name = "ZOOKEEPER")
self.assertEqual(version, "2.6.0.0-9999")
@staticmethod
def _get_component_version_mappings():
"""
A typical component version mapping structure
:return:
"""
return {
"componentVersionMap": {
"HDFS": {
"NAMENODE": "2.5.0.0-1234",
"SECONDARY_NAMENODE": "2.5.0.0-1234",
"DATANODE": "2.5.0.0-1234",
"HDFS_CLIENT": "2.5.0.0-1234"
},
"ZOOKEEPER": {
"ZOOKEEPER_SERVER": "2.6.0.0-9999",
"ZOOKEEPER_CLIENT": "2.6.0.0-9999"
}
},
}
|
the-stack_0_19368 | # -*- coding: utf-8 -*-
"""Monitoring library for Puppet Agent and Puppet Server components.
Implements a number of interfaces for interacting with Puppet Agent state and
provides the ability to reliably determine the status of a Puppet Agent,
profile catalog run performance and detect failed resources and events.
Also provides interfaces for interacting with Puppet Server service APIs to
reliably detect the status of critical services on Puppet.
Attributes:
LOG (:class:`logging.Logger`): A module level logger instance. The
configuration of handlers is the prerogative of developers consuming
the library.
"""
import logging
import os
import yaml
from .utils import get_timedelta, safe_get
LOG = logging.getLogger('puppetbeacon.monitor')
class AgentState(object):
"""Base class that provides an interface to the Puppet agent state.
Provides an interface that can be used to interact with the various Puppet
agent state objects to determine if an agent is enabled or actively
executing a catalog run. Also provides a method for retrieving
detailed statistics from the last catalog run.
Args:
summary_file (:obj:`str`, optional): Fully-qualified path to the Puppet
Agent last run summary file. This file contains YAML structured
data and statistics about the agents last catalog run.
Default: /opt/puppetlabs/puppet/cache/state/last_run_summary.yaml
run_lock (:obj:`str`, optional): Fully-qualified path to the Puppet
Agent run lock. This lock is present if the agent is actively
executing a catalog run.
Default: /opt/puppetlabs/puppet/cache/state/agent_catalog_run.lock
disabled_lock (:obj:`str`, optional): Fully-qualified path to the
Puppet Agent disabled lock. This lock is present if the agent has
been administratively disabled and contains an optional message.
Default: /opt/puppetlabs/puppet/cache/state/agent_disabled.lock
Attributes:
disabled_message (:obj:`str`): If the agent has been administratively
disabled, the disabled_message will be provided here if available.
Defaults to None.
"""
def __init__(self, summary_file=None, run_lock=None, disabled_lock=None):
self.summary_file = summary_file if summary_file is not None else \
'/opt/puppetlabs/puppet/cache/state/last_run_summary.yaml'
self.run_lock = run_lock if run_lock is not None else \
'/opt/puppetlabs/puppet/cache/state/agent_catalog_run.lock'
self.disabled_lock = disabled_lock if disabled_lock is not None else \
'/opt/puppetlabs/puppet/cache/state/agent_disabled.lock'
self._disabled = None
self.disabled_message = None
@property
def disabled(self):
""":obj:`bool`: Puppet Agent administrative status. True if the agent
is disabled, False otherwise.
"""
try:
with open(self.disabled_lock, 'r') as disabled_lock:
LOG.debug('Located agent disabled lock at %s, looking ' +
'for an administrative message.', self.disabled_lock)
self.disabled_message = \
safe_get(yaml.safe_load(disabled_lock), 'disabled_message')
LOG.warning('Puppet agent has been administratively ' +
'disabled. Message: %s', self.disabled_message)
return True
except EnvironmentError:
LOG.debug('No agent disabled lock present at %s, agent is ' +
'enabled.', self.disabled_lock)
return False
def get_run_summary(self):
"""Retrieves and deserializes agent run summary data and statistics.
Provides an interface to interact with the last run summary data and
statistics of a Puppet agent. Method performs a safe_load when
deserializing to provide protection against execution of arbitrary
code.
Returns:
dict: Returns a nested dictionary object with the deserialized data
and statistics from the agent's last run. Returns an empty
dictionary if the method is unable to retrieve or deserialize the
summary data.
"""
run_summary = None
try:
with open(self.summary_file, 'r') as summary_file:
run_summary = yaml.safe_load(summary_file)
LOG.debug('Successfully parsed Puppet agent summary data in ' +
'file %s', self.summary_file)
except IOError as error:
# TODO(e.westfall): Raise exception rather than return empty dict?
LOG.error('Unable to locate or open summary file %s. Error: %s',
self.summary_file, error)
except yaml.YAMLError as error:
LOG.error('Unable to parse summary file %s. Error: %s',
self.summary_file, error)
return run_summary if run_summary else {}
class PuppetAgent(AgentState):
"""Provides an interface to detailed Puppet agent data and statistics.
Implements an interface that exposes data and statistics providing detailed
information about resource and event failures, the last time the agent
completed a catalog run and its duration, and the agent version.
Also provides a method to determine if the agent is currently executing a
catalog run and if so, its duration.
Args:
*args: Variable length argument list that is passed through to the
:class:`AgentState` base class initializer.
Can be used to override the default values for `summary_file`,
`run_lock` and `disabled_lock` when directly instantiating a
:class:`PuppetAgent` instance.
**kwargs: Arbitrary keyword arguments that are passed through to the
:class:`AgentState` base class.
Can be used to override the default values for `summary_file`,
`run_lock` and `disabled_lock` when directly instantiating a
:class:`PuppetAgent` instance.
Attributes:
last_run (:obj:`int`): The number of seconds since the last catalog
run.
last_run_duration (:obj:`int`): The duration of the last catalog run
in seconds.
events_failed (:obj:`int`): The number of failed events during the last
catalog run.
resources_failed (:obj:`int`): The number of resources that failed
during the last catalog run.
resources_failed_restart (:obj:`int`): The number of resources that
failed to restart during the last catalog run.
puppet_version (:obj:`str`): The Puppet agent version.
"""
def __init__(self, *args, **kwargs):
AgentState.__init__(self, *args, **kwargs)
self._run_duration = None
self.last_run = None
self.last_run_duration = None
self.events_failed = None
self.resources_failed = None
self.resources_failed_restart = None
self.puppet_version = None
self.get_last_run()
@property
def run_duration(self):
"""Determine if a catalog run is in progress and return duration.
Checks for the presence of the agent run lock and if present,
determines the duration of the catalog run by calculating the age of
the lock in seconds.
Returns:
int: An integer representing the number of seconds the run lock has
been held by the Puppet agent.
"""
run_duration = None
try:
run_duration = get_timedelta(os.path.getmtime(self.run_lock))
LOG.debug('Located agent run lock at %s, Puppet agent has been ' +
'executing a catalog run for %s seconds.',
self.run_lock, run_duration)
except OSError:
LOG.debug('Puppet agent is not executing a catalog run.')
return run_duration
def get_last_run(self):
"""Obtains and processes summary data for last catalog run.
Calls the :method:`get_run_summary` method from the :class:`AgentState`
class to obtain last run summary data and statistics.
Processes key data using the :func:`safe_get` helper function to safely
evaluate nested values from the deserialized object. If values can be
found, instance attributes are set.
Returns:
bool: Returns True if last run summary data was successfully
retrieved and processed, returns False otherwise.
"""
run_summary = self.get_run_summary()
if not run_summary:
return False
self.last_run = \
get_timedelta(safe_get(run_summary, 'time', 'last_run'))
self.last_run_duration = \
int(safe_get(run_summary, 'time', 'total'))
self.events_failed = safe_get(run_summary, 'events', 'failure')
self.resources_failed = \
safe_get(run_summary, 'resources', 'failed')
self.resources_failed_restart = \
safe_get(run_summary, 'resources', 'failed_to_restart')
self.puppet_version = safe_get(run_summary, 'version', 'puppet')
return True
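# --- Added illustration (not part of the original module) ---------------------
# Minimal usage sketch combining the classes above. It assumes the default
# Puppet state paths exist on the host; attribute values fall back to None when
# the corresponding state files are missing.
def _example_usage():  # pragma: no cover
    """Illustrative only: report agent status via the interfaces above."""
    agent = PuppetAgent()
    if agent.disabled:
        LOG.warning('Agent disabled, message: %s', agent.disabled_message)
    elif agent.run_duration is not None:
        LOG.info('Catalog run in progress for %s seconds', agent.run_duration)
    else:
        LOG.info('Last run %s seconds ago (puppet %s, %s failed resources)',
                 agent.last_run, agent.puppet_version, agent.resources_failed)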
|
the-stack_0_19369 | import json
from bm_instance_agent.common import utils
from bm_instance_agent import exception
class Base(object):
""" Construct obj from req body
"""
k_v_mapping = {}
allowed_keys = []
def __init__(self):
for v in self.k_v_mapping.values():
setattr(self, v, None)
@staticmethod
def body(req):
b_data = req.get('body', {})
if isinstance(b_data, str):
b_data = json.loads(b_data)
return b_data
def construct(self, data):
for k in self.allowed_keys:
setattr(self, k, data.get(k))
def to_json(self):
return {k: getattr(self, k) for k in self.allowed_keys}
class BmInstanceObj(Base):
""" Construct a bm instance obj from req body
Bm instance part of req::
{
'bmInstance': {
'uuid': 'uuid',
'provisionIpAddress': '192.168.101.10',
'provisionNicMac': 'aa:bb:cc:dd:ee:ff'
}
}
"""
allowed_keys = ['uuid', 'provision_ip', 'provision_mac']
@classmethod
def from_json(cls, bm_instance):
obj = cls()
obj.construct(bm_instance)
return obj
class VolumeObj(Base):
""" Construct a volume obj from req body
Volume part of req::
{
'volume': {
'uuid': 'uuid',
'primaryStorageType': 'NFS',
'type': 'Data/Sys',
'path': '/path/to/nfs/qcow2/volume',
'format': 'qcow2',
'deviceId': 2
}
}
"""
allowed_keys = ['uuid', 'device_id']
@classmethod
def from_json(cls, volume):
obj = cls()
obj.construct(volume)
return obj
class PortObj(Base):
""" Construct a Port obj from req body
A port example::
{
'mac': 'aa:bb:cc:dd:ee:ff',
'ipAddress': '10.0.120.10',
'netmask': '255.255.255.0',
'gateway': '10.0.120.1',
'vlanId': '1024',
'defaultRoute': True
}
`nextDefaultRoutePort` only used during port detach.
"""
allowed_keys = ['mac', 'ip_address', 'netmask', 'gateway',
'default_route', 'iface_name', 'vlan_id']
@classmethod
def from_json(cls, port):
obj = cls()
obj.construct(port)
local_ifaces = utils.get_interfaces()
if obj.mac not in local_ifaces:
raise exception.NewtorkInterfaceNotFound(mac=obj.mac,
vlan_id=obj.vlan_id)
# NOTE(ya.wang) For vlan nic, the name is 'iface.vlan_id', therefore
# try to split it.
iface_name = local_ifaces.get(obj.mac).split('.')[0]
if obj.vlan_id:
iface_name = '{iface_name}.{vlan_id}'.format(
iface_name=iface_name, vlan_id=obj.vlan_id)
setattr(obj, 'iface_name', iface_name)
return obj
class NetworkObj(Base):
""" Construct a network obj from req body
single port req example::
{
'port': {
'mac': '52:54:00:23:f1:c0',
'ipAddress': '10.0.120.10',
'netmask': '255.255.255.0',
'gateway': '10.0.120.1',
'vlanId': '1024',
'defaultRoute': True
}
}
multi ports req example::
{
'ports': [
{
'mac': '52:54:00:23:a1:c0',
'ipAddress': '10.0.120.10',
'netmask': '255.255.255.0',
'gateway': '10.0.120.1',
'vlanId': '1024',
'defaultRoute': True
},
{
'mac': '52:54:00:e5:c3:bf',
'ipAddress': '10.0.130.10',
'netmask': '255.255.255.0',
'gateway': '10.0.130.1',
'vlanId': '1024',
'defaultRoute': False
}
]
}
"""
@classmethod
def from_json(cls, req):
if not req:
return None
obj = cls()
setattr(obj, 'default_gw_addr', '')
setattr(obj, 'ports', [])
ports = req if isinstance(req, list) else [req]
for port_dict in ports:
obj.ports.append(PortObj.from_json(port_dict))
if port_dict.get('default_route'):
obj.default_gw_addr = port_dict.get('gateway')
return obj
class HeaderObj(Base):
""" Construct a request header obj from req headers
A req headers example::
{
'taskuuid': '4dca1e5c-6b25-498d-b89e-99c55b32bd81',
'callbackurl': 'mn callback url',
'Gateway-Callback-Uri': 'The callback url which proxy by gw nginx'
}
If callbackurl and Gateway-Callback-Uri both exist, use
Gateway-Callback-Uri
"""
allowed_keys = ['task_uuid', 'callback_url']
@classmethod
def from_headers(cls, headers):
obj = cls()
setattr(obj, 'task_uuid', headers.get('taskuuid'))
if 'Gateway-Callback-Uri' in headers:
setattr(obj, 'callback_url', headers.get('Gateway-Callback-Uri'))
else:
setattr(obj, 'callback_url', headers.get('callbackurl'))
return obj
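# --- Added illustration (not part of the original module) ---------------------
# Minimal sketch of building the simpler request objects above; values are
# placeholders. PortObj/NetworkObj are not exercised here because they validate
# the MAC against the local interfaces via utils.get_interfaces().
def _example_build_objects():  # pragma: no cover
    header = HeaderObj.from_headers({
        'taskuuid': 'task-uuid-placeholder',
        'callbackurl': 'http://mn/callback',
    })
    bm_instance = BmInstanceObj.from_json({
        'uuid': 'instance-uuid-placeholder',
        'provision_ip': '192.168.101.10',
        'provision_mac': 'aa:bb:cc:dd:ee:ff',
    })
    return header.to_json(), bm_instance.to_json()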
|
the-stack_0_19370 |
# -------------------------------------------------------------------------------------------------
# Imports
# -------------------------------------------------------------------------------------------------
import logging
import os
from pathlib import Path
import shutil
import numpy as np
import imageio
from datoviz import app, canvas
ROOT_PATH = Path(__file__).resolve().parent.parent.parent.parent
CYTHON_PATH = Path(__file__).resolve().parent.parent
IMAGES_PATH = CYTHON_PATH / 'images'
SCREENSHOTS_PATH = ROOT_PATH / 'data/screenshots'
logger = logging.getLogger('datoviz')
# -------------------------------------------------------------------------------------------------
# Util functions
# -------------------------------------------------------------------------------------------------
def check_screenshot(filename):
"""Compare a new screenshot with the reference image."""
    assert filename.exists()
filename_ref = filename.with_suffix('').with_suffix('').with_suffix('.png')
if not filename_ref.exists():
logger.debug(f"Reference image {filename_ref} didn't exist, skipping image check.")
shutil.copy(filename, filename_ref)
return True
img_new = imageio.imread(filename)
if img_new.sum() == 0:
logger.warning("Screenshot is empty")
return False
img_ref = imageio.imread(filename_ref)
if img_new.shape != img_ref.shape:
logger.debug(f"Image size is different: {img_new.shape} != {img_ref.shape}")
return False
return np.all(img_new == img_ref)
def check_canvas(ca, test_name, output_dir=None):
"""Run a canvas, make a screenshot, and check it with respect to the reference image."""
output_dir = output_dir or IMAGES_PATH
if not output_dir.exists():
output_dir.mkdir(exist_ok=True, parents=True)
screenshot = output_dir / f'{test_name}.new.png'
# Interactive mode if debug.
debug = os.environ.get('DVZ_DEBUG', None)
if debug:
app().run()
ca.close()
return
# Run and save the screenshot.
# app().run(10, screenshot=str(screenshot))
app().run(n_frames=5)
ca.screenshot(str(screenshot))
ca.close()
# Check the screenshot.
res = check_screenshot(screenshot)
assert res, f"Screenshot check failed for {test_name}"
# Delete the new screenshot if it matched the reference image.
if res:
logger.debug(f"Screenshot check succeedeed for {test_name}")
os.remove(screenshot)
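# --- Added illustration (not part of the original helpers) --------------------
# Sketch of how a test would typically drive these helpers; it assumes a working
# datoviz installation (GPU or offscreen backend) and visuals added elsewhere.
def _example_test():  # pragma: no cover
    ca = canvas()
    # ... add panels/visuals to `ca` here ...
    check_canvas(ca, 'example_test', output_dir=IMAGES_PATH)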
|
the-stack_0_19371 |
import typing as t
from pyday_night_funkin.core.camera import Camera
from pyday_night_funkin.core.graphics import PNFBatch, get_default_batch, PNFGroup
class Context():
"""
Graphics context, which is fancy talk for a batch, a group and a
camera in a slotted container class.
"""
__slots__ = ("batch", "camera", "group")
def __init__(
self,
batch: t.Optional["PNFBatch"] = None,
group: t.Optional["PNFGroup"] = None,
camera: t.Optional["Camera"] = None,
) -> None:
"""
Creates a new context.
If no `batch` is given, it will be set to be the default batch.
If no `group` is given, it will be set to an empty group
without state mutators and no parent.
If no `camera` is given, it will be set to the global dummy
camera.
"""
self.batch = batch or get_default_batch()
self.group = group or PNFGroup()
self.camera = camera or Camera.get_dummy()
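# --- Added illustration (not part of the original class) ----------------------
# Typical construction, assuming a window/GL context exists and `parent_group` /
# `scene_camera` are defined elsewhere (placeholders here):
#
#     ctx = Context()  # default batch, empty group, dummy camera
#     ctx = Context(group=PNFGroup(parent=parent_group), camera=scene_camera)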
|
the-stack_0_19372 | # Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace
import numpy as np
from dace.transformation.dataflow import StripMining
N = dace.symbol('N')
@dace.program(dace.float64, dace.float64[N], dace.float64[N])
def axpy(A, X, Y):
@dace.map(_[0:N])
def multiplication(i):
in_A << A
in_X << X[i]
in_Y << Y[i]
out >> Y[i]
out = in_A * in_X + in_Y
def test_tiling_number_of_tiles():
size = 250
np.random.seed(0)
A = np.random.rand()
X = np.random.rand(size)
Y = np.random.rand(size)
Z = np.copy(Y)
sdfg = axpy.to_sdfg()
sdfg.name = 'tiling_number_of_tiles'
sdfg.apply_strict_transformations()
sdfg.apply_transformations(StripMining,
options=[{
'tile_size': '16',
'tiling_type': dace.TilingType.NumberOfTiles
}])
sdfg(A=A, X=X, Y=Y, N=size)
assert np.allclose(Y, A*X+Z)
print('PASS')
if __name__ == "__main__":
test_tiling_number_of_tiles()
|
the-stack_0_19373 | import sys
from setuptools import setup, find_packages
with open("README.rst") as fp:
long_description = fp.read()
install_requires = [
"requests>=2.12",
"PyYAML",
"six>=1.10.0",
"tzlocal",
]
if sys.version_info < (3,):
install_requires.extend([
"ipaddress",
])
setup(
name="pykube",
version="0.16a2",
description="Python client library for Kubernetes",
long_description=long_description,
author="Eldarion, Inc.",
author_email="[email protected]",
license="Apache",
url="https://github.com/kelproject/pykube",
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Programming Language :: Python",
],
zip_safe=False,
packages=find_packages(),
entry_points={
"httpie.plugins.transport.v1": [
"httpie_pykube = pykube.contrib.httpie_plugin:PyKubeTransportPlugin"
],
},
install_requires=install_requires,
extras_require={
"gcp": [
"google-auth",
"jsonpath-ng",
]
},
)
|
the-stack_0_19375 | # Copyright 2020 Open Reaction Database Project Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset processing script for database submissions.
This script is meant to be a one-stop shop for preparing submissions to the
Open Reaction Database.
By default, the script only validates the input Dataset messages. Validation
may introduce changes to the Reaction messages, such as the addition of SMILES
for compounds identified only by NAME. Users should frequently run these checks
as they are preparing a dataset for submission.
With the optional --update flag, the script also performs database-specific
updates (such as adding record IDs). These operations are meant to be run as
part of the submission process and not as part of the pre-submission validation
cycle.
Usage:
process_dataset.py (--input_pattern=<str> | --input_file=<str>) [options]
Options:
--input_pattern=<str> Pattern matching input Dataset protos
--input_file=<str> File containing Dataset proto filenames
--root=<str> Root of the repository [default: ]
--output_format=<str> Dataset output format [default: .pb.gz]
--write_errors If True, errors will be written to *.error
--no-validate If set, reactions will not be validated
--update If True, update Reaction protos
--cleanup If True, use git to clean up
--max_size=<float> Maximum size (in MB) for any Reaction message [default: 10.0]
--base=<str> Git branch to diff against
--issue=<str> GitHub pull request number; if provided, a comment will be added
--token=<str> GitHub authentication token
"""
import dataclasses
import glob
import gzip
import logging
import os
import subprocess
import sys
from typing import Iterable, List, Mapping, Optional, Set, Tuple
import docopt
import github
from rdkit import RDLogger
from ord_schema import message_helpers
from ord_schema import updates
from ord_schema import validations
from ord_schema.proto import dataset_pb2
logger = logging.getLogger(__name__)
# pylint: disable=too-many-branches,too-many-locals
@dataclasses.dataclass(eq=True, frozen=True, order=True)
class FileStatus:
"""A filename and its status in Git."""
filename: str
status: str
original_filename: str
def __post_init__(self):
if self.status[0] not in ["A", "D", "M", "R"]:
raise ValueError(f"unsupported file status: {self.status}")
def _get_inputs(kwargs) -> List[FileStatus]:
"""Gets a list of Dataset proto filenames to process.
Returns:
List of FileStatus objects.
Raises:
ValueError: If a git-diff status is not one of {'A', 'D', 'M', 'R'}.
"""
if kwargs["--input_pattern"]:
# Setting recursive=True allows recursive matching with '**'.
filenames = glob.glob(kwargs["--input_pattern"], recursive=True)
return [FileStatus(filename, "A", "") for filename in filenames]
if kwargs["--input_file"]:
inputs = []
with open(kwargs["--input_file"]) as f:
for line in f:
fields = line.strip().split("\t")
if len(fields) == 3:
status, original_filename, filename = fields
if not status.startswith("R"):
raise ValueError(f"malformed status line: {line.strip()}")
else:
status, filename = fields
if not status.startswith(("A", "D", "M")):
raise ValueError(f"unsupported git-diff statue: {status}")
original_filename = ""
inputs.append(FileStatus(filename, status, original_filename))
return inputs
raise ValueError("one of --input_pattern or --input_file is required")
def cleanup(filename: str, output_filename: str):
"""Removes and/or renames the input Dataset files.
Args:
filename: Original dataset filename.
output_filename: Updated dataset filename.
"""
if filename == output_filename:
logger.info("editing an existing dataset; no cleanup needed")
return # Reuse the existing dataset ID.
args = ["git", "mv", filename, output_filename]
logger.info("Running command: %s", " ".join(args))
subprocess.run(args, check=True)
def _get_reaction_ids(dataset: dataset_pb2.Dataset) -> Set[str]:
"""Returns a set containing the reaction IDs in a Dataset."""
reaction_ids = set()
for reaction in dataset.reactions:
if reaction.reaction_id:
reaction_ids.add(reaction.reaction_id)
return reaction_ids
def _load_base_dataset(file_status: FileStatus, base: str) -> dataset_pb2.Dataset:
"""Loads a Dataset message from another branch."""
if file_status.status.startswith("A"):
return None # Dataset only exists in the submission.
# NOTE(kearnes): Use --no-pager to avoid a non-zero exit code.
args = ["git", "--no-pager", "show"]
if file_status.status.startswith("R"):
args.append(f"{base}:{file_status.original_filename}")
else:
args.append(f"{base}:{file_status.filename}")
logger.info("Running command: %s", " ".join(args))
serialized = subprocess.run(args, capture_output=True, check=True, text=False)
if serialized.stdout.startswith(b"version"):
# Convert Git LFS pointers to real data.
serialized = subprocess.run(
["git", "lfs", "smudge"],
input=serialized.stdout,
capture_output=True,
check=True,
text=False,
)
if args[-1].endswith(".gz"):
value = gzip.decompress(serialized.stdout)
else:
value = serialized.stdout
return dataset_pb2.Dataset.FromString(value)
def get_change_stats(
datasets: Mapping[str, dataset_pb2.Dataset], inputs: Iterable[FileStatus], base: str
) -> Tuple[Set[str], Set[str], Set[str]]:
"""Computes diff statistics for the submission.
Args:
datasets: Dict mapping filenames to Dataset messages.
inputs: List of FileStatus objects.
base: Git branch to diff against.
Returns:
added: Set of added reaction IDs.
removed: Set of deleted reaction IDs.
changed: Set of changed reaction IDs.
"""
old, new = set(), set()
for file_status in inputs:
if not file_status.status.startswith("D"):
new.update(_get_reaction_ids(datasets[file_status.filename]))
dataset = _load_base_dataset(file_status, base)
if dataset is not None:
old.update(_get_reaction_ids(dataset))
return new - old, old - new, new & old
def _run_updates(datasets: Mapping[str, dataset_pb2.Dataset], kwargs):
"""Updates the submission files.
Args:
datasets: Dict mapping filenames to Dataset messages.
Raises:
ValueError: if any Reaction is larger than FLAGS.max_size.
"""
for dataset in datasets.values():
# Set reaction_ids, resolve names, fix cross-references, etc.
updates.update_dataset(dataset)
# Final validation to make sure we didn't break anything.
options = validations.ValidationOptions(validate_ids=True, require_provenance=True)
validations.validate_datasets(datasets, kwargs["--write_errors"], options=options)
for filename, dataset in datasets.items():
output_filename = os.path.join(
kwargs["--root"],
message_helpers.id_filename(f'{dataset.dataset_id}{kwargs["--output_format"]}'),
)
os.makedirs(os.path.dirname(output_filename), exist_ok=True)
if kwargs["--cleanup"]:
cleanup(filename, output_filename)
logger.info("writing Dataset to %s", output_filename)
message_helpers.write_message(dataset, output_filename)
def run(kwargs) -> Tuple[Optional[Set[str]], Optional[Set[str]], Optional[Set[str]]]:
"""Main function that returns added/removed reaction ID sets.
This function should be called directly by tests to get access to the
return values. If main() returns something other than None it will break
shell error code logic downstream.
Returns:
added: Set of added reaction IDs.
removed: Set of deleted reaction IDs.
changed: Set of changed reaction IDs.
"""
inputs = sorted(_get_inputs(kwargs))
if not inputs:
logger.info("nothing to do")
return set(), set(), set() # Nothing to do.
# NOTE(kearnes): Process one dataset at a time to avoid OOM errors.
change_stats = {}
for file_status in inputs:
if file_status.status == "D":
dataset = None
else:
dataset = message_helpers.load_message(file_status.filename, dataset_pb2.Dataset)
logger.info("%s: %d reactions", file_status.filename, len(dataset.reactions))
datasets = {file_status.filename: dataset}
if not kwargs["--no-validate"] and dataset is not None:
# Note: this does not check if IDs are malformed.
validations.validate_datasets(datasets, kwargs["--write_errors"])
# Check reaction sizes.
for reaction in dataset.reactions:
reaction_size = sys.getsizeof(reaction.SerializeToString()) / 1e6
if reaction_size > float(kwargs["--max_size"]):
raise ValueError(
"Reaction is larger than --max_size " f'({reaction_size} vs {kwargs["--max_size"]}'
)
if kwargs["--base"]:
added, removed, changed = get_change_stats(datasets, [file_status], base=kwargs["--base"])
change_stats[file_status.filename] = (added, removed, changed)
logger.info(
"Summary: +%d -%d Δ%d reaction IDs",
len(added),
len(removed),
len(changed),
)
if kwargs["--update"] and dataset is not None:
_run_updates(datasets, kwargs)
if change_stats:
total_added, total_removed, total_changed = set(), set(), set()
comment = [
"Change summary:",
"| Filename | Added | Removed | Changed |",
"| -------- | ----- | ------- | ------- |",
]
for filename, (added, removed, changed) in change_stats.items():
comment.append(f"| {filename} | " f"{len(added)} | {len(removed)} | {len(changed)} |")
total_added |= added
total_removed |= removed
total_changed |= changed
comment.append(f"| | **{len(total_added)}** | " f"**{len(total_removed)}** | " f"**{len(total_changed)}** |")
if kwargs["--issue"] and kwargs["--token"]:
client = github.Github(kwargs["--token"])
repo = client.get_repo(os.environ["GITHUB_REPOSITORY"])
            issue = repo.get_issue(int(kwargs["--issue"]))
issue.create_comment("\n".join(comment))
else:
total_added, total_removed, total_changed = None, None, None
return total_added, total_removed, total_changed
def main(kwargs):
RDLogger.DisableLog("rdApp.*") # Disable RDKit logging.
run(kwargs)
if __name__ == "__main__":
main(docopt.docopt(__doc__))
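# --- Added illustration (not part of the original script) ---------------------
# Example invocations, using only flags documented in the module docstring
# (paths and branch names are placeholders):
#
#   python process_dataset.py --input_pattern="data/**/*.pb.gz" --write_errors
#   python process_dataset.py --input_file=changed.txt --update --cleanup --base=main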
|
the-stack_0_19377 | # Copyright 2016 AT&T Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tempest test-case to test namespace objects using RBAC roles
"""
from oslo_log import log as logging
from patrole_tempest_plugin import rbac_rule_validation
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tungsten_tempest_plugin.tests.api.contrail import rbac_base
CONF = config.CONF
LOG = logging.getLogger(__name__)
class NamespaceContrailTest(rbac_base.BaseContrailTest):
"""Test class to test namespace objects using RBAC roles"""
def _create_namespace(self):
fq_name = data_utils.rand_name('namespace')
post_body = {
'parent_type': 'domain',
'fq_name': ['default-domain', fq_name]
}
resp_body = self.namespace_client.create_namespaces(**post_body)
namespace_uuid = resp_body['namespace']['uuid']
self.addCleanup(self._try_delete_resource,
self.namespace_client.delete_namespace,
namespace_uuid)
return namespace_uuid
def _update_namespace(self, namespace_uuid):
put_body = {
'display_name': data_utils.rand_name('namespace')
}
self.namespace_client.update_namespace(namespace_uuid, **put_body)
@rbac_rule_validation.action(service="Contrail",
rules=["list_namespaces"])
@decorators.idempotent_id('e436390d-d669-4047-9838-421ea93e94be')
def test_list_namespaces(self):
"""test method for list namespace objects"""
with self.rbac_utils.override_role(self):
self.namespace_client.list_namespaces()
@rbac_rule_validation.action(service="Contrail",
rules=["create_namespaces"])
@decorators.idempotent_id('503ae445-7e67-4db6-989a-af0b7f9a7e95')
def test_create_namespaces(self):
"""test method for create namespace objects"""
with self.rbac_utils.override_role(self):
self._create_namespace()
@rbac_rule_validation.action(service="Contrail",
rules=["show_namespace"])
@decorators.idempotent_id('f916971a-7c07-4386-b887-8b78d8a1e528')
def test_show_namespace(self):
"""test method for show namespace objects"""
namespace_uuid = self._create_namespace()
with self.rbac_utils.override_role(self):
self.namespace_client.show_namespace(namespace_uuid)
@rbac_rule_validation.action(service="Contrail",
rules=["update_namespace"])
@decorators.idempotent_id('3649f65a-922a-4b8a-9b8b-520c333e192e')
def test_update_namespace(self):
"""test method for update namespace objects"""
namespace_uuid = self._create_namespace()
with self.rbac_utils.override_role(self):
self._update_namespace(namespace_uuid)
@rbac_rule_validation.action(service="Contrail",
rules=["delete_namespace"])
@decorators.idempotent_id('80e736bf-fc7d-4274-8173-a50c883776a9')
def test_delete_namespace(self):
"""test method for delete namespace objects"""
namespace_uuid = self._create_namespace()
with self.rbac_utils.override_role(self):
self.namespace_client.delete_namespace(namespace_uuid)
|
the-stack_0_19378 | """Test check utilities."""
# Authors: MNE Developers
# Stefan Appelhoff <[email protected]>
#
# License: BSD (3-clause)
import os
import os.path as op
import sys
import numpy as np
import pytest
from pathlib import Path
import mne
from mne import read_vectorview_selection
from mne.datasets import testing
from mne.io.pick import pick_channels_cov
from mne.utils import (check_random_state, _check_fname, check_fname,
_check_subject, requires_mayavi, traits_test,
_check_mayavi_version, _check_info_inv, _check_option,
check_version, _check_path_like, _validate_type,
_suggest, _on_missing, requires_nibabel, _safe_input)
data_path = testing.data_path(download=False)
base_dir = op.join(data_path, 'MEG', 'sample')
fname_raw = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_event = op.join(base_dir, 'sample_audvis_trunc_raw-eve.fif')
fname_fwd = op.join(base_dir, 'sample_audvis_trunc-meg-vol-7-fwd.fif')
fname_mgz = op.join(data_path, 'subjects', 'sample', 'mri', 'aseg.mgz')
reject = dict(grad=4000e-13, mag=4e-12)
@testing.requires_testing_data
def test_check(tmpdir):
"""Test checking functions."""
pytest.raises(ValueError, check_random_state, 'foo')
pytest.raises(TypeError, _check_fname, 1)
_check_fname(Path('./'))
fname = str(tmpdir.join('foo'))
with open(fname, 'wb'):
pass
assert op.isfile(fname)
_check_fname(fname, overwrite='read', must_exist=True)
orig_perms = os.stat(fname).st_mode
os.chmod(fname, 0)
if not sys.platform.startswith('win'):
with pytest.raises(PermissionError, match='read permissions'):
_check_fname(fname, overwrite='read', must_exist=True)
os.chmod(fname, orig_perms)
os.remove(fname)
assert not op.isfile(fname)
pytest.raises(IOError, check_fname, 'foo', 'tets-dip.x', (), ('.fif',))
pytest.raises(ValueError, _check_subject, None, None)
pytest.raises(TypeError, _check_subject, None, 1)
pytest.raises(TypeError, _check_subject, 1, None)
# smoke tests for permitted types
check_random_state(None).choice(1)
check_random_state(0).choice(1)
check_random_state(np.random.RandomState(0)).choice(1)
if check_version('numpy', '1.17'):
check_random_state(np.random.default_rng(0)).choice(1)
@testing.requires_testing_data
@pytest.mark.parametrize('suffix',
('_meg.fif', '_eeg.fif', '_ieeg.fif',
'_meg.fif.gz', '_eeg.fif.gz', '_ieeg.fif.gz'))
def test_check_fname_suffixes(suffix, tmpdir):
"""Test checking for valid filename suffixes."""
new_fname = str(tmpdir.join(op.basename(fname_raw)
.replace('_raw.fif', suffix)))
raw = mne.io.read_raw_fif(fname_raw).crop(0, 0.1)
raw.save(new_fname)
mne.io.read_raw_fif(new_fname)
@requires_mayavi
@traits_test
def test_check_mayavi():
"""Test mayavi version check."""
pytest.raises(RuntimeError, _check_mayavi_version, '100.0.0')
def _get_data():
"""Read in data used in tests."""
# read forward model
forward = mne.read_forward_solution(fname_fwd)
# read data
raw = mne.io.read_raw_fif(fname_raw, preload=True)
events = mne.read_events(fname_event)
event_id, tmin, tmax = 1, -0.1, 0.15
# decimate for speed
left_temporal_channels = read_vectorview_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True,
selection=left_temporal_channels)
picks = picks[::2]
raw.pick_channels([raw.ch_names[ii] for ii in picks])
del picks
raw.info.normalize_proj() # avoid projection warnings
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
baseline=(None, 0.), preload=True, reject=reject)
noise_cov = mne.compute_covariance(epochs, tmin=None, tmax=0.)
data_cov = mne.compute_covariance(epochs, tmin=0.01, tmax=0.15)
return epochs, data_cov, noise_cov, forward
@testing.requires_testing_data
def test_check_info_inv():
"""Test checks for common channels across fwd model and cov matrices."""
epochs, data_cov, noise_cov, forward = _get_data()
# make sure same channel lists exist in data to make testing life easier
assert epochs.info['ch_names'] == data_cov.ch_names
assert epochs.info['ch_names'] == noise_cov.ch_names
# check whether bad channels get excluded from the channel selection
# info
info_bads = epochs.info.copy()
info_bads['bads'] = info_bads['ch_names'][1:3] # include two bad channels
picks = _check_info_inv(info_bads, forward, noise_cov=noise_cov)
assert [1, 2] not in picks
# covariance matrix
data_cov_bads = data_cov.copy()
data_cov_bads['bads'] = data_cov_bads.ch_names[0]
picks = _check_info_inv(epochs.info, forward, data_cov=data_cov_bads)
assert 0 not in picks
# noise covariance matrix
noise_cov_bads = noise_cov.copy()
noise_cov_bads['bads'] = noise_cov_bads.ch_names[1]
picks = _check_info_inv(epochs.info, forward, noise_cov=noise_cov_bads)
assert 1 not in picks
# test whether reference channels get deleted
info_ref = epochs.info.copy()
info_ref['chs'][0]['kind'] = 301 # pretend to have a ref channel
picks = _check_info_inv(info_ref, forward, noise_cov=noise_cov)
assert 0 not in picks
# pick channels in all inputs and make sure common set is returned
epochs.pick_channels([epochs.ch_names[ii] for ii in range(10)])
data_cov = pick_channels_cov(data_cov, include=[data_cov.ch_names[ii]
for ii in range(5, 20)])
noise_cov = pick_channels_cov(noise_cov, include=[noise_cov.ch_names[ii]
for ii in range(7, 12)])
picks = _check_info_inv(epochs.info, forward, noise_cov=noise_cov,
data_cov=data_cov)
assert list(range(7, 10)) == picks
def test_check_option():
"""Test checking the value of a parameter against a list of options."""
allowed_values = ['valid', 'good', 'ok']
# Value is allowed
assert _check_option('option', 'valid', allowed_values)
assert _check_option('option', 'good', allowed_values)
assert _check_option('option', 'ok', allowed_values)
assert _check_option('option', 'valid', ['valid'])
# Check error message for invalid value
msg = ("Invalid value for the 'option' parameter. Allowed values are "
"'valid', 'good', and 'ok', but got 'bad' instead.")
with pytest.raises(ValueError, match=msg):
assert _check_option('option', 'bad', allowed_values)
# Special error message if only one value is allowed
msg = ("Invalid value for the 'option' parameter. The only allowed value "
"is 'valid', but got 'bad' instead.")
with pytest.raises(ValueError, match=msg):
assert _check_option('option', 'bad', ['valid'])
def test_check_path_like():
"""Test _check_path_like()."""
str_path = str(base_dir)
pathlib_path = Path(base_dir)
no_path = dict(foo='bar')
assert _check_path_like(str_path) is True
assert _check_path_like(pathlib_path) is True
assert _check_path_like(no_path) is False
def test_validate_type():
"""Test _validate_type."""
_validate_type(1, 'int-like')
with pytest.raises(TypeError, match='int-like'):
_validate_type(False, 'int-like')
@requires_nibabel()
@testing.requires_testing_data
def test_suggest():
"""Test suggestions."""
names = mne.get_volume_labels_from_aseg(fname_mgz)
sug = _suggest('', names)
assert sug == '' # nothing
sug = _suggest('Left-cerebellum', names)
assert sug == " Did you mean 'Left-Cerebellum-Cortex'?"
sug = _suggest('Cerebellum-Cortex', names)
assert sug == " Did you mean one of ['Left-Cerebellum-Cortex', 'Right-Cerebellum-Cortex', 'Left-Cerebral-Cortex']?" # noqa: E501
def test_on_missing():
"""Test _on_missing."""
msg = 'test'
with pytest.raises(ValueError, match=msg):
_on_missing('raise', msg)
with pytest.warns(RuntimeWarning, match=msg):
_on_missing('warn', msg)
_on_missing('ignore', msg)
with pytest.raises(ValueError,
match='Invalid value for the \'on_missing\' parameter'):
_on_missing('foo', msg)
def _matlab_input(msg):
raise EOFError()
def test_safe_input(monkeypatch):
"""Test _safe_input."""
monkeypatch.setattr(mne.utils.check, 'input', _matlab_input)
with pytest.raises(RuntimeError, match='Could not use input'):
_safe_input('whatever', alt='nothing')
assert _safe_input('whatever', use='nothing') == 'nothing'
|
the-stack_0_19379 | import logging
import aug
import numpy as np
from functools import partial
import cv2
import torch
import torch.optim as optim
import tqdm
import yaml
from joblib import cpu_count
from torch.utils.data import DataLoader
from adversarial_trainer import GANFactory
from dataset import PairedDataset, _read_img
from metric_counter import MetricCounter
from models.losses import get_loss
from models.models import get_model
from models.networks import get_nets
from schedulers import LinearDecay, WarmRestart
cv2.setNumThreads(0)
class Trainer:
def __init__(self, config, train: DataLoader, val: DataLoader):
self.config = config
self.train_dataset = train
self.val_dataset = val
self.adv_lambda = config['model']['adv_lambda']
self.metric_counter = MetricCounter(config['experiment_desc'])
self.warmup_epochs = config['warmup_num']
def train(self):
self._init_params()
checkpoint = torch.load('last_{}.h5'.format(self.config['experiment_desc']))
self.netG.load_state_dict(checkpoint['model'])
        last_epoch = checkpoint.get('epoch', -1)
print("Starting from epoch:", last_epoch)
logging.debug("Starting from epoch:", last_epoch)
for epoch in range(last_epoch+1, config['num_epochs']):
if (epoch == self.warmup_epochs) and not (self.warmup_epochs == 0):
self.netG.module.unfreeze()
self.optimizer_G = self._get_optim(self.netG.parameters())
self.scheduler_G = self._get_scheduler(self.optimizer_G)
self._run_epoch(epoch)
# self._validate(epoch)
self.scheduler_G.step()
self.scheduler_D.step()
if self.metric_counter.update_best_model():
torch.save({
'model': self.netG.state_dict(),
}, 'best_{}.h5'.format(self.config['experiment_desc']))
torch.save({
'model': self.netG.state_dict(),
'epoch': epoch
}, 'last_{}.h5'.format(self.config['experiment_desc']))
print(self.metric_counter.loss_message())
logging.debug("Experiment Name: %s, Epoch: %d, Loss: %s" % (
self.config['experiment_desc'], epoch, self.metric_counter.loss_message()))
def _run_epoch(self, epoch):
self.metric_counter.clear()
for param_group in self.optimizer_G.param_groups:
lr = param_group['lr']
epoch_size = config.get('train_batches_per_epoch') or len(self.train_dataset)
tq = tqdm.tqdm(self.train_dataset, total=epoch_size)
tq.set_description('Epoch {}, lr {}'.format(epoch, lr))
i = 0
# transform_fn = aug.get_transforms(size=config['size'], scope=config['scope'], crop=config['crop'])
normalize_fn = aug.get_normalize()
# corrupt_fn = aug.get_corrupt_function(config['corrupt'])
def _preprocess(img, res):
def transpose(x):
return np.transpose(x, (2, 0, 1))
return map(transpose, normalize_fn(img, res))
for data in tq:
a, b = data['a'][0], data['b'][0]
a, b = map(_read_img, (a, b))
# a, b = transform_fn(a, b)
# a = corrupt_fn(a)
a, b = _preprocess(a, b)
a, b = np.expand_dims(a, axis=0), np.expand_dims(b, axis=0)
a, b = torch.from_numpy(a), torch.from_numpy(b)
data = {'a': a, 'b': b}
inputs, targets = self.model.get_input(data)
outputs = self.netG(inputs)
loss_D = self._update_d(outputs, targets)
self.optimizer_G.zero_grad()
loss_content = self.criterionG(outputs, targets)
loss_adv = self.adv_trainer.loss_g(outputs, targets)
loss_G = loss_content + self.adv_lambda * loss_adv
loss_G.backward()
self.optimizer_G.step()
self.metric_counter.add_losses(loss_G.item(), loss_content.item(), loss_D)
curr_psnr, curr_ssim, img_for_vis = self.model.get_images_and_metrics(inputs, outputs, targets)
self.metric_counter.add_metrics(curr_psnr, curr_ssim)
tq.set_postfix(loss=self.metric_counter.loss_message())
if not i:
self.metric_counter.add_image(img_for_vis, tag='train')
i += 1
if i > epoch_size:
break
tq.close()
self.metric_counter.write_to_tensorboard(epoch)
def _validate(self, epoch):
self.metric_counter.clear()
epoch_size = config.get('val_batches_per_epoch') or len(self.val_dataset)
tq = tqdm.tqdm(self.val_dataset, total=epoch_size)
tq.set_description('Validation')
i = 0
for data in tq:
inputs, targets = self.model.get_input(data)
outputs = self.netG(inputs)
loss_content = self.criterionG(outputs, targets)
loss_adv = self.adv_trainer.loss_g(outputs, targets)
loss_G = loss_content + self.adv_lambda * loss_adv
self.metric_counter.add_losses(loss_G.item(), loss_content.item())
curr_psnr, curr_ssim, img_for_vis = self.model.get_images_and_metrics(inputs, outputs, targets)
self.metric_counter.add_metrics(curr_psnr, curr_ssim)
if not i:
self.metric_counter.add_image(img_for_vis, tag='val')
i += 1
if i > epoch_size:
break
tq.close()
self.metric_counter.write_to_tensorboard(epoch, validation=True)
def _update_d(self, outputs, targets):
if self.config['model']['d_name'] == 'no_gan':
return 0
self.optimizer_D.zero_grad()
loss_D = self.adv_lambda * self.adv_trainer.loss_d(outputs, targets)
loss_D.backward(retain_graph=True)
self.optimizer_D.step()
return loss_D.item()
def _get_optim(self, params):
if self.config['optimizer']['name'] == 'adam':
optimizer = optim.Adam(params, lr=self.config['optimizer']['lr'])
elif self.config['optimizer']['name'] == 'sgd':
optimizer = optim.SGD(params, lr=self.config['optimizer']['lr'])
elif self.config['optimizer']['name'] == 'adadelta':
optimizer = optim.Adadelta(params, lr=self.config['optimizer']['lr'])
else:
raise ValueError("Optimizer [%s] not recognized." % self.config['optimizer']['name'])
return optimizer
def _get_scheduler(self, optimizer):
if self.config['scheduler']['name'] == 'plateau':
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
mode='min',
patience=self.config['scheduler']['patience'],
factor=self.config['scheduler']['factor'],
min_lr=self.config['scheduler']['min_lr'])
elif self.config['optimizer']['name'] == 'sgdr':
scheduler = WarmRestart(optimizer)
elif self.config['scheduler']['name'] == 'linear':
scheduler = LinearDecay(optimizer,
min_lr=self.config['scheduler']['min_lr'],
num_epochs=self.config['num_epochs'],
start_epoch=self.config['scheduler']['start_epoch'])
else:
raise ValueError("Scheduler [%s] not recognized." % self.config['scheduler']['name'])
return scheduler
@staticmethod
def _get_adversarial_trainer(d_name, net_d, criterion_d):
if d_name == 'no_gan':
return GANFactory.create_model('NoGAN')
elif d_name == 'patch_gan' or d_name == 'multi_scale':
return GANFactory.create_model('SingleGAN', net_d, criterion_d)
elif d_name == 'double_gan':
return GANFactory.create_model('DoubleGAN', net_d, criterion_d)
else:
raise ValueError("Discriminator Network [%s] not recognized." % d_name)
def _init_params(self):
self.criterionG, criterionD = get_loss(self.config['model'])
self.netG, netD = get_nets(self.config['model'])
self.netG.cuda()
self.adv_trainer = self._get_adversarial_trainer(self.config['model']['d_name'], netD, criterionD)
self.model = get_model(self.config['model'])
self.optimizer_G = self._get_optim(filter(lambda p: p.requires_grad, self.netG.parameters()))
self.optimizer_D = self._get_optim(self.adv_trainer.get_params())
self.scheduler_G = self._get_scheduler(self.optimizer_G)
self.scheduler_D = self._get_scheduler(self.optimizer_D)
if __name__ == '__main__':
with open('config/config.yaml', 'r') as f:
        config = yaml.load(f, Loader=yaml.SafeLoader)
batch_size = config.pop('batch_size')
get_dataloader = partial(DataLoader, batch_size=batch_size, num_workers=cpu_count(), shuffle=True, drop_last=True)
datasets = map(config.pop, ('train', 'val'))
datasets = map(PairedDataset.from_config, datasets)
train, val = map(get_dataloader, datasets)
trainer = Trainer(config, train=train, val=val)
trainer.train()
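# --- Added reference sketch (not part of the original script) -----------------
# Keys this script reads from config/config.yaml; the values below are
# placeholders, not the authors' settings:
#
#   experiment_desc: my_experiment
#   batch_size: 1
#   num_epochs: 200
#   warmup_num: 3
#   train_batches_per_epoch: 1000    # optional
#   val_batches_per_epoch: 100       # optional
#   train: {...}                     # PairedDataset config
#   val: {...}                       # PairedDataset config
#   model: {d_name: double_gan, adv_lambda: 0.001, ...}
#   optimizer: {name: adam, lr: 0.0001}
#   scheduler: {name: linear, start_epoch: 50, min_lr: 0.0000001}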
|
the-stack_0_19380 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
from collections import abc
import copy
import inspect
import torch
import warnings
import gc
from ._fallback import _FallbackManager, ORTModuleIOError, ORTModuleONNXModelException, wrap_exception
from ._utils import warn_of_constant_inputs
class _OutputIdentityOp(torch.autograd.Function):
'''Internal class used to prepend Identity ops in model's outputs
This class is required to support ONNX models which passthrough [some of] the models's inputs
directly to the graph output. This is an issue because ONNX Runtime cannot build proper
gradient graph based on this pattern.
Adding a direct Identity Op to the user model doesn't work as the ONNX exporter would optimize it away,
resulting in the same issue.
Therefore a custom Autograd function was introduced to add an Identity right before the output
in a way the ONNX exporter will not optimize it away.
Given the model below
.. code-block:: python
class PassthroughNet(torch.nn.Module):
def __init__(self, input_size, hidden_size, num_classes):
super(PassthroughNet, self).__init__()
self.fc1_1 = torch.nn.Linear(input_size, hidden_size)
self.relu1 = torch.nn.ReLU()
self.fc1_2 = torch.nn.Linear(hidden_size, num_classes)
def forward(self, input1, passthrough_input):
out1 = self.fc1_2(self.relu1(self.fc1_1(input1)))
# use shape from passthrough_input
out1 = out1.view(passthrough_input.size()[0], -1)
return out1, passthrough_input
We can see `passthrough_input` is part of both model input and output and the resulting
ONNX subgraph would contain something like `output2 -> output2`.
By prepending each model output to an :class:`_OutputIdentityOp` op, the resulting
onnx subgraph for this example would be `passthrough_input -> Identity -> output2`.
TODO: Remove once PyTorch 1.8.2 or newer is released
'''
@staticmethod
def forward(ctx, input):
return torch.nn.Identity()(input)
@staticmethod
def backward(ctx, grad_output):
return grad_output
@staticmethod
def symbolic(g, self):
return g.op("Identity", self)
class _PrimitiveType(object):
_primitive_types = {int, bool, float}
@staticmethod
def is_primitive_type(value):
return type(value) in _PrimitiveType._primitive_types
@staticmethod
def get_tensor(value, device):
return torch.tensor(value, device=device)
@staticmethod
def get_primitive_dtype(value):
# If `value` is a boolean, save the value of the boolean in dtype.
# This way, if the value changes from one forward call to the next, the schema will mismatch,
# and the model will be re-exported.
return f"{str(type(value))}_{value}" if isinstance(value, bool) else str(type(value))
class _InputInfo(object):
def __init__(self,
names,
shape,
require_grad_names=None,
dynamic_axes=None,
schema=None,
num_positionals=0,
num_expanded_positionals_non_none=0,
keyword_names=None):
self.names = names
self.shape = shape
self.require_grad_names = require_grad_names if require_grad_names else []
self.dynamic_axes = dynamic_axes if dynamic_axes else {}
self.schema = schema if schema else []
self.num_positionals = num_positionals
self.num_expanded_positionals_non_none = num_expanded_positionals_non_none
self.keyword_names = keyword_names
def __repr__(self) -> str:
return f'''_InputInfo class:
\tNames: {self.names}
\tShape: {self.shape}
\tRequire gradient: {self.require_grad_names}
\tDynamic axes: {self.dynamic_axes}
\tSchema: {self.schema}
\t#Positionals (total): {self.num_positionals}
\t#Expanded Positionals (non-None): {self.num_expanded_positionals_non_none}
\tKeyword names: {self.keyword_names}'''
def flatten(self, args, kwargs, device):
'''Flatten args and kwargs in a single tuple of tensors with strict ordering'''
ret = [_PrimitiveType.get_tensor(arg, device) if _PrimitiveType.is_primitive_type(arg) else arg for arg in args]
ret += [_PrimitiveType.get_tensor(kwargs[name], device) if _PrimitiveType.is_primitive_type(kwargs[name])
else kwargs[name] for name in self.names if name in kwargs]
# if kwargs is empty, append an empty dictionary at the end of the sample inputs to make exporter
# happy. This is because the exporter is confused with kwargs and dictionary inputs otherwise.
if not kwargs:
ret.append({})
return ret
def unflatten(self, flat_args):
'''Unflatten tuple of tensors into args and kwargs'''
args = tuple(flat_args[:self.num_positionals])
kwargs = {name: arg for name, arg in zip(self.names[self.num_expanded_positionals_non_none:], flat_args[self.num_positionals:]) \
if name in self.keyword_names}
return args, kwargs
def _combine_input_buffers_initializers(params, onnx_input_names, input_info, buffer_names, inputs, kwargs, device):
'''Creates forward `*inputs` list from user input and PyTorch initializers
ONNX Runtime forward requires an ordered list of:
* User input: computed from forward InferenceSession
* Initializers: computed from original PyTorch model parameters
'''
def _expand_inputs(current_input, non_none_inputs):
# The exporter handles input lists by expanding them so that each
# element of the list is its own input.
# ORTModule must match this behavior by also expanding the inputs.
if current_input is None or isinstance(current_input, str):
# Drop all None and string inputs
return
if isinstance(current_input, abc.Sequence):
# If the input is a sequence (like a list), expand the list so that
# each element of the list is an input by itself
for inp in current_input:
_expand_inputs(inp, non_none_inputs)
elif isinstance(current_input, abc.Mapping):
# If the input is a mapping (like a dict), expand the dict so that
# each element of the dict is an input by itself
for _, val in current_input.items():
_expand_inputs(val, non_none_inputs)
else:
# else just collect all the non none inputs within non_none_inputs
non_none_inputs.append(current_input)
# User inputs
non_none_inputs = []
_expand_inputs(inputs, non_none_inputs)
buffer_names_dict = {buffer_name: inp for buffer_name, inp in buffer_names}
result = []
for input_idx, name in enumerate(onnx_input_names):
inp = None
if name in kwargs and kwargs[name] is not None:
# Only use keywords coming from user that are expected by ONNX model
inp = kwargs[name]
if inp is None:
try:
# Only use positionals coming from user that are expected by ONNX model
# if input_idx >= len(input_info.names), IndexError will be thrown
if name != input_info.names[input_idx]:
# When ONNX drops unused inputs, get correct index from user input
# if name is not in input_info.names, ValueError will be thrown
input_idx = input_info.names.index(name)
inp = non_none_inputs[input_idx]
except (IndexError, ValueError):
# ONNX input name is not present in input_info.names.
pass
if inp is None:
# Registered buffers are translated to user_input+initializer in ONNX
try:
inp = buffer_names_dict[name]
except KeyError:
# ONNX input name is not present in the registered buffer dict.
pass
if inp is not None:
if _PrimitiveType.is_primitive_type(inp):
inp = _PrimitiveType.get_tensor(inp, device)
result.append(inp)
else:
raise wrap_exception(ORTModuleONNXModelException,
RuntimeError(f'Input is present in ONNX graph but not provided: {name}.'))
# params is a list of all initializers known to the onnx graph
result.extend(params)
return result
def deepcopy_model_input(*inputs, **kwargs):
def extract_tensor(value):
if isinstance(value, torch.Tensor):
if value.requires_grad:
return value.data.requires_grad_()
else:
return value.data
else:
return value
sample_inputs_copy = [extract_tensor(value) for value in inputs]
sample_inputs_copy = copy.deepcopy(tuple(sample_inputs_copy))
sample_kwargs_copy = {}
for name, value in kwargs.items():
sample_kwargs_copy[name] = extract_tensor(value)
sample_kwargs_copy = copy.deepcopy(sample_kwargs_copy)
return sample_inputs_copy, sample_kwargs_copy
class _TensorStub(object):
'''Tensor stub class used to represent model's input or output'''
__slots__ = ['name', 'dtype', 'shape', 'shape_dims']
def __init__(self, name=None, dtype=None, shape=None, shape_dims=None):
self.name = name
self.dtype = dtype
self.shape = shape
self.shape_dims = shape_dims
def __repr__(self) -> str:
result = '_TensorStub('
if self.name is not None:
result += f'name={self.name}'
if self.dtype is not None:
if result[-1] != '(':
result += ', '
result += f'dtype={self.dtype}'
if self.shape is not None:
if result[-1] != '(':
result += ', '
result += f'shape={self.shape}'
if self.shape_dims is not None:
if result[-1] != '(':
result += ', '
result += f'shape_dims={self.shape_dims}'
result += ')'
return result
def __eq__(self, other):
if not other:
return False
elif not isinstance(other, _TensorStub):
            raise NotImplementedError('_TensorStub must only be compared to another _TensorStub instance!')
elif self.name != other.name:
return False
elif self.dtype != other.dtype:
return False
elif self.shape != other.shape:
return False
elif self.shape_dims != other.shape_dims:
return False
return True
def unflatten_user_output(output_schema, outputs):
"""Follows the schema to generate an output that is expected by the user"""
def _replace_stub_with_tensor_value(user_output, outputs, output_idx):
# Recursively traverse across user_output and replace all _TensorStub
# with torch.Tensor values from outputs following output_idx
if user_output is None:
return None
elif isinstance(user_output, _TensorStub):
out = outputs[output_idx[0]]
output_idx[0] += 1
return out
if isinstance(user_output, abc.Sequence):
sequence_type = type(user_output)
if hasattr(sequence_type, '_make'): # namedtuple
sequence_type = type(user_output)
user_output = sequence_type._make(
_replace_stub_with_tensor_value(uo, outputs, output_idx)
for uo in user_output)
else:
user_output = sequence_type(
_replace_stub_with_tensor_value(uo, outputs, output_idx)
for uo in user_output)
elif isinstance(user_output, abc.Mapping):
new_user_output = copy.copy(user_output)
for key in sorted(user_output):
new_user_output[key] = _replace_stub_with_tensor_value(new_user_output[key], outputs, output_idx)
user_output = new_user_output
else:
raise wrap_exception(ORTModuleIOError,
TypeError(f'ORTModule does not support the following model output type {type(user_output)}.'))
return user_output
# It is expected that the outputs are ordered in the way defined in the exported onnx model
# which is the order in which the output schema was saved.
output_idx = [0]
user_output = _replace_stub_with_tensor_value(output_schema, outputs, output_idx)
return user_output
def _extract_schema(data):
"""Extract the data schema by replacing every torch.Tensor value with _TensorStub"""
if data is None:
return data
elif isinstance(data, str):
warn_of_constant_inputs(data)
return data
elif _PrimitiveType.is_primitive_type(data):
if isinstance(data, bool):
warn_of_constant_inputs(data)
return _TensorStub(dtype=_PrimitiveType.get_primitive_dtype(data), shape_dims=0)
# Depth first traversal to iterate over the data to replace every tensor with a stub
elif isinstance(data, torch.Tensor):
return _TensorStub(dtype=str(data.dtype), shape_dims=len(data.size()))
# Instead of replacing the tensor with a stub in the original user input, build the stubbed_schema
# from scratch from the user input.
stubbed_schema = None
if isinstance(data, abc.Sequence):
sequence_type = type(data)
stubbed_schema = [_extract_schema(val) for val in data]
try:
# namedtuple can be created by passing the list sequence to method _make
stubbed_schema = sequence_type._make(stubbed_schema)
except AttributeError:
# If attribute error encountered, create the sequence directly
stubbed_schema = sequence_type(stubbed_schema)
elif isinstance(data, abc.Mapping):
dict_type = type(data)
stubbed_schema = {key: _extract_schema(data[key]) for key in data}
stubbed_schema = dict_type(**stubbed_schema)
else:
raise wrap_exception(ORTModuleIOError,
TypeError(f'ORTModule does not support the following model data type {type(data)}'))
return stubbed_schema
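# --- Added illustration (not part of the original module) ---------------------
# For a nested user input such as
#     {'x': torch.rand(2, 3), 'flags': [True, torch.ones(4)]}
# _extract_schema returns the same structure with tensors/primitives replaced by
# stubs, roughly:
#     {'x': _TensorStub(dtype='torch.float32', shape_dims=2),
#      'flags': [_TensorStub(dtype="<class 'bool'>_True", shape_dims=0),
#                _TensorStub(dtype='torch.float32', shape_dims=1)]}
# The input schema is compared across forward calls to decide whether the model
# must be re-exported; the output schema is what unflatten_user_output follows
# to rebuild the user-facing structure from ORT's flat outputs.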
def _parse_outputs_and_extract_names_and_dynamic_axes(module_output):
"""Parses through the module output and returns output names and dynamic axes"""
def _populate_output_names_and_dynamic_axes(output, output_names, output_dynamic_axes, output_idx):
# Depth first traversal to traverse through the entire output collecting output names and dynamic axes
if output is None:
return
elif isinstance(output, torch.Tensor):
# Naming the outputs with a hyphen ensures that there can be no input with the same
# name, preventing collisions with other NodeArgs (for example an input to forward called output0)
output_name = f'output-{output_idx[0]}'
output_idx[0] += 1
output_names.append(output_name)
output_dynamic_axes[output_name] = {}
for dim_idx in range(len(output.shape)):
output_dynamic_axes[output_name].update({dim_idx: f'{output_name}_dim{dim_idx}'})
return
if isinstance(output, abc.Sequence):
for value in output:
_populate_output_names_and_dynamic_axes(value, output_names, output_dynamic_axes, output_idx)
elif isinstance(output, abc.Mapping):
for _, value in sorted(output.items()):
_populate_output_names_and_dynamic_axes(value, output_names, output_dynamic_axes, output_idx)
else:
raise wrap_exception(ORTModuleIOError,
TypeError(f'ORTModule does not support the following model output type {type(output)}'))
output_names = []
output_dynamic_axes = {}
output_idx = [0]
_populate_output_names_and_dynamic_axes(module_output, output_names, output_dynamic_axes, output_idx)
return output_names, output_dynamic_axes
def _transform_output_to_flat_tuple(data):
"""Converts the data to a flat tuple by iterating over the entire data structure"""
def _flatten_data(data, flat_data):
# Recursively traverse over the data and populate the flat_data with torch.Tensors
if data is None:
return
elif isinstance(data, torch.Tensor):
identity = _OutputIdentityOp.apply
flat_data.append(identity(data))
elif isinstance(data, abc.Sequence):
for value in data:
_flatten_data(value, flat_data)
elif isinstance(data, abc.Mapping):
for _, value in sorted(data.items()):
_flatten_data(value, flat_data)
else:
raise wrap_exception(ORTModuleIOError,
TypeError(f'ORTModule does not support the following data type {type(data)}.'))
flat_data = []
_flatten_data(data, flat_data)
return tuple(flat_data)
class _FlattenedModule(torch.nn.Module):
def __init__(self, original_module):
super(_FlattenedModule, self).__init__()
self._original_module = original_module
        # Before `forward` is called, _input_info must be assigned by the owning ORTModule.
        # The updated input info is needed to expand args into *args, **kwargs.
self._input_info = None
def forward(self, *args):
new_args, new_kwargs = self._input_info.unflatten(args)
return _transform_output_to_flat_tuple(self._original_module(*new_args, **new_kwargs))
def parse_inputs_for_onnx_export(all_input_parameters, onnx_graph, schema, inputs, kwargs):
def _add_dynamic_shape(name, input):
dynamic_axes[name] = {}
for dim_idx in range(len(input.shape)):
dynamic_axes[name].update({dim_idx: f'{name}_dim{dim_idx}'})
return dynamic_axes
def _add_input(name, input, onnx_graph, onnx_graph_input_names):
"""Returns number of expanded non none inputs that _add_input processed"""
if input is None or isinstance(input, str):
# Drop all None and string inputs and return 0.
return 0
num_expanded_non_none_inputs = 0
if isinstance(input, abc.Sequence):
# If the input is a sequence (like a list), expand the list so that
# each element of the list is an input by itself.
for i, val in enumerate(input):
# Name each input with the index appended to the original name of the
# argument.
num_expanded_non_none_inputs += \
_add_input(f"{name}_{i}", val, onnx_graph, onnx_graph_input_names)
# Return here since the list by itself is not a valid input.
# All the elements of the list have already been added as inputs individually.
return num_expanded_non_none_inputs
elif isinstance(input, abc.Mapping):
# If the input is a mapping (like a dict), expand the dict so that
# each element of the dict is an input by itself.
for key, val in input.items():
num_expanded_non_none_inputs += \
_add_input(f"{name}_{key}", val, onnx_graph, onnx_graph_input_names)
# Return here since the dict by itself is not a valid input.
# All the elements of the dict have already been added as inputs individually.
return num_expanded_non_none_inputs
# InputInfo should contain all the names irrespective of whether they are
# a part of the onnx graph or not.
input_names.append(name)
if (onnx_graph is None or name in onnx_graph_input_names) and isinstance(input, torch.Tensor):
if input.requires_grad:
input_names_require_grad.append(name)
dynamic_axes.update(_add_dynamic_shape(name, input))
input_shape.append(list(input.size()))
# A single input non none input was processed, return 1
return 1
# Ignore optional inputs explicitly specified as None
# ONNX exporter may remove unused inputs
onnx_graph_input_names = []
if onnx_graph is not None:
onnx_graph_input_names = {inp.name for inp in onnx_graph.graph.input}
input_names = []
dynamic_axes = {}
input_names_require_grad = []
input_shape = []
var_positional_idx = 0
num_expanded_non_none_positional_inputs = 0
for input_idx, input_parameter in enumerate(all_input_parameters):
if input_parameter.kind == inspect.Parameter.VAR_POSITIONAL:
# VAR_POSITIONAL parameter carries all *args parameters from original forward method
for args_i in range(input_idx, len(inputs)):
name = f'{input_parameter.name}_{var_positional_idx}'
var_positional_idx += 1
inp = inputs[args_i]
num_expanded_non_none_positional_inputs += \
_add_input(name, inp, onnx_graph, onnx_graph_input_names)
elif input_parameter.kind == inspect.Parameter.POSITIONAL_ONLY or\
input_parameter.kind == inspect.Parameter.POSITIONAL_OR_KEYWORD or\
input_parameter.kind == inspect.Parameter.KEYWORD_ONLY:
# All positional non-*args and non-**kwargs are processed here
name = input_parameter.name
inp = None
input_idx += var_positional_idx
is_positional = True
if input_idx < len(inputs) and inputs[input_idx] is not None:
inp = inputs[input_idx]
elif name in kwargs and kwargs[name] is not None:
inp = kwargs[name]
is_positional = False
num_expanded_non_none_inputs_local = \
_add_input(name, inp, onnx_graph, onnx_graph_input_names)
if is_positional:
num_expanded_non_none_positional_inputs += num_expanded_non_none_inputs_local
elif input_parameter.kind == inspect.Parameter.VAR_KEYWORD:
# **kwargs is always the last argument of forward()
for name,inp in kwargs.items():
if name not in input_names:
_add_input(name, inp, onnx_graph, onnx_graph_input_names)
# input_names have been expanded so to get the correct number of non none
# positional names, we need to collect the num_expanded_non_none_positional_inputs.
return _InputInfo(names=input_names,
shape=input_shape,
require_grad_names=input_names_require_grad,
dynamic_axes=dynamic_axes,
schema=schema,
num_positionals=len(inputs),
num_expanded_positionals_non_none=num_expanded_non_none_positional_inputs,
keyword_names=list(kwargs.keys()))
def parse_outputs_for_onnx_export_and_extract_schema(module, inputs, kwargs):
# Perform a forward call to grab outputs
output_names = None
output_dynamic_axes = None
is_deepcopy = False
with torch.no_grad():
# Deepcopy inputs, since input values may change after model run.
sample_inputs_copy, sample_kwargs_copy = deepcopy_model_input(*inputs, **kwargs)
try:
# Deepcopy model, in case model is stateful and changes after model run.
model_copy = copy.deepcopy(module)
is_deepcopy = True
except Exception:
model_copy = module
warnings.warn("This model cannot be deep copied (or pickled), "
"which is a required step for stateful models to be properly exported to ONNX."
" Compute will continue, but unexpected results may occur!")
sample_outputs = model_copy(*sample_inputs_copy, **sample_kwargs_copy)
# Parse the output and extract the output_names and output_dynamic_axes to be used for onnx export
output_names, output_dynamic_axes = _parse_outputs_and_extract_names_and_dynamic_axes(sample_outputs)
output_schema = _extract_schema(sample_outputs)
if is_deepcopy:
del model_copy
gc.collect()
# Return output names, output dynamic axes and output schema
return output_names, output_dynamic_axes, output_schema
|
the-stack_0_19381 | """Heap queue algorithm (a.k.a. priority queue).
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
Usage:
heap = [] # creates an empty heap
heappush(heap, item) # pushes a new item on the heap
item = heappop(heap) # pops the smallest item from the heap
item = heap[0] # smallest item on the heap without popping it
heapify(x) # transforms list into a heap, in-place, in linear time
item = heapreplace(heap, item) # pops and returns smallest item, and adds
# new item; the heap size is unchanged
Our API differs from textbook heap algorithms as follows:
- We use 0-based indexing. This makes the relationship between the
index for a node and the indexes for its children slightly less
obvious, but is more suitable since Python uses 0-based indexing.
- Our heappop() method returns the smallest item, not the largest.
These two make it possible to view the heap as a regular Python list
without surprises: heap[0] is the smallest item, and heap.sort()
maintains the heap invariant!
"""
# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
__about__ = """Heap queues
[explanation by Francois Pinard]
Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
all k, counting elements from 0. For the sake of comparison,
non-existing elements are considered to be infinite. The interesting
property of a heap is that a[0] is always its smallest element.
The strange invariant above is meant to be an efficient memory
representation for a tournament. The numbers below are `k', not a[k]:
0
1 2
3 4 5 6
7 8 9 10 11 12 13 14
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
a usual binary tournament we see in sports, each cell is the winner
over the two cells it tops, and we can trace the winner down the tree
to see all opponents s/he had. However, in many computer applications
of such tournaments, we do not need to trace the history of a winner.
To be more memory efficient, when a winner is promoted, we try to
replace it by something else at a lower level, and the rule becomes
that a cell and the two cells it tops contain three different items,
but the top cell "wins" over the two topped cells.
If this heap invariant is protected at all time, index 0 is clearly
the overall winner. The simplest algorithmic way to remove it and
find the "next" winner is to move some loser (let's say cell 30 in the
diagram above) into the 0 position, and then percolate this new 0 down
the tree, exchanging values, until the invariant is re-established.
This is clearly logarithmic on the total number of items in the tree.
By iterating over all items, you get an O(n ln n) sort.
A nice feature of this sort is that you can efficiently insert new
items while the sort is going on, provided that the inserted items are
not "better" than the last 0'th element you extracted. This is
especially useful in simulation contexts, where the tree holds all
incoming events, and the "win" condition means the smallest scheduled
time. When an event schedules other events for execution, they are
scheduled into the future, so they can easily go into the heap. So, a
heap is a good structure for implementing schedulers (this is what I
used for my MIDI sequencer :-).
Various structures for implementing schedulers have been extensively
studied, and heaps are good for this, as they are reasonably speedy,
the speed is almost constant, and the worst case is not much different
than the average case. However, there are other representations which
are more efficient overall, yet the worst cases might be terrible.
Heaps are also very useful in big disk sorts. You most probably all
know that a big sort implies producing "runs" (which are pre-sorted
sequences whose size is usually related to the amount of CPU memory),
followed by merging passes for these runs, and this merging is often
very cleverly organised[1]. It is very important that the initial
sort produces the longest runs possible. Tournaments are a good way
to achieve that. If, using all the memory available to hold a tournament, you
replace and percolate items that happen to fit the current run, you'll
produce runs which are twice the size of the memory for random input,
and much better for input fuzzily ordered.
Moreover, if you output the 0'th item on disk and get an input which
may not fit in the current tournament (because the value "wins" over
the last output value), it cannot fit in the heap, so the size of the
heap decreases. The freed memory could be cleverly reused immediately
for progressively building a second heap, which grows at exactly the
same rate the first heap is melting. When the first heap completely
vanishes, you switch heaps and start a new run. Clever and quite
effective!
In a word, heaps are useful memory structures to know. I use them in
a few applications, and I think it is good to keep a `heap' module
around. :-)
--------------------
[1] The disk balancing algorithms which are current, nowadays, are
more annoying than clever, and this is a consequence of the seeking
capabilities of the disks. On devices which cannot seek, like big
tape drives, the story was quite different, and one had to be very
clever to ensure (far in advance) that each tape movement will be the
most effective possible (that is, will best participate at
"progressing" the merge). Some tapes were even able to read
backwards, and this was also used to avoid the rewinding time.
Believe me, real good tape sorts were quite spectacular to watch!
From all times, sorting has always been a Great Art! :-)
"""
import itertools
__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
'nlargest', 'nsmallest', 'heappushpop']
def heappush(heap, item):
"""Push item onto heap, maintaining the heap invariant."""
heap.append(item)
_siftdown(heap, 0, len(heap)-1)
def heappop(heap):
"""Pop the smallest item off the heap, maintaining the heap invariant."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup(heap, 0)
return returnitem
return lastelt
def heapreplace(heap, item):
"""Pop and return the current smallest value, and add the new item.
This is more efficient than heappop() followed by heappush(), and can be
more appropriate when using a fixed-size heap. Note that the value
returned may be larger than item! That constrains reasonable uses of
this routine unless written as part of a conditional replacement:
if item > heap[0]:
item = heapreplace(heap, item)
"""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup(heap, 0)
return returnitem
def heappushpop(heap, item):
"""Fast version of a heappush followed by a heappop."""
if heap and heap[0] < item:
item, heap[0] = heap[0], item
_siftup(heap, 0)
return item
def heapify(x):
"""Transform list into a heap, in-place, in O(len(x)) time."""
n = len(x)
# Transform bottom-up. The largest index there's any point to looking at
# is the largest with a child index in-range, so must have 2*i + 1 < n,
# or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
# j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is
# (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
for i in reversed(range(n//2)):
_siftup(x, i)
def _heappop_max(heap):
"""Maxheap version of a heappop."""
lastelt = heap.pop() # raises appropriate IndexError if heap is empty
if heap:
returnitem = heap[0]
heap[0] = lastelt
_siftup_max(heap, 0)
return returnitem
return lastelt
def _heapreplace_max(heap, item):
"""Maxheap version of a heappop followed by a heappush."""
returnitem = heap[0] # raises appropriate IndexError if heap is empty
heap[0] = item
_siftup_max(heap, 0)
return returnitem
def _heapify_max(x):
"""Transform list into a maxheap, in-place, in O(len(x)) time."""
n = len(x)
for i in reversed(range(n//2)):
_siftup_max(x, i)
# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
# is the index of a leaf with a possibly out-of-order value. Restore the
# heap invariant.
def _siftdown(heap, startpos, pos):
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if newitem < parent:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
# The child indices of heap index pos are already heaps, and we want to make
# a heap at index pos too. We do this by bubbling the smaller child of
# pos up (and so on with that child's children, etc) until hitting a leaf,
# then using _siftdown to move the oddball originally at index pos into place.
#
# We *could* break out of the loop as soon as we find a pos where newitem <=
# both its children, but turns out that's not a good idea, and despite that
# many books write the algorithm that way. During a heap pop, the last array
# element is sifted in, and that tends to be large, so that comparing it
# against values starting from the root usually doesn't pay (= usually doesn't
# get us out of the loop early). See Knuth, Volume 3, where this is
# explained and quantified in an exercise.
#
# Cutting the # of comparisons is important, since these routines have no
# way to extract "the priority" from an array element, so that intelligence
# is likely to be hiding in custom comparison methods, or in array elements
# storing (priority, record) tuples. Comparisons are thus potentially
# expensive.
#
# On random arrays of length 1000, making this change cut the number of
# comparisons made by heapify() a little, and those made by exhaustive
# heappop() a lot, in accord with theory. Here are typical results from 3
# runs (3 just to demonstrate how small the variance is):
#
# Compares needed by heapify Compares needed by 1000 heappops
# -------------------------- --------------------------------
# 1837 cut to 1663 14996 cut to 8680
# 1855 cut to 1659 14966 cut to 8678
# 1847 cut to 1660 15024 cut to 8703
#
# Building the heap by using heappush() 1000 times instead required
# 2198, 2148, and 2219 compares: heapify() is more efficient, when
# you can use it.
#
# The total compares needed by list.sort() on the same lists were 8627,
# 8627, and 8632 (this should be compared to the sum of heapify() and
# heappop() compares): list.sort() is (unsurprisingly!) more efficient
# for sorting.
def _siftup(heap, pos):
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the smaller child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of smaller child.
rightpos = childpos + 1
if rightpos < endpos and not heap[childpos] < heap[rightpos]:
childpos = rightpos
# Move the smaller child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown(heap, startpos, pos)
def _siftdown_max(heap, startpos, pos):
'Maxheap variant of _siftdown'
newitem = heap[pos]
# Follow the path to the root, moving parents down until finding a place
# newitem fits.
while pos > startpos:
parentpos = (pos - 1) >> 1
parent = heap[parentpos]
if parent < newitem:
heap[pos] = parent
pos = parentpos
continue
break
heap[pos] = newitem
def _siftup_max(heap, pos):
'Maxheap variant of _siftup'
endpos = len(heap)
startpos = pos
newitem = heap[pos]
# Bubble up the larger child until hitting a leaf.
childpos = 2*pos + 1 # leftmost child position
while childpos < endpos:
# Set childpos to index of larger child.
rightpos = childpos + 1
if rightpos < endpos and not heap[rightpos] < heap[childpos]:
childpos = rightpos
# Move the larger child up.
heap[pos] = heap[childpos]
pos = childpos
childpos = 2*pos + 1
# The leaf at pos is empty now. Put newitem there, and bubble it up
# to its final resting place (by sifting its parents down).
heap[pos] = newitem
_siftdown_max(heap, startpos, pos)
def merge(*iterables, key=None, reverse=False):
'''Merge multiple sorted inputs into a single sorted output.
Similar to sorted(itertools.chain(*iterables)) but returns a generator,
does not pull the data into memory all at once, and assumes that each of
the input streams is already sorted (smallest to largest).
>>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
[0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
If *key* is not None, applies a key function to each element to determine
its sort order.
>>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len))
['dog', 'cat', 'fish', 'horse', 'kangaroo']
'''
h = []
h_append = h.append
if reverse:
_heapify = _heapify_max
_heappop = _heappop_max
_heapreplace = _heapreplace_max
direction = -1
else:
_heapify = heapify
_heappop = heappop
_heapreplace = heapreplace
direction = 1
if key is None:
for order, it in enumerate(map(iter, iterables)):
try:
next = it.__next__
h_append([next(), order * direction, next])
except StopIteration:
pass
_heapify(h)
while len(h) > 1:
try:
while True:
value, order, next = s = h[0]
yield value
s[0] = next() # raises StopIteration when exhausted
_heapreplace(h, s) # restore heap condition
except StopIteration:
_heappop(h) # remove empty iterator
if h:
# fast case when only a single iterator remains
value, order, next = h[0]
yield value
yield from next.__self__
return
for order, it in enumerate(map(iter, iterables)):
try:
next = it.__next__
value = next()
h_append([key(value), order * direction, value, next])
except StopIteration:
pass
_heapify(h)
while len(h) > 1:
try:
while True:
key_value, order, value, next = s = h[0]
yield value
value = next()
s[0] = key(value)
s[2] = value
_heapreplace(h, s)
except StopIteration:
_heappop(h)
if h:
key_value, order, value, next = h[0]
yield value
yield from next.__self__
# Algorithm notes for nlargest() and nsmallest()
# ==============================================
#
# Make a single pass over the data while keeping the k most extreme values
# in a heap. Memory consumption is limited to keeping k values in a list.
#
# Measured performance for random inputs:
#
# number of comparisons
# n inputs k-extreme values (average of 5 trials) % more than min()
# ------------- ---------------- --------------------- -----------------
# 1,000 100 3,317 231.7%
# 10,000 100 14,046 40.5%
# 100,000 100 105,749 5.7%
# 1,000,000 100 1,007,751 0.8%
# 10,000,000 100 10,009,401 0.1%
#
# Theoretical number of comparisons for k smallest of n random inputs:
#
# Step Comparisons Action
# ---- -------------------------- ---------------------------
# 1 1.66 * k heapify the first k-inputs
# 2 n - k compare remaining elements to top of heap
# 3 k * (1 + lg2(k)) * ln(n/k) replace the topmost value on the heap
# 4 k * lg2(k) - (k/2) final sort of the k most extreme values
#
# Combining and simplifying for a rough estimate gives:
#
# comparisons = n + k * (log(k, 2) * log(n/k) + log(k, 2) + log(n/k))
#
# Computing the number of comparisons for step 3:
# -----------------------------------------------
# * For the i-th new value from the iterable, the probability of being in the
# k most extreme values is k/i. For example, the probability of the 101st
# value seen being in the 100 most extreme values is 100/101.
# * If the value is a new extreme value, the cost of inserting it into the
# heap is 1 + log(k, 2).
# * The probability times the cost gives:
# (k/i) * (1 + log(k, 2))
# * Summing across the remaining n-k elements gives:
# sum((k/i) * (1 + log(k, 2)) for i in range(k+1, n+1))
# * This reduces to:
# (H(n) - H(k)) * k * (1 + log(k, 2))
# * Where H(n) is the n-th harmonic number estimated by:
# gamma = 0.5772156649
# H(n) = log(n, e) + gamma + 1 / (2 * n)
# http://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#Rate_of_divergence
# * Substituting the H(n) formula:
# comparisons = k * (1 + log(k, 2)) * (log(n/k, e) + (1/n - 1/k) / 2)
#
# Worst-case for step 3:
# ----------------------
# In the worst case, the input data is reversed sorted so that every new element
# must be inserted in the heap:
#
# comparisons = 1.66 * k + log(k, 2) * (n - k)
#
# Alternative Algorithms
# ----------------------
# Other algorithms were not used because they:
# 1) Took much more auxiliary memory,
# 2) Made multiple passes over the data.
# 3) Made more comparisons in common cases (small k, large n, semi-random input).
# See the more detailed comparison of approach at:
# http://code.activestate.com/recipes/577573-compare-algorithms-for-heapqsmallest
def nsmallest(n, iterable, key=None):
"""Find the n smallest elements in a dataset.
Equivalent to: sorted(iterable, key=key)[:n]
"""
# Short-cut for n==1 is to use min()
if n == 1:
it = iter(iterable)
sentinel = object()
if key is None:
result = min(it, default=sentinel)
else:
result = min(it, default=sentinel, key=key)
return [] if result is sentinel else [result]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key)[:n]
# When key is none, use simpler decoration
if key is None:
it = iter(iterable)
# put the range(n) first so that zip() doesn't
# consume one too many elements from the iterator
result = [(elem, i) for i, elem in zip(range(n), it)]
if not result:
return result
_heapify_max(result)
top = result[0][0]
order = n
_heapreplace = _heapreplace_max
for elem in it:
if elem < top:
_heapreplace(result, (elem, order))
top, _order = result[0]
order += 1
result.sort()
return [elem for (elem, order) in result]
# General case, slowest method
it = iter(iterable)
result = [(key(elem), i, elem) for i, elem in zip(range(n), it)]
if not result:
return result
_heapify_max(result)
top = result[0][0]
order = n
_heapreplace = _heapreplace_max
for elem in it:
k = key(elem)
if k < top:
_heapreplace(result, (k, order, elem))
top, _order, _elem = result[0]
order += 1
result.sort()
return [elem for (k, order, elem) in result]
def nlargest(n, iterable, key=None):
"""Find the n largest elements in a dataset.
Equivalent to: sorted(iterable, key=key, reverse=True)[:n]
"""
# Short-cut for n==1 is to use max()
if n == 1:
it = iter(iterable)
sentinel = object()
if key is None:
result = max(it, default=sentinel)
else:
result = max(it, default=sentinel, key=key)
return [] if result is sentinel else [result]
# When n>=size, it's faster to use sorted()
try:
size = len(iterable)
except (TypeError, AttributeError):
pass
else:
if n >= size:
return sorted(iterable, key=key, reverse=True)[:n]
# When key is none, use simpler decoration
if key is None:
it = iter(iterable)
result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)]
if not result:
return result
heapify(result)
top = result[0][0]
order = -n
_heapreplace = heapreplace
for elem in it:
if top < elem:
_heapreplace(result, (elem, order))
top, _order = result[0]
order -= 1
result.sort(reverse=True)
return [elem for (elem, order) in result]
# General case, slowest method
it = iter(iterable)
result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)]
if not result:
return result
heapify(result)
top = result[0][0]
order = -n
_heapreplace = heapreplace
for elem in it:
k = key(elem)
if top < k:
_heapreplace(result, (k, order, elem))
top, _order, _elem = result[0]
order -= 1
result.sort(reverse=True)
return [elem for (k, order, elem) in result]
# If available, use C implementation
try:
from _heapq import *
except ImportError:
pass
try:
from _heapq import _heapreplace_max
except ImportError:
pass
try:
from _heapq import _heapify_max
except ImportError:
pass
try:
from _heapq import _heappop_max
except ImportError:
pass
# -------------------------- --------------------------------
class PriorityQueue():
def __init__(self):
self.pq = [] # list of entries arranged in a heap
self.entry_finder = {} # mapping of tasks to entries
self.REMOVED = '<removed-task>' # placeholder for a removed task
self.counter = itertools.count() # unique sequence count
def __str__(self):
return "{0}".format(self.pq)
def addTask(self, task, priority=0):
'Add a new task or update the priority of an existing task'
if task in self.entry_finder:
self.removeTask(task)
count = next(self.counter)
entry = [priority, count, task]
self.entry_finder[task] = entry
heappush(self.pq, entry)
def removeTask(self, task):
'Mark an existing task as REMOVED. Print warning if not found.'
try:
entry = self.entry_finder.pop(task)
entry[-1] = self.REMOVED
return 0
        except KeyError:
prt_str = "Warning: removeTask() task {0} has not been in priority queue prior to this call.".format(task)
print(prt_str)
return prt_str
def isAlreadyRemovedOrRemoveTask(self, task):
        'Mark an existing task as REMOVED. Return 1 if it was not present (already removed or never added), else 0.'
try:
entry = self.entry_finder.pop(task)
entry[-1] = self.REMOVED
return 0
        except KeyError:
            # Task was not present (already removed or never added).
return 1
def popTask(self):
'Remove and return the lowest priority task. Raise KeyError if empty.'
while self.pq:
priority, count, task = heappop(self.pq)
if task is not self.REMOVED:
del self.entry_finder[task]
return task
raise KeyError('pop from an empty priority queue')
def popTaskPriority(self):
'Remove and return the lowest priority task. Raise KeyError if empty.'
while self.pq:
priority, count, task = heappop(self.pq)
if task is not self.REMOVED:
del self.entry_finder[task]
return (task, priority)
raise KeyError('pop from an empty priority queue')
def getBestTask(self):
'Return the lowest priority task without removing. Raise KeyError if empty.'
while self.pq:
priority, count, task = heappop(self.pq)
if task is not self.REMOVED:
entry = [priority, count, task]
heappush(self.pq, entry)
return task
raise KeyError('pop from an empty priority queue')
def findBestTask(self):
        '''Return the lowest priority task without removing it (self-written helper).
        Avoids the pop/re-push done by getBestTask() when the top entry is still valid.
        Returns None if the heap is empty.'''
try:
entry = self.pq[0]
            # if this task has not been removed by removeTask(), the best entry is here
            if entry[2] != self.REMOVED:
task = entry[2]
# if best task has been removed by removeTask() the slower functionality is called
else:
task = self.getBestTask()
        except (IndexError, KeyError):
task = None
return task
def hasElements(self):
if self.pq and self.findBestTask() is not None:
return True
else:
return False
# -------------------------- --------------------------------
if __name__ == "__main__":
import doctest
print(doctest.testmod())
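    # Illustrative sketch (added, not part of the original module): exercises the
    # heap invariant described in __about__ and the PriorityQueue wrapper above.
    # All names below are local to this demo.
    data = [5, 1, 4, 2, 3]
    heap = []
    for value in data:
        heappush(heap, value)
    assert heap[0] == min(data)  # a[0] is always the smallest element
    assert [heappop(heap) for _ in data] == sorted(data)
    print(nsmallest(2, data), nlargest(2, data))  # -> [1, 2] [5, 4]
    tasks = PriorityQueue()
    tasks.addTask("write spec", priority=1)
    tasks.addTask("write code", priority=5)
    tasks.addTask("release product", priority=7)
    tasks.removeTask("write code")
    print(tasks.popTaskPriority())  # -> ('write spec', 1)
    # A fresh, empty queue is created below for further experimentation.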
pq = PriorityQueue() |
the-stack_0_19382 | # vim: ft=python fileencoding=utf-8 sw=4 et sts=4
"""Test vimiv's trash management."""
import os
import tempfile
import time
from unittest import TestCase, main
from gi import require_version
require_version('Gtk', '3.0')
from vimiv import trash_manager
from vimiv.helpers import get_user_data_dir
class TrashTest(TestCase):
"""TrashManager Tests."""
@classmethod
def setUpClass(cls):
# Run in tmp
cls.tmpdir = tempfile.TemporaryDirectory(prefix="vimiv-tests-")
os.environ["XDG_DATA_HOME"] = cls.tmpdir.name
cls.trash_manager = trash_manager.TrashManager()
cls.files_directory = os.path.join(get_user_data_dir(),
"Trash/files")
cls.info_directory = os.path.join(get_user_data_dir(),
"Trash/info")
cls.testfile = ""
cls.basename = ""
def setUp(self):
_, self.testfile = tempfile.mkstemp()
self.basename = os.path.basename(self.testfile)
self._create_file()
self.assertTrue(os.path.exists(self.testfile))
def test_move_to_trash(self):
"""Move a file to the trash directory."""
self.trash_manager.delete(self.testfile)
self.assertFalse(os.path.exists(self.testfile)) # File should not exist
# Trash file should exist and contain "something"
expected_trashfile = os.path.join(self.files_directory, self.basename)
self.assertTrue(os.path.exists(expected_trashfile))
with open(expected_trashfile) as f:
content = f.read()
self.assertEqual(content, "something\n")
# Info file should exist and contain path, and date
expected_infofile = os.path.join(self.info_directory,
self.basename + ".trashinfo")
self.assertTrue(os.path.exists(expected_infofile))
with open(expected_infofile) as f:
lines = f.readlines()
self.assertEqual(lines[0], "[Trash Info]\n")
self.assertEqual(lines[1], "Path=%s\n" % (self.testfile))
self.assertIn("DeletionDate=%s" % (time.strftime("%Y%m")), lines[2])
def test_undelete_from_trash(self):
"""Undelete a file from trash."""
# First delete a file
self.trash_manager.delete(self.testfile)
self.assertFalse(os.path.exists(self.testfile))
# Now undelete it
self.trash_manager.undelete(self.basename)
self.assertTrue(os.path.exists(self.testfile))
def test_delete_file_with_same_name(self):
"""Delete a file with the same name more than twice."""
def run_one_round(suffix=""):
"""Test if self.testfile + suffix exists in trash."""
self.trash_manager.delete(self.testfile)
self.assertFalse(os.path.exists(self.testfile))
expected_trashfile = \
os.path.join(self.files_directory, self.basename + suffix)
self.assertTrue(os.path.exists(expected_trashfile))
self._create_file()
run_one_round()
run_one_round(".2")
run_one_round(".3")
def _create_file(self):
with open(self.testfile, "w") as f:
f.write("something\n")
def tearDown(self):
if os.path.exists(self.testfile):
os.remove(self.testfile)
if __name__ == "__main__":
main()
|
the-stack_0_19383 | # Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Common utility functions for RPN and RetinaNet minibtach blobs preparation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
import logging
import numpy as np
import threading
from core.config import cfg
from modeling.generate_anchors import generate_anchors
import utils.boxes as box_utils
logger = logging.getLogger(__name__)
# octave and aspect fields are only used on RetinaNet. Octave corresponds to the
# scale of the anchor and aspect denotes which aspect ratio is used in the range
# of aspect ratios
FieldOfAnchors = namedtuple(
'FieldOfAnchors', [
'field_of_anchors', 'num_cell_anchors', 'stride', 'field_size',
'octave', 'aspect'
]
)
# Cache for memoizing _get_field_of_anchors
_threadlocal_foa = threading.local()
def get_field_of_anchors(
stride, anchor_sizes, anchor_aspect_ratios, octave=None, aspect=None
):
global _threadlocal_foa
if not hasattr(_threadlocal_foa, 'cache'):
_threadlocal_foa.cache = {}
cache_key = str(stride) + str(anchor_sizes) + str(anchor_aspect_ratios)
if cache_key in _threadlocal_foa.cache:
return _threadlocal_foa.cache[cache_key]
# Anchors at a single feature cell
cell_anchors = generate_anchors(
stride=stride, sizes=anchor_sizes, aspect_ratios=anchor_aspect_ratios
)
num_cell_anchors = cell_anchors.shape[0]
# Generate canonical proposals from shifted anchors
# Enumerate all shifted positions on the (H, W) grid
# FPN max size
fpn_max_size = cfg.FPN.COARSEST_STRIDE * np.ceil(
cfg.TRAIN.MAX_SIZE / float(cfg.FPN.COARSEST_STRIDE)
)
# RefineNet max size
if cfg.MODEL.REFINE_ON:
refinenet_stride = (1./cfg.REFINENET.SPATIAL_SCALE) * cfg.REFINENET.STRIDE
refinenet_max_size = refinenet_stride * np.ceil(
cfg.TRAIN.MAX_SIZE / float(refinenet_stride)
)
fpn_max_size = max(fpn_max_size, refinenet_max_size)
field_size = int(np.ceil(fpn_max_size / float(stride)))
shifts = np.arange(0, field_size) * stride
shift_x, shift_y = np.meshgrid(shifts, shifts)
shift_x = shift_x.ravel()
shift_y = shift_y.ravel()
shifts = np.vstack((shift_x, shift_y, shift_x, shift_y)).transpose()
    # Broadcast anchors over shifts to enumerate all anchors at all positions
# in the (H, W) grid:
# - add A cell anchors of shape (1, A, 4) to
# - K shifts of shape (K, 1, 4) to get
# - all shifted anchors of shape (K, A, 4)
# - reshape to (K*A, 4) shifted anchors
A = num_cell_anchors
K = shifts.shape[0]
field_of_anchors = (
cell_anchors.reshape((1, A, 4)) +
shifts.reshape((1, K, 4)).transpose((1, 0, 2))
)
field_of_anchors = field_of_anchors.reshape((K * A, 4))
foa = FieldOfAnchors(
field_of_anchors=field_of_anchors.astype(np.float32),
num_cell_anchors=num_cell_anchors,
stride=stride,
field_size=field_size,
octave=octave,
aspect=aspect
)
_threadlocal_foa.cache[cache_key] = foa
return foa
def unmap(data, count, inds, fill=0):
"""Unmap a subset of item (data) back to the original set of items (of
size count)"""
if count == len(inds):
return data
if len(data.shape) == 1:
ret = np.empty((count, ), dtype=data.dtype)
ret.fill(fill)
ret[inds] = data
else:
ret = np.empty((count, ) + data.shape[1:], dtype=data.dtype)
ret.fill(fill)
ret[inds, :] = data
return ret
def compute_targets(ex_rois, gt_rois, weights=(1.0, 1.0, 1.0, 1.0)):
"""Compute bounding-box regression targets for an image."""
return box_utils.bbox_transform_inv(ex_rois, gt_rois, weights).astype(
np.float32, copy=False
)
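# Minimal usage sketch (added; not from the original Detectron code, and it assumes
# the package imports at the top of this file resolve): demonstrates unmap(), which
# scatters a subset of values back into a full-sized array.
if __name__ == '__main__':
    labels_subset = np.array([1, 0, 1], dtype=np.int32)
    keep_inds = np.array([0, 3, 4])
    full_labels = unmap(labels_subset, count=6, inds=keep_inds, fill=-1)
    print(full_labels)  # expected: [ 1 -1 -1  0  1 -1]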
|
the-stack_0_19384 | import os
from concurrent.futures import ProcessPoolExecutor
from functools import partial
import numpy as np
from tqdm import tqdm
from src.etc import audio
from src.tac.core.wavenet_vocoder.util import (is_mulaw, is_mulaw_quantize,
mulaw, mulaw_quantize)
from src.tac.hparams import hparams
from src.tac.preprocessing.audio.utterance_processor import process_utterance
from src.tac.preprocessing.parser.LJSpeechDatasetParser import \
LJSpeechDatasetParser
def get_wav_dir(caching_dir: str) -> str:
''' The directory to write the preprocessed wav into. '''
return os.path.join(caching_dir, 'preprocessing/audio')
def get_mel_dir(caching_dir: str) -> str:
  ''' The directory to write the mel spectrograms into. '''
  return os.path.join(caching_dir, 'preprocessing/mels')
def get_lin_dir(caching_dir: str) -> str:
''' The directory to write the linear spectrograms into. '''
return os.path.join(caching_dir, 'preprocessing/linear')
class WavProcessor():
def __init__(self, hp: hparams, caching_dir: str):
self.hp = hp
self._set_paths(caching_dir)
self._ensure_folders_exist()
def process(self, dataset: LJSpeechDatasetParser, n_jobs):
utterances = dataset.parse()
executor = ProcessPoolExecutor(max_workers=n_jobs)
futures = []
for basename, text, wav in utterances:
process = partial(process_utterance, self.mel_dir, self.linear_dir, self.wav_dir, basename, wav, self.hp)
x = executor.submit(process)
futures.append(x)
self.processing_result = [future.result() for future in tqdm(futures) if future.result() is not None]
return self.processing_result
def show_stats(self):
assert self.processing_result
timesteps_sum = sum([int(m[1]) for m in self.processing_result])
timesteps_max = max([int(m[1]) for m in self.processing_result])
mel_frames_sum = sum([int(m[2]) for m in self.processing_result])
mel_frames_max = max([int(m[2]) for m in self.processing_result])
hours = timesteps_sum / self.hp.sample_rate / 3600
print('Written {} utterances, {} mel frames, {} audio timesteps, ({:.2f} hours)'.format(len(self.processing_result), mel_frames_sum, timesteps_sum, hours))
print('Max audio timesteps length: {}'.format(timesteps_max))
print('Max mel frames length: {}'.format(mel_frames_max))
def _set_paths(self, caching_dir: str):
self.caching_dir = caching_dir
self.mel_dir = get_mel_dir(caching_dir)
self.wav_dir = get_wav_dir(caching_dir)
self.linear_dir = get_lin_dir(caching_dir)
def _ensure_folders_exist(self):
os.makedirs(self.caching_dir, exist_ok=True)
os.makedirs(self.mel_dir, exist_ok=True)
os.makedirs(self.wav_dir, exist_ok=True)
os.makedirs(self.linear_dir, exist_ok=True)
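# Hypothetical usage sketch (added for illustration; the dataset path and the
# LJSpeechDatasetParser constructor argument are assumptions, and the project
# imports at the top of this module must resolve for this to run):
if __name__ == '__main__':
  dataset = LJSpeechDatasetParser('/path/to/LJSpeech-1.1')  # placeholder path
  processor = WavProcessor(hparams, caching_dir='/tmp/tac_cache')
  processor.process(dataset, n_jobs=4)
  processor.show_stats()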
|
the-stack_0_19385 | import argparse
import logging
import os.path
from configparser import ConfigParser
from typing import Any, Dict, List
from minio import Minio
from redis import StrictRedis
from .__version__ import __version__
from .backend import KartonBackend
from .config import Config
from .karton import Consumer, LogConsumer
log = logging.getLogger(__name__)
class CliLogger(LogConsumer):
identity = "karton.cli-logger"
def process_log(self, event: Dict[str, Any]) -> Any:
if event.get("type") == "log":
level = event.get("levelname")
name = event.get("name")
msg = event.get("message")
print(f"[{level}] {name}: {msg}")
def get_user_option(prompt: str, default: str) -> str:
user_input = input(f"{prompt}\n[{default}] ")
print("") # just for style
return user_input.strip() or default
def configuration_wizard(config_filename: str) -> None:
config = ConfigParser()
log.info("Configuring MinIO")
minio_access_key = "minioadmin"
minio_secret_key = "minioadmin"
minio_address = "localhost:9000"
minio_bucket = "karton"
minio_secure = "0"
while True:
minio_access_key = get_user_option(
"Enter the MinIO access key", default=minio_access_key
)
minio_secret_key = get_user_option(
"Enter the MinIO secret key", default=minio_secret_key
)
minio_address = get_user_option(
"Enter the MinIO address", default=minio_address
)
minio_bucket = get_user_option(
"Enter the MinIO bucket to use", default=minio_bucket
)
minio_secure = get_user_option('Use SSL ("0", "1")?', default=minio_secure)
log.info("Testing MinIO connection...")
minio = Minio(
endpoint=minio_address,
access_key=minio_access_key,
secret_key=minio_secret_key,
secure=bool(int(minio_secure)),
)
bucket_exists = False
try:
bucket_exists = minio.bucket_exists(minio_bucket)
except Exception as e:
log.info("Error while connecting to MinIO: %s", e, exc_info=True)
retry = get_user_option(
'Do you want to try with different MinIO settings ("yes", "no")?',
default="yes",
)
if retry != "yes":
log.info("Quitting configuration")
return
else:
continue
log.info("Connected to MinIO successfully")
if not bucket_exists:
log.info(
(
"The required bucket %s does not exist. To create it automatically,"
" start karton-system with --setup-bucket flag"
),
minio_bucket,
)
break
config["minio"] = {
"access_key": minio_access_key,
"secret_key": minio_secret_key,
"address": minio_address,
"bucket": minio_bucket,
"secure": minio_secure,
}
log.info("Configuring Redis")
redis_host = "localhost"
redis_port = "6379"
while True:
redis_host = get_user_option("Enter the Redis host", default=redis_host)
redis_port = get_user_option("Enter the Redis port", default=redis_port)
redis_password = get_user_option(
"Enter the Redis password (enter to skip)", default=""
)
log.info("Testing the Redis connection...")
redis = StrictRedis(
host=redis_host,
port=int(redis_port),
password=redis_password or None,
decode_responses=True,
)
try:
redis.ping()
except Exception as e:
log.info("Error while connecting to Redis: %s", e, exc_info=True)
retry = get_user_option(
'Do you want to try with different Redis settings ("yes", "no")?',
default="yes",
)
if retry != "yes":
log.info("Quitting configuration")
return
else:
continue
log.info("Connected to Redis successfully")
break
config["redis"] = {
"host": redis_host,
"port": str(int(redis_port)),
}
if redis_password:
config["redis"]["password"] = redis_password
with open(config_filename, "w") as configfile:
config.write(configfile)
log.info("Saved the new configuration file in %s", os.path.abspath(config_filename))
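# For reference, a karton.ini written by the wizard above with the default answers
# would look roughly like this (illustrative only):
#
#   [minio]
#   access_key = minioadmin
#   secret_key = minioadmin
#   address = localhost:9000
#   bucket = karton
#   secure = 0
#
#   [redis]
#   host = localhost
#   port = 6379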
def print_bind_list(config: Config) -> None:
backend = KartonBackend(config=config)
for bind in backend.get_binds():
print(bind)
def delete_bind(config: Config, karton_name: str) -> None:
backend = KartonBackend(config=config)
binds = {k.identity: k for k in backend.get_binds()}
consumers = backend.get_online_consumers()
if karton_name not in binds:
log.error("Trying to delete a karton bind that doesn't exist")
return
if consumers.get(karton_name, []):
log.error(
"This bind has active replicas that need to be downscaled "
"before it can be deleted"
)
return
class KartonDummy(Consumer):
persistent = False
filters: List[Dict[str, Any]] = []
def process(self, task):
pass
karton = KartonDummy(config=config, identity=karton_name)
karton.shutdown = True
karton.loop()
def main() -> None:
parser = argparse.ArgumentParser(description="Your red pill to the karton-verse")
parser.add_argument("--version", action="version", version=__version__)
parser.add_argument("-c", "--config-file", help="Alternative configuration path")
parser.add_argument(
"-v", "--verbose", action="store_true", help="More verbose log output"
)
subparsers = parser.add_subparsers(dest="command", help="sub-command help")
subparsers.add_parser("list", help="List active karton binds")
logs_parser = subparsers.add_parser("logs", help="Start streaming logs")
logs_parser.add_argument(
"--filter",
help='Service identity filter e.g. "karton.classifier-*"',
required=False,
)
delete_parser = subparsers.add_parser("delete", help="Delete an unused karton bind")
delete_parser.add_argument("identity", help="Karton bind identity to remove")
configure_parser = subparsers.add_parser(
"configure", help="Create a new configuration file"
)
configure_parser.add_argument(
"-f",
"--force",
action="store_true",
help="Overwrite the existing configuration file",
)
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
if args.command == "configure":
config_filename = args.config_file or "./karton.ini"
log.debug("Creating a new configuration file in %s", config_filename)
if not args.force and os.path.exists(config_filename):
log.error(
(
"There's already a configuration file under %s. Please delete "
"it or specify a different filename using the -c argument"
),
config_filename,
)
return
configuration_wizard(config_filename)
return
try:
config = Config(args.config_file)
except RuntimeError as e:
log.error("Error while initializing the karton config: %s", e)
log.error(
(
"Please correct the configuration file or run `karton configure` "
"to initialize it"
)
)
return
if args.command == "list":
print_bind_list(config)
elif args.command == "delete":
karton_name = args.identity
print(
f"Are you sure you want to remove binds for karton {karton_name}?\n"
"Type in the karton name to confirm deletion."
)
if input().strip() == karton_name:
delete_bind(config, karton_name)
else:
log.info("Aborted.")
elif args.command == "logs":
CliLogger.logger_filter = args.filter
CliLogger(config=config).loop()
else:
parser.print_help()
|
the-stack_0_19386 |
from setuptools import setup, Extension
from Cython.Build import cythonize
sourcefiles = ['riwrapper.pyx', 'ritest.cc']
extensions = [Extension("riwrapper", sourcefiles,
include_dirs=['include', 'rilib'],
language="c++",
extra_compile_args=["-O3"])]
setup(
ext_modules = cythonize(extensions)
)
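# Typical build sketch (assumed, not part of the original file): compile the
# extension in place, then import it from the same directory, e.g.
#   python setup.py build_ext --inplace
#   python -c "import riwrapper"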
|
the-stack_0_19388 | # -*- coding: utf-8 -*-
"""Test the manager's citation utilities."""
from __future__ import unicode_literals
import os
import unittest
import time
from pybel import BELGraph
from pybel.constants import (
CITATION, CITATION_AUTHORS, CITATION_DATE, CITATION_NAME, CITATION_TYPE_PUBMED,
)
from pybel.dsl import protein
from pybel.manager.citation_utils import enrich_pubmed_citations, get_citations_by_pmids, sanitize_date
from pybel.manager.models import Citation
from pybel.testing.cases import TemporaryCacheMixin
from pybel.testing.utils import n
class TestSanitizeDate(unittest.TestCase):
"""Test sanitization of dates in various formats."""
def test_sanitize_1(self):
"""Test YYYY Mon DD."""
self.assertEqual('2012-12-19', sanitize_date('2012 Dec 19'))
def test_sanitize_2(self):
"""Test YYYY Mon."""
self.assertEqual('2012-12-01', sanitize_date('2012 Dec'))
def test_sanitize_3(self):
"""Test YYYY."""
self.assertEqual('2012-01-01', sanitize_date('2012'))
def test_sanitize_4(self):
"""Test YYYY Mon-Mon."""
self.assertEqual('2012-10-01', sanitize_date('2012 Oct-Dec'))
def test_sanitize_5(self):
"""Test YYYY Season."""
self.assertEqual('2012-03-01', sanitize_date('2012 Spring'))
def test_sanitize_6(self):
"""Test YYYY Mon DD-DD."""
self.assertEqual('2012-12-12', sanitize_date('2012 Dec 12-15'))
def test_sanitize_7(self):
"""Test YYYY Mon DD-Mon DD."""
self.assertEqual('2005-01-29', sanitize_date('2005 Jan 29-Feb 4'))
def test_sanitize_nope(self):
"""Test failure."""
self.assertEqual(None, sanitize_date('2012 Early Spring'))
class TestCitations(TemporaryCacheMixin):
"""Tests for citations."""
def setUp(self):
super(TestCitations, self).setUp()
self.u, self.v = (protein(n(), n()) for _ in range(2))
self.pmid = "9611787"
self.graph = BELGraph()
self.graph.add_increases(self.u, self.v, citation=self.pmid, evidence=n())
    def test_enrich(self):
        """Test that getting citations by PMIDs creates and caches Citation entries."""
self.assertEqual(0, self.manager.count_citations())
get_citations_by_pmids(manager=self.manager, pmids=[self.pmid])
self.assertEqual(1, self.manager.count_citations())
c = self.manager.get_citation_by_pmid(self.pmid)
self.assertIsNotNone(c)
self.assertIsInstance(c, Citation)
self.assertEqual(CITATION_TYPE_PUBMED, c.type)
self.assertEqual(self.pmid, c.reference)
def test_enrich_list(self):
pmids = [
'25818332',
'27003210',
'26438529',
'26649137',
]
get_citations_by_pmids(manager=self.manager, pmids=pmids)
citation = self.manager.get_or_create_citation(type=CITATION_TYPE_PUBMED, reference='25818332')
self.assertIsNotNone(citation)
def test_enrich_list_grouped(self):
pmids = [
'25818332',
'27003210',
'26438529',
'26649137',
]
get_citations_by_pmids(manager=self.manager, pmids=pmids, group_size=2)
citation = self.manager.get_citation_by_pmid('25818332')
self.assertIsNotNone(citation)
def test_enrich_overwrite(self):
citation = self.manager.get_or_create_citation(type=CITATION_TYPE_PUBMED, reference=self.pmid)
self.manager.session.commit()
self.assertIsNone(citation.date)
self.assertIsNone(citation.name)
enrich_pubmed_citations(manager=self.manager, graph=self.graph)
_, _, d = list(self.graph.edges(data=True))[0]
citation_dict = d[CITATION]
self.assertIn(CITATION_NAME, citation_dict)
self.assertIn(CITATION_DATE, citation_dict)
self.assertEqual('1998-05-01', citation_dict[CITATION_DATE])
self.assertIn(CITATION_AUTHORS, citation_dict)
self.assertEqual(
{'Lewell XQ', 'Judd DB', 'Watson SP', 'Hann MM'},
set(citation_dict[CITATION_AUTHORS])
)
def test_enrich_graph(self):
enrich_pubmed_citations(manager=self.manager, graph=self.graph)
_, _, d = list(self.graph.edges(data=True))[0]
citation_dict = d[CITATION]
self.assertIn(CITATION_NAME, citation_dict)
self.assertIn(CITATION_DATE, citation_dict)
self.assertEqual('1998-05-01', citation_dict[CITATION_DATE])
self.assertIn(CITATION_AUTHORS, citation_dict)
self.assertEqual(
{'Lewell XQ', 'Judd DB', 'Watson SP', 'Hann MM'},
set(citation_dict[CITATION_AUTHORS])
)
@unittest.skipIf(os.environ.get('DB') == 'mysql', reason='MySQL collation is wonky')
def test_accent_duplicate(self):
"""Test when two authors, Gomez C and Goméz C are both checked that they are not counted as duplicates."""
g1 = 'Gomez C'
g2 = 'Gómez C'
pmid_1, pmid_2 = pmids = [
'29324713',
'29359844',
]
get_citations_by_pmids(manager=self.manager, pmids=pmids)
time.sleep(1)
x = self.manager.get_citation_by_pmid(pmid_1)
self.assertIsNotNone(x)
self.assertEqual('Martínez-Guillén JR', x.first.name)
self.assertIn(g1, self.manager.object_cache_author)
self.assertIn(g2, self.manager.object_cache_author)
a1 = self.manager.get_author_by_name(g1)
self.assertEqual(g1, a1.name)
a2 = self.manager.get_author_by_name(g2)
self.assertEqual(g2, a2.name)
|
the-stack_0_19389 | import csv
import os
import sys
from django.core.management.base import BaseCommand
from workshops.models import Award, Badge, Person
class Command(BaseCommand):
help = "Report inconsistencies in PDF certificates."
def add_arguments(self, parser):
parser.add_argument(
"path",
help="Path to root directory of certificates repository",
)
def handle(self, *args, **options):
"""Main entry point."""
path_to_root = options["path"]
badges = self.get_badges()
result = [
[
"which",
"badge",
"event",
"awarded_by",
"username",
"person",
"email",
"awarded",
]
]
for (name, badge) in badges:
db_records = self.get_db_records(badge)
db_people = db_records.keys()
cert_path = os.path.join(path_to_root, name)
if not os.path.isdir(cert_path):
print("No directory {0}".format(name), file=sys.stderr)
else:
file_people = self.get_file_people(cert_path)
self.missing(
result, "database-disk", name, db_people - file_people, db_records
)
self.missing(
result, "disk-database", name, file_people - db_people, db_records
)
csv.writer(sys.stdout).writerows(result)
def get_badges(self):
"""Get all available badges as list of lower-case name and badge pairs."""
return [(b.name.lower(), b) for b in Badge.objects.all()]
def get_db_records(self, badge):
"""Get set of usernames of all people with the given badge."""
objects = Award.objects.filter(badge=badge).values_list(
"person__username", "awarded", "event__slug", "awarded_by__username"
)
return dict(
(obj[0], {"awarded": obj[1], "event": obj[2], "awarded_by": obj[3]})
for obj in objects
)
def get_file_people(self, path):
"""Get names of all people with the given certificate."""
return set(
[os.path.splitext(e)[0] for e in os.listdir(path) if e.endswith(".pdf")]
)
def missing(self, report, title, kind, usernames, records):
"""Report missing usernames."""
for uid in usernames:
try:
receiver = Person.objects.get(username=uid)
except Person.DoesNotExist:
self.stderr.write("{0} does not exist".format(uid))
else:
name = receiver.full_name
if uid in records:
event = records[uid]["event"]
awarded = records[uid]["awarded"]
username = records[uid]["awarded_by"]
try:
awarded_by = Person.objects.get(username=username).full_name
except Person.DoesNotExist:
self.stderr.write(
"Person with username={0} who awarded {1} "
"does not exist".format(username, uid)
)
else:
report.append(
[
title,
kind,
event,
awarded_by,
uid,
name,
receiver.email,
awarded,
]
)
else:
event, awarded, awarded_by = "", "", ""
report.append(
[
title,
kind,
event,
awarded_by,
uid,
name,
receiver.email,
awarded,
]
)
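# Usage sketch (the command name is an assumption; it comes from this file's name
# under workshops/management/commands/, which is not shown here):
#   python manage.py <this_command_name> /path/to/certification-repository
# where the positional argument is the certificates root declared in add_arguments().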
|
the-stack_0_19390 | from abc import abstractmethod, ABC
from typing import List, Optional, Pattern, Dict
from datetime import datetime
from collections import namedtuple
import regex
from recognizers_text.extractor import Extractor, ExtractResult
from .constants import Constants, TimeTypeConstants
from .extractors import DateTimeExtractor
from .parsers import DateTimeParser, DateTimeParseResult
from .base_date import BaseDateParser
from .base_time import BaseTimeParser
from .base_datetime import BaseDateTimeParser
from .base_holiday import BaseHolidayParser
from .base_dateperiod import BaseDatePeriodParser
from .base_timeperiod import BaseTimePeriodParser
from .base_datetimeperiod import BaseDateTimePeriodParser
from .base_duration import BaseDurationParser
from .base_set import BaseSetParser
from .utilities import Token, merge_all_tokens, RegExpUtility, DateTimeOptions, DateTimeFormatUtil, DateUtils
MatchedIndex = namedtuple('MatchedIndex', ['matched', 'index'])
class MergedExtractorConfiguration:
@property
@abstractmethod
def date_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def time_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def date_time_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def date_period_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def time_period_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def date_time_period_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def holiday_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def duration_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def set_extractor(self) -> DateTimeExtractor:
raise NotImplementedError
@property
@abstractmethod
def integer_extractor(self) -> Extractor:
raise NotImplementedError
@property
@abstractmethod
def after_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def before_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def since_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def from_to_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def single_ambiguous_month_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def preposition_suffix_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def number_ending_pattern(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def filter_word_regex_list(self) -> List[Pattern]:
raise NotImplementedError
class BaseMergedExtractor(DateTimeExtractor):
@property
def extractor_type_name(self) -> str:
return Constants.SYS_DATETIME_MERGED
def __init__(self, config: MergedExtractorConfiguration, options: DateTimeOptions):
self.config = config
self.options = options
def extract(self, source: str, reference: datetime = None) -> List[ExtractResult]:
if reference is None:
reference = datetime.now()
result: List[ExtractResult] = list()
result = self.add_to(result, self.config.date_extractor.extract(source, reference), source)
result = self.add_to(result, self.config.time_extractor.extract(source, reference), source)
result = self.add_to(result, self.config.duration_extractor.extract(source, reference), source)
result = self.add_to(result, self.config.date_period_extractor.extract(source, reference), source)
result = self.add_to(result, self.config.date_time_extractor.extract(source, reference), source)
result = self.add_to(result, self.config.time_period_extractor.extract(source, reference), source)
result = self.add_to(result, self.config.date_time_period_extractor.extract(source, reference), source)
result = self.add_to(result, self.config.set_extractor.extract(source, reference), source)
result = self.add_to(result, self.config.holiday_extractor.extract(source, reference), source)
        # this should be at the end, since it needs the extractor to determine whether the previous text contains a time or not
result = self.add_to(result, self.number_ending_regex_match(source, result), source)
result = self.add_mod(result, source)
# filtering
if self.options & DateTimeOptions.CALENDAR:
result = self.check_calendar_filter_list(result, source)
result = sorted(result, key=lambda x: x.start)
return result
def add_to(self, destination: List[ExtractResult], source: List[ExtractResult], text: str) -> List[ExtractResult]:
for value in source:
if self.options & DateTimeOptions.SKIP_FROM_TO_MERGE and self.should_skip_from_merge(value):
continue
is_found = False
overlap_indexes: List[int] = list()
first_index = -1
for index, dest in enumerate(destination):
if dest.overlap(value):
is_found = True
if dest.cover(value):
if first_index == -1:
first_index = index
overlap_indexes.append(index)
else:
continue
if not is_found:
destination.append(value)
elif overlap_indexes:
temp_dst: List[ExtractResult] = list()
for index, dest in enumerate(destination):
if index not in overlap_indexes:
temp_dst.append(dest)
                # insert at the first overlap occurrence to keep the order
temp_dst.insert(first_index, value)
destination = temp_dst
return destination
def should_skip_from_merge(self, source: ExtractResult) -> bool:
return regex.search(self.config.from_to_regex, source.text)
def number_ending_regex_match(self, source: str, extract_results: List[ExtractResult]) -> List[ExtractResult]:
tokens: List[Token] = list()
for extract_result in extract_results:
if extract_result.type in [Constants.SYS_DATETIME_TIME, Constants.SYS_DATETIME_DATETIME]:
after_str = source[extract_result.start + extract_result.length:]
match = regex.search(self.config.number_ending_pattern, after_str)
if match:
new_time = RegExpUtility.get_group(match, 'newTime')
num_res = self.config.integer_extractor.extract(new_time)
if not num_res:
continue
start_position = extract_result.start + extract_result.length + match.group().index(new_time)
tokens.append(Token(start_position, start_position + len(new_time)))
return merge_all_tokens(tokens, source, Constants.SYS_DATETIME_TIME)
def add_mod(self, ers: List[ExtractResult], source: str) -> List[ExtractResult]:
return list(map(lambda x: self.add_mod_item(x, source), ers))
def add_mod_item(self, er: ExtractResult, source: str) -> ExtractResult:
before_str = source[0:er.start]
before = self.has_token_index(before_str.strip(), self.config.before_regex)
if before.matched:
mod_len = len(before_str) - before.index
er.length += mod_len
er.start -= mod_len
er.text = source[er.start:er.start + er.length]
after = self.has_token_index(before_str.strip(), self.config.after_regex)
if after.matched:
mod_len = len(before_str) - after.index
er.length += mod_len
er.start -= mod_len
er.text = source[er.start:er.start + er.length]
since = self.has_token_index(before_str.strip(), self.config.since_regex)
if since.matched:
mod_len = len(before_str) - since.index
er.length += mod_len
er.start -= mod_len
er.text = source[er.start:er.start + er.length]
return er
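    # Example (illustrative, with made-up input text): for "before 3pm", the TIME
    # result covering "3pm" is widened by add_mod_item above to also include the
    # leading "before ", so the parser can later attach the BEFORE modifier.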
def has_token_index(self, source: str, pattern: Pattern) -> MatchedIndex:
match = regex.search(pattern, source)
if match:
return MatchedIndex(True, match.start())
return MatchedIndex(False, -1)
def check_calendar_filter_list(self, ers: List[ExtractResult], source: str) -> List[ExtractResult]:
for er in reversed(ers):
for pattern in self.config.filter_word_regex_list:
if regex.search(pattern, er.text):
ers.remove(er)
break
return ers
class MergedParserConfiguration(ABC):
@property
@abstractmethod
def before_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def after_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def since_regex(self) -> Pattern:
raise NotImplementedError
@property
@abstractmethod
def date_parser(self) -> BaseDateParser:
raise NotImplementedError
@property
@abstractmethod
def holiday_parser(self) -> BaseHolidayParser:
raise NotImplementedError
@property
@abstractmethod
def time_parser(self) -> BaseTimeParser:
raise NotImplementedError
@property
@abstractmethod
def date_time_parser(self) -> BaseDateTimeParser:
raise NotImplementedError
@property
@abstractmethod
def date_period_parser(self) -> BaseDatePeriodParser:
raise NotImplementedError
@property
@abstractmethod
def time_period_parser(self) -> BaseTimePeriodParser:
raise NotImplementedError
@property
@abstractmethod
def date_time_period_parser(self) -> BaseDateTimePeriodParser:
raise NotImplementedError
@property
@abstractmethod
def duration_parser(self) -> BaseDurationParser:
raise NotImplementedError
@property
@abstractmethod
def set_parser(self) -> BaseSetParser:
raise NotImplementedError
class BaseMergedParser(DateTimeParser):
@property
def parser_type_name(self) -> str:
return Constants.SYS_DATETIME_MERGED
def __init__(self, config: MergedParserConfiguration, options: DateTimeOptions):
self.__date_min_value = DateTimeFormatUtil.format_date(DateUtils.min_value)
self.__date_time_min_value = DateTimeFormatUtil.format_date_time(DateUtils.min_value)
self.config = config
self.options = options
def parse(self, source: ExtractResult, reference: datetime = None) -> Optional[DateTimeParseResult]:
if not reference:
reference = datetime.now()
result = None
has_before = False
has_after = False
has_since = False
mod_str = ''
before_match = self.config.before_regex.match(source.text)
after_match = self.config.after_regex.match(source.text)
since_match = self.config.since_regex.match(source.text)
if before_match:
has_before = True
source.start += before_match.end()
source.length -= before_match.end()
source.text = source.text[before_match.end():]
mod_str = before_match.group()
elif after_match:
has_after = True
source.start += after_match.end()
source.length -= after_match.end()
source.text = source.text[after_match.end():]
mod_str = after_match.group()
elif since_match:
has_since = True
source.start += since_match.end()
source.length -= since_match.end()
source.text = source.text[since_match.end():]
mod_str = since_match.group()
if source.type == Constants.SYS_DATETIME_DATE:
result = self.config.date_parser.parse(source, reference)
if not result.value:
result = self.config.holiday_parser.parse(source, reference)
elif source.type == Constants.SYS_DATETIME_TIME:
result = self.config.time_parser.parse(source, reference)
elif source.type == Constants.SYS_DATETIME_DATETIME:
result = self.config.date_time_parser.parse(source, reference)
elif source.type == Constants.SYS_DATETIME_DATEPERIOD:
result = self.config.date_period_parser.parse(source, reference)
elif source.type == Constants.SYS_DATETIME_TIMEPERIOD:
result = self.config.time_period_parser.parse(source, reference)
elif source.type == Constants.SYS_DATETIME_DATETIMEPERIOD:
result = self.config.date_time_period_parser.parse(source, reference)
elif source.type == Constants.SYS_DATETIME_DURATION:
result = self.config.duration_parser.parse(source, reference)
elif source.type == Constants.SYS_DATETIME_SET:
result = self.config.set_parser.parse(source, reference)
else:
return None
if has_before and result.value:
result.length += len(mod_str)
result.start -= len(mod_str)
result.text = mod_str + result.text
val = result.value
val.mod = TimeTypeConstants.BEFORE_MOD
result.value = val
if has_after and result.value:
result.length += len(mod_str)
result.start -= len(mod_str)
result.text = mod_str + result.text
val = result.value
val.mod = TimeTypeConstants.AFTER_MOD
result.value = val
if has_since and result.value:
result.length += len(mod_str)
result.start -= len(mod_str)
result.text = mod_str + result.text
val = result.value
val.mod = TimeTypeConstants.SINCE_MOD
result.value = val
if self.options & DateTimeOptions.SPLIT_DATE_AND_TIME and result.value and result.value.sub_date_time_entities:
result.value = self._date_time_resolution_for_split(result)
else:
result = self.set_parse_result(result, has_before, has_after, has_since)
return result
def set_parse_result(self, slot: DateTimeParseResult, has_before: bool, has_after: bool, has_since: bool) -> DateTimeParseResult:
slot.value = self._date_time_resolution(slot, has_before, has_after, has_since)
slot.type = f'{self.parser_type_name}.{self._determine_date_time_types(slot.type, has_before, has_after, has_since)}'
return slot
def _get_parse_result(self, extractor_result: Extractor, reference: datetime) -> DateTimeParseResult:
extractor_type = extractor_result.type
if extractor_type == Constants.SYS_DATETIME_DATE:
result = self.config.date_parser.parse(extractor_result, reference)
if not result.value:
result = self.config.holiday_parser.parse(extractor_result, reference)
return result
elif extractor_type == Constants.SYS_DATETIME_TIME:
return self.config.time_parser.parse(extractor_result, reference)
elif extractor_type == Constants.SYS_DATETIME_DATETIME:
return self.config.date_time_parser.parse(extractor_result, reference)
elif extractor_type == Constants.SYS_DATETIME_DATEPERIOD:
return self.config.date_period_parser.parse(extractor_result, reference)
elif extractor_type == Constants.SYS_DATETIME_TIMEPERIOD:
return self.config.time_period_parser.parse(extractor_result, reference)
elif extractor_type == Constants.SYS_DATETIME_DATETIMEPERIOD:
return self.config.date_time_period_parser.parse(extractor_result, reference)
elif extractor_type == Constants.SYS_DATETIME_DURATION:
return self.config.duration_parser.parse(extractor_result, reference)
elif extractor_type == Constants.SYS_DATETIME_SET:
return self.config.set_parser.parse(extractor_result, reference)
else:
return None
def _determine_date_time_types(self, dtype: str, has_before: bool, has_after: bool, has_since: bool) -> str:
if self.options & DateTimeOptions.SPLIT_DATE_AND_TIME:
if dtype == Constants.SYS_DATETIME_DATETIME:
return Constants.SYS_DATETIME_TIME
else:
if has_before or has_after or has_since:
if dtype == Constants.SYS_DATETIME_DATE:
return Constants.SYS_DATETIME_DATEPERIOD
if dtype == Constants.SYS_DATETIME_TIME:
return Constants.SYS_DATETIME_TIMEPERIOD
if dtype == Constants.SYS_DATETIME_DATETIME:
return Constants.SYS_DATETIME_DATETIMEPERIOD
return dtype
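    # In short: a before/after/since modifier promotes a point-like type to the
    # matching period type (DATE -> DATEPERIOD, TIME -> TIMEPERIOD, DATETIME ->
    # DATETIMEPERIOD), while the SPLIT_DATE_AND_TIME option reports DATETIME
    # results as TIME instead.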
def _date_time_resolution_for_split(self, slot: DateTimeParseResult) -> List[DateTimeParseResult]:
results = []
if slot.value.sub_date_time_entities:
sub_entities = slot.value.sub_date_time_entities
for sub_entity in sub_entities:
result = sub_entity
result.start += slot.start
results += self._date_time_resolution_for_split(result)
else:
slot.value = self._date_time_resolution(slot, False, False, False)
slot.type = f'{self.parser_type_name}.{self._determine_date_time_types(slot.type, False, False, False)}'
results.append(slot)
return results
def _date_time_resolution(self, slot: DateTimeParseResult, has_before, has_after, has_since) -> List[Dict[str, str]]:
if not slot:
return None
result = dict()
resolutions = []
dtype = slot.type
output_type = self._determine_date_time_types(dtype, has_before, has_after, has_since)
timex = slot.timex_str
value = slot.value
if not value:
return None
is_lunar = value.is_lunar
mod = value.mod
comment = value.comment
self._add_resolution_fields_any(result, Constants.TimexKey, timex)
self._add_resolution_fields_any(result, Constants.CommentKey, comment)
self._add_resolution_fields_any(result, Constants.ModKey, mod)
self._add_resolution_fields_any(result, Constants.TypeKey, output_type)
self._add_resolution_fields_any(result, Constants.IsLunarKey, str(is_lunar).lower() if is_lunar else '')
future_resolution = value.future_resolution
past_resolution = value.past_resolution
future = self._generate_from_resolution(dtype, future_resolution, mod)
past = self._generate_from_resolution(dtype, past_resolution, mod)
future_values = sorted(future.values())
past_values = sorted(past.values())
intersect_values = [i for i, j in zip(future_values, past_values) if i == j]
if len(intersect_values) == len(past_values) and len(intersect_values) == len(future_values):
if past_values:
self._add_resolution_fields_any(result, Constants.ResolveKey, past)
else:
if past_values:
self._add_resolution_fields_any(result, Constants.ResolveToPastKey, past)
            if future_values:
self._add_resolution_fields_any(result, Constants.ResolveToFutureKey, future)
if comment == 'ampm':
if 'resolve' in result:
self._resolve_ampm(result, 'resolve')
else:
self._resolve_ampm(result, 'resolveToPast')
self._resolve_ampm(result, 'resolveToFuture')
for value in result.values():
if isinstance(value, dict):
new_values = {}
self._add_resolution_fields(new_values, Constants.TimexKey, timex)
self._add_resolution_fields(new_values, Constants.ModKey, mod)
self._add_resolution_fields(new_values, Constants.TypeKey, output_type)
self._add_resolution_fields(new_values, Constants.IsLunarKey, str(is_lunar).lower() if is_lunar else '')
for inner_key in value:
new_values[inner_key] = value[inner_key]
resolutions.append(new_values)
if not past and not future:
o = {}
o['timex'] = timex
o['type'] = output_type
o['value'] = 'not resolved'
resolutions.append(o)
return {'values': resolutions}
def _add_resolution_fields_any(self, dic: Dict[str, str], key: str, value: object):
if isinstance(value, str):
if value:
dic[key] = value
else:
dic[key] = value
    def _add_resolution_fields(self, dic: Dict[str, str], key: str, value: str):
if value:
dic[key] = value
def _generate_from_resolution(self, dtype: str, resolution: Dict[str, str], mod: str) -> Dict[str, str]:
result = {}
if dtype == Constants.SYS_DATETIME_DATETIME:
self.__add_single_date_time_to_resolution(resolution, TimeTypeConstants.DATETIME, mod, result)
elif dtype == Constants.SYS_DATETIME_TIME:
self.__add_single_date_time_to_resolution(resolution, TimeTypeConstants.TIME, mod, result)
elif dtype == Constants.SYS_DATETIME_DATE:
self.__add_single_date_time_to_resolution(resolution, TimeTypeConstants.DATE, mod, result)
elif dtype == Constants.SYS_DATETIME_DURATION:
if TimeTypeConstants.DURATION in resolution:
result[TimeTypeConstants.VALUE] = resolution[TimeTypeConstants.DURATION]
if dtype == Constants.SYS_DATETIME_TIMEPERIOD:
self.__add_period_to_resolution(resolution, TimeTypeConstants.START_TIME, TimeTypeConstants.END_TIME, mod, result)
if dtype == Constants.SYS_DATETIME_DATEPERIOD:
self.__add_period_to_resolution(resolution, TimeTypeConstants.START_DATE, TimeTypeConstants.END_DATE, mod, result)
if dtype == Constants.SYS_DATETIME_DATETIMEPERIOD:
self.__add_period_to_resolution(resolution, TimeTypeConstants.START_DATETIME, TimeTypeConstants.END_DATETIME, mod, result)
return result
def __add_single_date_time_to_resolution(self, resolutions: Dict[str, str], dtype: str, mod: str, result: Dict[str, str]):
key = TimeTypeConstants.VALUE
value = resolutions[dtype]
if not value or self.__date_min_value == value or self.__date_time_min_value == value:
return
if mod:
if mod == TimeTypeConstants.BEFORE_MOD:
key = TimeTypeConstants.END
elif mod == TimeTypeConstants.AFTER_MOD:
key = TimeTypeConstants.START
elif mod == TimeTypeConstants.SINCE_MOD:
key = TimeTypeConstants.START
result[key] = value
def __add_period_to_resolution(self, resolutions: Dict[str, str], start_type: str, end_type: str, mod: str, result: Dict[str, str]):
start = resolutions.get(start_type, None)
end = resolutions.get(end_type, None)
if mod:
if mod == TimeTypeConstants.BEFORE_MOD:
result[TimeTypeConstants.END] = start
return
if mod == TimeTypeConstants.AFTER_MOD:
result[TimeTypeConstants.START] = end
return
if mod == TimeTypeConstants.SINCE_MOD:
result[TimeTypeConstants.START] = start
return
if not (start and end):
return
result[TimeTypeConstants.START] = start
result[TimeTypeConstants.END] = end
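    # In short: with no modifier the period resolution carries both "start" and
    # "end"; BEFORE keeps only "end" (set to the period start), AFTER keeps only
    # "start" (set to the period end), and SINCE keeps only "start" (set to the
    # period start).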
def _resolve_ampm(self, values_map: Dict[str, str], keyname: str):
        if keyname not in values_map:
            return
        resolution = values_map[keyname]
        if 'timex' not in values_map:
return
timex = values_map['timex']
values_map.pop(keyname, None)
values_map[keyname + 'Am'] = resolution
resolution_pm = {}
if values_map['type'] == Constants.SYS_DATETIME_TIME:
resolution_pm[TimeTypeConstants.VALUE] = DateTimeFormatUtil.to_pm(resolution[TimeTypeConstants.VALUE])
resolution_pm['timex'] = DateTimeFormatUtil.to_pm(timex)
elif values_map['type'] == Constants.SYS_DATETIME_DATETIME:
split_value = resolution[TimeTypeConstants.VALUE].split(' ')
resolution_pm[TimeTypeConstants.VALUE] = f'{split_value[0]} {DateTimeFormatUtil.to_pm(split_value[1])}'
resolution_pm['timex'] = DateTimeFormatUtil.all_str_to_pm(timex)
elif values_map['type'] == Constants.SYS_DATETIME_TIMEPERIOD:
if TimeTypeConstants.START in resolution:
resolution_pm[TimeTypeConstants.START] = DateTimeFormatUtil.to_pm(resolution[TimeTypeConstants.START])
if TimeTypeConstants.END in resolution:
resolution_pm[TimeTypeConstants.END] = DateTimeFormatUtil.to_pm(resolution[TimeTypeConstants.END])
resolution_pm['timex'] = DateTimeFormatUtil.all_str_to_pm(timex)
elif values_map['type'] == Constants.SYS_DATETIME_DATETIMEPERIOD:
if TimeTypeConstants.START in resolution:
split_value = resolution[TimeTypeConstants.START].split(' ')
resolution_pm[TimeTypeConstants.START] = f'{split_value[0]} {DateTimeFormatUtil.to_pm(split_value[1])}'
if TimeTypeConstants.END in resolution:
split_value = resolution[TimeTypeConstants.END].split(' ')
resolution_pm[TimeTypeConstants.END] = f'{split_value[0]} {DateTimeFormatUtil.to_pm(split_value[1])}'
resolution_pm['timex'] = DateTimeFormatUtil.all_str_to_pm(timex)
values_map[keyname + 'Pm'] = resolution_pm
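    # In short: _resolve_ampm replaces an ambiguous resolution stored under
    # `keyname` with two entries, `keyname + 'Am'` (the original values) and
    # `keyname + 'Pm'` (the same values and timex converted through
    # DateTimeFormatUtil.to_pm / all_str_to_pm).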
|
the-stack_0_19392 | """Selector event loop for Unix with signal handling."""
import errno
import io
import os
import selectors
import signal
import socket
import stat
import subprocess
import sys
import threading
import warnings
from . import base_events
from . import base_subprocess
from . import constants
from . import coroutines
from . import events
from . import exceptions
from . import futures
from . import selector_events
from . import tasks
from . import transports
from .log import logger
__all__ = (
'SelectorEventLoop',
'AbstractChildWatcher', 'SafeChildWatcher',
'FastChildWatcher', 'DefaultEventLoopPolicy',
)
if sys.platform == 'win32': # pragma: no cover
raise ImportError('Signals are not really supported on Windows')
def _sighandler_noop(signum, frame):
"""Dummy signal handler."""
pass
class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop):
"""Unix event loop.
Adds signal handling and UNIX Domain Socket support to SelectorEventLoop.
"""
def __init__(self, selector=None):
super().__init__(selector)
self._signal_handlers = {}
def close(self):
super().close()
if not sys.is_finalizing():
for sig in list(self._signal_handlers):
self.remove_signal_handler(sig)
else:
if self._signal_handlers:
warnings.warn(f"Closing the loop {self!r} "
f"on interpreter shutdown "
f"stage, skipping signal handlers removal",
ResourceWarning,
source=self)
self._signal_handlers.clear()
def _process_self_data(self, data):
for signum in data:
if not signum:
# ignore null bytes written by _write_to_self()
continue
self._handle_signal(signum)
def add_signal_handler(self, sig, callback, *args):
"""Add a handler for a signal. UNIX only.
Raise ValueError if the signal number is invalid or uncatchable.
Raise RuntimeError if there is a problem setting up the handler.
"""
if (coroutines.iscoroutine(callback) or
coroutines.iscoroutinefunction(callback)):
raise TypeError("coroutines cannot be used "
"with add_signal_handler()")
self._check_signal(sig)
self._check_closed()
try:
# set_wakeup_fd() raises ValueError if this is not the
# main thread. By calling it early we ensure that an
# event loop running in another thread cannot add a signal
# handler.
signal.set_wakeup_fd(self._csock.fileno())
except (ValueError, OSError) as exc:
raise RuntimeError(str(exc))
handle = events.Handle(callback, args, self, None)
self._signal_handlers[sig] = handle
try:
# Register a dummy signal handler to ask Python to write the signal
            # number in the wakeup file descriptor. _process_self_data() will
# read signal numbers from this file descriptor to handle signals.
signal.signal(sig, _sighandler_noop)
# Set SA_RESTART to limit EINTR occurrences.
signal.siginterrupt(sig, False)
except OSError as exc:
del self._signal_handlers[sig]
if not self._signal_handlers:
try:
signal.set_wakeup_fd(-1)
except (ValueError, OSError) as nexc:
logger.info('set_wakeup_fd(-1) failed: %s', nexc)
if exc.errno == errno.EINVAL:
raise RuntimeError(f'sig {sig} cannot be caught')
else:
raise
def _handle_signal(self, sig):
"""Internal helper that is the actual signal handler."""
handle = self._signal_handlers.get(sig)
if handle is None:
return # Assume it's some race condition.
if handle._cancelled:
self.remove_signal_handler(sig) # Remove it properly.
else:
self._add_callback_signalsafe(handle)
def remove_signal_handler(self, sig):
"""Remove a handler for a signal. UNIX only.
Return True if a signal handler was removed, False if not.
"""
self._check_signal(sig)
try:
del self._signal_handlers[sig]
except KeyError:
return False
if sig == signal.SIGINT:
handler = signal.default_int_handler
else:
handler = signal.SIG_DFL
try:
signal.signal(sig, handler)
except OSError as exc:
if exc.errno == errno.EINVAL:
raise RuntimeError(f'sig {sig} cannot be caught')
else:
raise
if not self._signal_handlers:
try:
signal.set_wakeup_fd(-1)
except (ValueError, OSError) as exc:
logger.info('set_wakeup_fd(-1) failed: %s', exc)
return True
def _check_signal(self, sig):
"""Internal helper to validate a signal.
Raise ValueError if the signal number is invalid or uncatchable.
Raise RuntimeError if there is a problem setting up the handler.
"""
if not isinstance(sig, int):
raise TypeError(f'sig must be an int, not {sig!r}')
if sig not in signal.valid_signals():
raise ValueError(f'invalid signal number {sig}')
def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
return _UnixReadPipeTransport(self, pipe, protocol, waiter, extra)
def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
extra=None):
return _UnixWritePipeTransport(self, pipe, protocol, waiter, extra)
async def _make_subprocess_transport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
extra=None, **kwargs):
with events.get_child_watcher() as watcher:
waiter = self.create_future()
transp = _UnixSubprocessTransport(self, protocol, args, shell,
stdin, stdout, stderr, bufsize,
waiter=waiter, extra=extra,
**kwargs)
watcher.add_child_handler(transp.get_pid(),
self._child_watcher_callback, transp)
try:
await waiter
except Exception:
transp.close()
await transp._wait()
raise
return transp
def _child_watcher_callback(self, pid, returncode, transp):
self.call_soon_threadsafe(transp._process_exited, returncode)
async def create_unix_connection(
self, protocol_factory, path=None, *,
ssl=None, sock=None,
server_hostname=None,
ssl_handshake_timeout=None):
assert server_hostname is None or isinstance(server_hostname, str)
if ssl:
if server_hostname is None:
raise ValueError(
'you have to pass server_hostname when using ssl')
else:
if server_hostname is not None:
raise ValueError('server_hostname is only meaningful with ssl')
if ssl_handshake_timeout is not None:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
if path is not None:
if sock is not None:
raise ValueError(
'path and sock can not be specified at the same time')
path = os.fspath(path)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0)
try:
sock.setblocking(False)
await self.sock_connect(sock, path)
except:
sock.close()
raise
else:
if sock is None:
raise ValueError('no path and sock were specified')
if (sock.family != socket.AF_UNIX or
sock.type != socket.SOCK_STREAM):
raise ValueError(
f'A UNIX Domain Stream Socket was expected, got {sock!r}')
sock.setblocking(False)
transport, protocol = await self._create_connection_transport(
sock, protocol_factory, ssl, server_hostname,
ssl_handshake_timeout=ssl_handshake_timeout)
return transport, protocol
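    # Example (illustrative): application code would typically call this through
    # the running loop, e.g.
    #     transport, protocol = await loop.create_unix_connection(
    #         MyProtocol, path='/tmp/example.sock')
    # where MyProtocol is a placeholder asyncio.Protocol subclass and the socket
    # path is made up for the example.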
async def create_unix_server(
self, protocol_factory, path=None, *,
sock=None, backlog=100, ssl=None,
ssl_handshake_timeout=None,
start_serving=True):
if isinstance(ssl, bool):
raise TypeError('ssl argument must be an SSLContext or None')
if ssl_handshake_timeout is not None and not ssl:
raise ValueError(
'ssl_handshake_timeout is only meaningful with ssl')
if path is not None:
if sock is not None:
raise ValueError(
'path and sock can not be specified at the same time')
path = os.fspath(path)
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# Check for abstract socket. `str` and `bytes` paths are supported.
if path[0] not in (0, '\x00'):
try:
if stat.S_ISSOCK(os.stat(path).st_mode):
os.remove(path)
except FileNotFoundError:
pass
except OSError as err:
# Directory may have permissions only to create socket.
logger.error('Unable to check or remove stale UNIX socket '
'%r: %r', path, err)
try:
sock.bind(path)
except OSError as exc:
sock.close()
if exc.errno == errno.EADDRINUSE:
# Let's improve the error message by adding
# with what exact address it occurs.
msg = f'Address {path!r} is already in use'
raise OSError(errno.EADDRINUSE, msg) from None
else:
raise
except:
sock.close()
raise
else:
if sock is None:
raise ValueError(
'path was not specified, and no sock specified')
if (sock.family != socket.AF_UNIX or
sock.type != socket.SOCK_STREAM):
raise ValueError(
f'A UNIX Domain Stream Socket was expected, got {sock!r}')
sock.setblocking(False)
server = base_events.Server(self, [sock], protocol_factory,
ssl, backlog, ssl_handshake_timeout)
if start_serving:
server._start_serving()
            # Skip one loop iteration so that all 'loop.add_reader'
            # calls go through.
await tasks.sleep(0, loop=self)
return server
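    # Example (illustrative): the returned Server can be driven like any asyncio
    # server, e.g.
    #     server = await loop.create_unix_server(MyProtocol, path='/tmp/example.sock')
    #     async with server:
    #         await server.serve_forever()
    # MyProtocol and the socket path are placeholders used only for the example.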
async def _sock_sendfile_native(self, sock, file, offset, count):
try:
os.sendfile
except AttributeError as exc:
raise exceptions.SendfileNotAvailableError(
"os.sendfile() is not available")
try:
fileno = file.fileno()
except (AttributeError, io.UnsupportedOperation) as err:
raise exceptions.SendfileNotAvailableError("not a regular file")
try:
fsize = os.fstat(fileno).st_size
except OSError as err:
raise exceptions.SendfileNotAvailableError("not a regular file")
blocksize = count if count else fsize
if not blocksize:
return 0 # empty file
fut = self.create_future()
self._sock_sendfile_native_impl(fut, None, sock, fileno,
offset, count, blocksize, 0)
return await fut
def _sock_sendfile_native_impl(self, fut, registered_fd, sock, fileno,
offset, count, blocksize, total_sent):
fd = sock.fileno()
if registered_fd is not None:
# Remove the callback early. It should be rare that the
# selector says the fd is ready but the call still returns
# EAGAIN, and I am willing to take a hit in that case in
# order to simplify the common case.
self.remove_writer(registered_fd)
if fut.cancelled():
self._sock_sendfile_update_filepos(fileno, offset, total_sent)
return
if count:
blocksize = count - total_sent
if blocksize <= 0:
self._sock_sendfile_update_filepos(fileno, offset, total_sent)
fut.set_result(total_sent)
return
try:
sent = os.sendfile(fd, fileno, offset, blocksize)
except (BlockingIOError, InterruptedError):
if registered_fd is None:
self._sock_add_cancellation_callback(fut, sock)
self.add_writer(fd, self._sock_sendfile_native_impl, fut,
fd, sock, fileno,
offset, count, blocksize, total_sent)
except OSError as exc:
if (registered_fd is not None and
exc.errno == errno.ENOTCONN and
type(exc) is not ConnectionError):
# If we have an ENOTCONN and this isn't a first call to
# sendfile(), i.e. the connection was closed in the middle
# of the operation, normalize the error to ConnectionError
# to make it consistent across all Posix systems.
new_exc = ConnectionError(
"socket is not connected", errno.ENOTCONN)
new_exc.__cause__ = exc
exc = new_exc
if total_sent == 0:
# We can get here for different reasons, the main
# one being 'file' is not a regular mmap(2)-like
# file, in which case we'll fall back on using
# plain send().
err = exceptions.SendfileNotAvailableError(
"os.sendfile call failed")
self._sock_sendfile_update_filepos(fileno, offset, total_sent)
fut.set_exception(err)
else:
self._sock_sendfile_update_filepos(fileno, offset, total_sent)
fut.set_exception(exc)
except Exception as exc:
self._sock_sendfile_update_filepos(fileno, offset, total_sent)
fut.set_exception(exc)
else:
if sent == 0:
# EOF
self._sock_sendfile_update_filepos(fileno, offset, total_sent)
fut.set_result(total_sent)
else:
offset += sent
total_sent += sent
if registered_fd is None:
self._sock_add_cancellation_callback(fut, sock)
self.add_writer(fd, self._sock_sendfile_native_impl, fut,
fd, sock, fileno,
offset, count, blocksize, total_sent)
def _sock_sendfile_update_filepos(self, fileno, offset, total_sent):
if total_sent > 0:
os.lseek(fileno, offset, os.SEEK_SET)
def _sock_add_cancellation_callback(self, fut, sock):
def cb(fut):
if fut.cancelled():
fd = sock.fileno()
if fd != -1:
self.remove_writer(fd)
fut.add_done_callback(cb)
class _UnixReadPipeTransport(transports.ReadTransport):
max_size = 256 * 1024 # max bytes we read in one event loop iteration
def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
super().__init__(extra)
self._extra['pipe'] = pipe
self._loop = loop
self._pipe = pipe
self._fileno = pipe.fileno()
self._protocol = protocol
self._closing = False
mode = os.fstat(self._fileno).st_mode
if not (stat.S_ISFIFO(mode) or
stat.S_ISSOCK(mode) or
stat.S_ISCHR(mode)):
self._pipe = None
self._fileno = None
self._protocol = None
raise ValueError("Pipe transport is for pipes/sockets only.")
os.set_blocking(self._fileno, False)
self._loop.call_soon(self._protocol.connection_made, self)
# only start reading when connection_made() has been called
self._loop.call_soon(self._loop._add_reader,
self._fileno, self._read_ready)
if waiter is not None:
# only wake up the waiter when connection_made() has been called
self._loop.call_soon(futures._set_result_unless_cancelled,
waiter, None)
def __repr__(self):
info = [self.__class__.__name__]
if self._pipe is None:
info.append('closed')
elif self._closing:
info.append('closing')
info.append(f'fd={self._fileno}')
selector = getattr(self._loop, '_selector', None)
if self._pipe is not None and selector is not None:
polling = selector_events._test_selector_event(
selector, self._fileno, selectors.EVENT_READ)
if polling:
info.append('polling')
else:
info.append('idle')
elif self._pipe is not None:
info.append('open')
else:
info.append('closed')
return '<{}>'.format(' '.join(info))
def _read_ready(self):
try:
data = os.read(self._fileno, self.max_size)
except (BlockingIOError, InterruptedError):
pass
except OSError as exc:
self._fatal_error(exc, 'Fatal read error on pipe transport')
else:
if data:
self._protocol.data_received(data)
else:
if self._loop.get_debug():
logger.info("%r was closed by peer", self)
self._closing = True
self._loop._remove_reader(self._fileno)
self._loop.call_soon(self._protocol.eof_received)
self._loop.call_soon(self._call_connection_lost, None)
def pause_reading(self):
self._loop._remove_reader(self._fileno)
def resume_reading(self):
self._loop._add_reader(self._fileno, self._read_ready)
def set_protocol(self, protocol):
self._protocol = protocol
def get_protocol(self):
return self._protocol
def is_closing(self):
return self._closing
def close(self):
if not self._closing:
self._close(None)
def __del__(self):
if self._pipe is not None:
warnings.warn(f"unclosed transport {self!r}", ResourceWarning,
source=self)
self._pipe.close()
def _fatal_error(self, exc, message='Fatal error on pipe transport'):
# should be called by exception handler only
if (isinstance(exc, OSError) and exc.errno == errno.EIO):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
self._loop.call_exception_handler({
'message': message,
'exception': exc,
'transport': self,
'protocol': self._protocol,
})
self._close(exc)
def _close(self, exc):
self._closing = True
self._loop._remove_reader(self._fileno)
self._loop.call_soon(self._call_connection_lost, exc)
def _call_connection_lost(self, exc):
try:
self._protocol.connection_lost(exc)
finally:
self._pipe.close()
self._pipe = None
self._protocol = None
self._loop = None
class _UnixWritePipeTransport(transports._FlowControlMixin,
transports.WriteTransport):
def __init__(self, loop, pipe, protocol, waiter=None, extra=None):
super().__init__(extra, loop)
self._extra['pipe'] = pipe
self._pipe = pipe
self._fileno = pipe.fileno()
self._protocol = protocol
self._buffer = bytearray()
self._conn_lost = 0
self._closing = False # Set when close() or write_eof() called.
mode = os.fstat(self._fileno).st_mode
is_char = stat.S_ISCHR(mode)
is_fifo = stat.S_ISFIFO(mode)
is_socket = stat.S_ISSOCK(mode)
if not (is_char or is_fifo or is_socket):
self._pipe = None
self._fileno = None
self._protocol = None
raise ValueError("Pipe transport is only for "
"pipes, sockets and character devices")
os.set_blocking(self._fileno, False)
self._loop.call_soon(self._protocol.connection_made, self)
# On AIX, the reader trick (to be notified when the read end of the
# socket is closed) only works for sockets. On other platforms it
# works for pipes and sockets. (Exception: OS X 10.4? Issue #19294.)
if is_socket or (is_fifo and not sys.platform.startswith("aix")):
# only start reading when connection_made() has been called
self._loop.call_soon(self._loop._add_reader,
self._fileno, self._read_ready)
if waiter is not None:
# only wake up the waiter when connection_made() has been called
self._loop.call_soon(futures._set_result_unless_cancelled,
waiter, None)
def __repr__(self):
info = [self.__class__.__name__]
if self._pipe is None:
info.append('closed')
elif self._closing:
info.append('closing')
info.append(f'fd={self._fileno}')
selector = getattr(self._loop, '_selector', None)
if self._pipe is not None and selector is not None:
polling = selector_events._test_selector_event(
selector, self._fileno, selectors.EVENT_WRITE)
if polling:
info.append('polling')
else:
info.append('idle')
bufsize = self.get_write_buffer_size()
info.append(f'bufsize={bufsize}')
elif self._pipe is not None:
info.append('open')
else:
info.append('closed')
return '<{}>'.format(' '.join(info))
def get_write_buffer_size(self):
return len(self._buffer)
def _read_ready(self):
# Pipe was closed by peer.
if self._loop.get_debug():
logger.info("%r was closed by peer", self)
if self._buffer:
self._close(BrokenPipeError())
else:
self._close()
def write(self, data):
assert isinstance(data, (bytes, bytearray, memoryview)), repr(data)
if isinstance(data, bytearray):
data = memoryview(data)
if not data:
return
if self._conn_lost or self._closing:
if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES:
logger.warning('pipe closed by peer or '
'os.write(pipe, data) raised exception.')
self._conn_lost += 1
return
if not self._buffer:
# Attempt to send it right away first.
try:
n = os.write(self._fileno, data)
except (BlockingIOError, InterruptedError):
n = 0
except Exception as exc:
self._conn_lost += 1
self._fatal_error(exc, 'Fatal write error on pipe transport')
return
if n == len(data):
return
elif n > 0:
data = memoryview(data)[n:]
self._loop._add_writer(self._fileno, self._write_ready)
self._buffer += data
self._maybe_pause_protocol()
def _write_ready(self):
assert self._buffer, 'Data should not be empty'
try:
n = os.write(self._fileno, self._buffer)
except (BlockingIOError, InterruptedError):
pass
except Exception as exc:
self._buffer.clear()
self._conn_lost += 1
            # Remove the writer here; _fatal_error() doesn't do it
            # because _buffer is empty.
self._loop._remove_writer(self._fileno)
self._fatal_error(exc, 'Fatal write error on pipe transport')
else:
if n == len(self._buffer):
self._buffer.clear()
self._loop._remove_writer(self._fileno)
self._maybe_resume_protocol() # May append to buffer.
if self._closing:
self._loop._remove_reader(self._fileno)
self._call_connection_lost(None)
return
elif n > 0:
del self._buffer[:n]
def can_write_eof(self):
return True
def write_eof(self):
if self._closing:
return
assert self._pipe
self._closing = True
if not self._buffer:
self._loop._remove_reader(self._fileno)
self._loop.call_soon(self._call_connection_lost, None)
def set_protocol(self, protocol):
self._protocol = protocol
def get_protocol(self):
return self._protocol
def is_closing(self):
return self._closing
def close(self):
if self._pipe is not None and not self._closing:
            # write_eof is all we need to close the write pipe
self.write_eof()
def __del__(self):
if self._pipe is not None:
warnings.warn(f"unclosed transport {self!r}", ResourceWarning,
source=self)
self._pipe.close()
def abort(self):
self._close(None)
def _fatal_error(self, exc, message='Fatal error on pipe transport'):
# should be called by exception handler only
if isinstance(exc, base_events._FATAL_ERROR_IGNORE):
if self._loop.get_debug():
logger.debug("%r: %s", self, message, exc_info=True)
else:
self._loop.call_exception_handler({
'message': message,
'exception': exc,
'transport': self,
'protocol': self._protocol,
})
self._close(exc)
def _close(self, exc=None):
self._closing = True
if self._buffer:
self._loop._remove_writer(self._fileno)
self._buffer.clear()
self._loop._remove_reader(self._fileno)
self._loop.call_soon(self._call_connection_lost, exc)
def _call_connection_lost(self, exc):
try:
self._protocol.connection_lost(exc)
finally:
self._pipe.close()
self._pipe = None
self._protocol = None
self._loop = None
class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport):
def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
stdin_w = None
if stdin == subprocess.PIPE:
# Use a socket pair for stdin, since not all platforms
# support selecting read events on the write end of a
# socket (which we use in order to detect closing of the
# other end). Notably this is needed on AIX, and works
# just fine on other platforms.
stdin, stdin_w = socket.socketpair()
self._proc = subprocess.Popen(
args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr,
universal_newlines=False, bufsize=bufsize, **kwargs)
if stdin_w is not None:
stdin.close()
self._proc.stdin = open(stdin_w.detach(), 'wb', buffering=bufsize)
class AbstractChildWatcher:
"""Abstract base class for monitoring child processes.
Objects derived from this class monitor a collection of subprocesses and
report their termination or interruption by a signal.
New callbacks are registered with .add_child_handler(). Starting a new
process must be done within a 'with' block to allow the watcher to suspend
    its activity until the new process is fully registered (this is needed to
prevent a race condition in some implementations).
Example:
with watcher:
proc = subprocess.Popen("sleep 1")
watcher.add_child_handler(proc.pid, callback)
Notes:
Implementations of this class must be thread-safe.
Since child watcher objects may catch the SIGCHLD signal and call
waitpid(-1), there should be only one active object per process.
"""
def add_child_handler(self, pid, callback, *args):
"""Register a new child handler.
Arrange for callback(pid, returncode, *args) to be called when
process 'pid' terminates. Specifying another callback for the same
process replaces the previous handler.
Note: callback() must be thread-safe.
"""
raise NotImplementedError()
def remove_child_handler(self, pid):
"""Removes the handler for process 'pid'.
The function returns True if the handler was successfully removed,
False if there was nothing to remove."""
raise NotImplementedError()
def attach_loop(self, loop):
"""Attach the watcher to an event loop.
If the watcher was previously attached to an event loop, then it is
first detached before attaching to the new loop.
Note: loop may be None.
"""
raise NotImplementedError()
def close(self):
"""Close the watcher.
This must be called to make sure that any underlying resource is freed.
"""
raise NotImplementedError()
def __enter__(self):
"""Enter the watcher's context and allow starting new processes
This function must return self"""
raise NotImplementedError()
def __exit__(self, a, b, c):
"""Exit the watcher's context"""
raise NotImplementedError()
class BaseChildWatcher(AbstractChildWatcher):
def __init__(self):
self._loop = None
self._callbacks = {}
def close(self):
self.attach_loop(None)
def _do_waitpid(self, expected_pid):
raise NotImplementedError()
def _do_waitpid_all(self):
raise NotImplementedError()
def attach_loop(self, loop):
assert loop is None or isinstance(loop, events.AbstractEventLoop)
if self._loop is not None and loop is None and self._callbacks:
warnings.warn(
'A loop is being detached '
'from a child watcher with pending handlers',
RuntimeWarning)
if self._loop is not None:
self._loop.remove_signal_handler(signal.SIGCHLD)
self._loop = loop
if loop is not None:
loop.add_signal_handler(signal.SIGCHLD, self._sig_chld)
# Prevent a race condition in case a child terminated
# during the switch.
self._do_waitpid_all()
def _sig_chld(self):
try:
self._do_waitpid_all()
except Exception as exc:
# self._loop should always be available here
# as '_sig_chld' is added as a signal handler
# in 'attach_loop'
self._loop.call_exception_handler({
'message': 'Unknown exception in SIGCHLD handler',
'exception': exc,
})
def _compute_returncode(self, status):
if os.WIFSIGNALED(status):
# The child process died because of a signal.
return -os.WTERMSIG(status)
elif os.WIFEXITED(status):
# The child process exited (e.g sys.exit()).
return os.WEXITSTATUS(status)
else:
# The child exited, but we don't understand its status.
# This shouldn't happen, but if it does, let's just
# return that status; perhaps that helps debug it.
return status
class SafeChildWatcher(BaseChildWatcher):
"""'Safe' child watcher implementation.
This implementation avoids disrupting other code spawning processes by
polling explicitly each process in the SIGCHLD handler instead of calling
os.waitpid(-1).
    This is a safe solution, but it has significant overhead when handling a
    large number of children (O(n) each time SIGCHLD is raised).
"""
def close(self):
self._callbacks.clear()
super().close()
def __enter__(self):
return self
def __exit__(self, a, b, c):
pass
def add_child_handler(self, pid, callback, *args):
if self._loop is None:
raise RuntimeError(
"Cannot add child handler, "
"the child watcher does not have a loop attached")
self._callbacks[pid] = (callback, args)
# Prevent a race condition in case the child is already terminated.
self._do_waitpid(pid)
def remove_child_handler(self, pid):
try:
del self._callbacks[pid]
return True
except KeyError:
return False
def _do_waitpid_all(self):
for pid in list(self._callbacks):
self._do_waitpid(pid)
def _do_waitpid(self, expected_pid):
assert expected_pid > 0
try:
pid, status = os.waitpid(expected_pid, os.WNOHANG)
except ChildProcessError:
# The child process is already reaped
# (may happen if waitpid() is called elsewhere).
pid = expected_pid
returncode = 255
logger.warning(
"Unknown child process pid %d, will report returncode 255",
pid)
else:
if pid == 0:
# The child process is still alive.
return
returncode = self._compute_returncode(status)
if self._loop.get_debug():
logger.debug('process %s exited with returncode %s',
expected_pid, returncode)
try:
callback, args = self._callbacks.pop(pid)
except KeyError: # pragma: no cover
# May happen if .remove_child_handler() is called
# after os.waitpid() returns.
if self._loop.get_debug():
logger.warning("Child watcher got an unexpected pid: %r",
pid, exc_info=True)
else:
callback(pid, returncode, *args)
class FastChildWatcher(BaseChildWatcher):
"""'Fast' child watcher implementation.
This implementation reaps every terminated processes by calling
os.waitpid(-1) directly, possibly breaking other code spawning processes
and waiting for their termination.
    There is no noticeable overhead when handling a large number of children
    (O(1) each time a child terminates).
"""
def __init__(self):
super().__init__()
self._lock = threading.Lock()
self._zombies = {}
self._forks = 0
def close(self):
self._callbacks.clear()
self._zombies.clear()
super().close()
def __enter__(self):
with self._lock:
self._forks += 1
return self
def __exit__(self, a, b, c):
with self._lock:
self._forks -= 1
if self._forks or not self._zombies:
return
collateral_victims = str(self._zombies)
self._zombies.clear()
logger.warning(
"Caught subprocesses termination from unknown pids: %s",
collateral_victims)
def add_child_handler(self, pid, callback, *args):
assert self._forks, "Must use the context manager"
if self._loop is None:
raise RuntimeError(
"Cannot add child handler, "
"the child watcher does not have a loop attached")
with self._lock:
try:
returncode = self._zombies.pop(pid)
except KeyError:
# The child is running.
self._callbacks[pid] = callback, args
return
# The child is dead already. We can fire the callback.
callback(pid, returncode, *args)
def remove_child_handler(self, pid):
try:
del self._callbacks[pid]
return True
except KeyError:
return False
def _do_waitpid_all(self):
# Because of signal coalescing, we must keep calling waitpid() as
# long as we're able to reap a child.
while True:
try:
pid, status = os.waitpid(-1, os.WNOHANG)
except ChildProcessError:
# No more child processes exist.
return
else:
if pid == 0:
# A child process is still alive.
return
returncode = self._compute_returncode(status)
with self._lock:
try:
callback, args = self._callbacks.pop(pid)
except KeyError:
# unknown child
if self._forks:
# It may not be registered yet.
self._zombies[pid] = returncode
if self._loop.get_debug():
logger.debug('unknown process %s exited '
'with returncode %s',
pid, returncode)
continue
callback = None
else:
if self._loop.get_debug():
logger.debug('process %s exited with returncode %s',
pid, returncode)
if callback is None:
logger.warning(
"Caught subprocess termination from unknown pid: "
"%d -> %d", pid, returncode)
else:
callback(pid, returncode, *args)
class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy):
"""UNIX event loop policy with a watcher for child processes."""
_loop_factory = _UnixSelectorEventLoop
def __init__(self):
super().__init__()
self._watcher = None
def _init_watcher(self):
with events._lock:
if self._watcher is None: # pragma: no branch
self._watcher = SafeChildWatcher()
if isinstance(threading.current_thread(),
threading._MainThread):
self._watcher.attach_loop(self._local._loop)
def set_event_loop(self, loop):
"""Set the event loop.
As a side effect, if a child watcher was set before, then calling
.set_event_loop() from the main thread will call .attach_loop(loop) on
the child watcher.
"""
super().set_event_loop(loop)
if (self._watcher is not None and
isinstance(threading.current_thread(), threading._MainThread)):
self._watcher.attach_loop(loop)
def get_child_watcher(self):
"""Get the watcher for child processes.
If not yet set, a SafeChildWatcher object is automatically created.
"""
if self._watcher is None:
self._init_watcher()
return self._watcher
def set_child_watcher(self, watcher):
"""Set the watcher for child processes."""
assert watcher is None or isinstance(watcher, AbstractChildWatcher)
if self._watcher is not None:
self._watcher.close()
self._watcher = watcher
SelectorEventLoop = _UnixSelectorEventLoop
DefaultEventLoopPolicy = _UnixDefaultEventLoopPolicy
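# Example (illustrative): with the policy above, the default loop on Unix is a
# SelectorEventLoop, so application code can register POSIX signal handlers, e.g.
#     loop = asyncio.new_event_loop()
#     loop.add_signal_handler(signal.SIGTERM, loop.stop)
# Only plain callables are accepted; add_signal_handler() rejects coroutines.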
|
the-stack_0_19393 | import unittest
from tests.test_undos.test_coreutils import common
from undo import expand, resolve
class TestLink(unittest.TestCase):
def test_link(self):
command = "link SOURCE DEST"
expected = ["rm DEST"]
actual = [expand.expand(undo, env, ("%", "%"), "; ")
for env, undo in
resolve.resolve(command, [common.COREUTILS_UNDO_DIR], False, False, "sh")]
self.assertListEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_19394 | import torch
import torch.nn as nn
from torch.utils.model_zoo import load_url as load_state_dict_from_url
__all__ = [
'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
'vgg19_bn', 'vgg19',
]
model_urls = {
'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
class VGG(nn.Module):
def __init__(self, features, num_classes=1000, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Linear(512 * 7 * 7, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(4096, num_classes),
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
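# In the cfgs table above, each integer is the output-channel count of a 3x3
# convolution and 'M' inserts a 2x2 max-pooling layer with stride 2, as consumed
# by make_layers().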
def _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):
if pretrained:
kwargs['init_weights'] = False
model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
return model
def vgg11(pretrained=False, progress=True, **kwargs):
r"""VGG 11-layer model (configuration "A") from
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)
def vgg11_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 11-layer model (configuration "A") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)
def vgg13(pretrained=False, progress=True, **kwargs):
r"""VGG 13-layer model (configuration "B")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)
def vgg13_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 13-layer model (configuration "B") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs)
def vgg16(pretrained=False, progress=True, **kwargs):
r"""VGG 16-layer model (configuration "D")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)
def vgg16_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 16-layer model (configuration "D") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs)
def vgg19(pretrained=False, progress=True, **kwargs):
r"""VGG 19-layer model (configuration "E")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)
def vgg19_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 19-layer model (configuration 'E') with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)
|
the-stack_0_19397 | # NEON AI (TM) SOFTWARE, Software Development Kit & Application Development System
# All trademark and other rights reserved by their respective owners
# Copyright 2008-2021 Neongecko.com Inc.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
# and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from setuptools import setup, find_packages
from os import getenv, path
def get_requirements(requirements_filename: str):
requirements_file = path.join(path.abspath(path.dirname(__file__)), "requirements", requirements_filename)
with open(requirements_file, 'r', encoding='utf-8') as r:
requirements = r.readlines()
requirements = [r.strip() for r in requirements if r.strip() and not r.strip().startswith("#")]
for i in range(0, len(requirements)):
r = requirements[i]
if "@" in r:
parts = [p.lower() if p.strip().startswith("git+http") else p for p in r.split('@')]
r = "@".join(parts)
if getenv("GITHUB_TOKEN"):
if "github.com" in r:
r = r.replace("github.com", f"{getenv('GITHUB_TOKEN')}@github.com")
requirements[i] = r
return requirements
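# Example (illustrative, with a made-up requirement line): with GITHUB_TOKEN=abc123
# in the environment, a line such as
#     neon-utils @ git+https://github.com/neongeckocom/neon-utils
# is rewritten by get_requirements() to
#     neon-utils @ git+https://[email protected]/neongeckocom/neon-utils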
with open("README.md", "r") as f:
long_description = f.read()
with open("./version.py", "r", encoding="utf-8") as v:
for line in v.readlines():
if line.startswith("__version__"):
if '"' in line:
version = line.split('"')[1]
else:
version = line.split("'")[1]
setup(
name='neon-audio',
version=version,
description=long_description,
url='https://github.com/NeonGeckoCom/neon_audio',
author='Neongecko',
author_email='[email protected]',
license='NeonAI License v1.0',
packages=find_packages(),
install_requires=get_requirements("requirements.txt"),
zip_safe=True,
classifiers=[
'Intended Audience :: Developers',
'Programming Language :: Python :: 3.6',
],
entry_points={
'console_scripts': [
'neon_audio_client=neon_audio.__main__:main'
]
}
)
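# Note: get_requirements() above rewrites "github.com" requirement URLs to embed the
# GITHUB_TOKEN environment variable when it is set, so private Git dependencies can be
# resolved in CI without editing the requirements files.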
|
the-stack_0_19401 | import torch as th
import torch.nn as nn
import numpy as np
from utilities.util import select_action
from models.model import Model
from critics.mlp_critic import MLPCritic
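# SQDDPG (Shapley Q-value DDPG): per-agent credit is an approximate Shapley value,
# estimated by sampling random agent orderings ("grand coalitions") and averaging each
# agent's marginal contribution with respect to the coalition that precedes it
# (see sample_grandcoalitions / marginal_contribution below).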
class SQDDPG(Model):
def __init__(self, args, target_net=None):
super(SQDDPG, self).__init__(args)
self.construct_model()
self.apply(self.init_weights)
if target_net != None:
self.target_net = target_net
self.reload_params_to_target()
self.sample_size = self.args.sample_size
self.batchnorm = nn.BatchNorm1d(self.args.agent_num).to(self.device)
def construct_value_net(self):
if self.args.agent_id:
input_shape = (self.obs_dim + self.act_dim) * self.n_ + self.n_
else:
input_shape = (self.obs_dim + self.act_dim) * self.n_
output_shape = 1
if self.args.shared_params:
self.value_dicts = nn.ModuleList( [ MLPCritic(input_shape, output_shape, self.args) ] )
else:
self.value_dicts = nn.ModuleList( [ MLPCritic(input_shape, output_shape, self.args) for _ in range(self.n_) ] )
def construct_model(self):
self.construct_value_net()
self.construct_policy_net()
def sample_grandcoalitions(self, batch_size):
seq_set = th.tril(th.ones(self.n_, self.n_), diagonal=0, out=None).to(self.device)
grand_coalitions = th.multinomial(th.ones(batch_size*self.sample_size, self.n_)/self.n_, self.n_, replacement=False).to(self.device)
individual_map = th.zeros(batch_size*self.sample_size*self.n_, self.n_).to(self.device)
individual_map.scatter_(1, grand_coalitions.contiguous().view(-1, 1), 1)
individual_map = individual_map.contiguous().view(batch_size, self.sample_size, self.n_, self.n_)
subcoalition_map = th.matmul(individual_map, seq_set)
grand_coalitions = grand_coalitions.unsqueeze(1).expand(batch_size*self.sample_size, self.n_, self.n_).contiguous().view(batch_size, self.sample_size, self.n_, self.n_) # shape = (b, n_s, n, n)
return subcoalition_map, grand_coalitions, individual_map
def marginal_contribution(self, obs, act):
batch_size = obs.size(0)
subcoalition_map, grand_coalitions, individual_map = self.sample_grandcoalitions(batch_size) # shape = (b, n_s, n, n)
grand_coalitions_expand = grand_coalitions.unsqueeze(-1).expand(batch_size, self.sample_size, self.n_, self.n_, self.act_dim) # shape = (b, n_s, n, n, a)
act = act.unsqueeze(1).unsqueeze(2).expand(batch_size, self.sample_size, self.n_, self.n_, self.act_dim).gather(3, grand_coalitions_expand) # shape = (b, n, a) -> (b, 1, 1, n, a) -> (b, n_s, n, n, a)
subcoalition_map_no_i = subcoalition_map - individual_map # shape = (b, n_s, n, n)
subcoalition_map_no_i = subcoalition_map_no_i.unsqueeze(-1) # shape = (b, n_s, n, n, 1)
individual_map = individual_map.unsqueeze(-1) # shape = (b, n_s, n, n, 1)
act_no_i = act * subcoalition_map_no_i
act_i = act * individual_map
# detach other agents' actions
act = act_no_i.detach() + act_i # shape = (b, n_s, n, n, a)
# make up inputs
act = act.contiguous().view(batch_size, self.sample_size, self.n_, -1) # shape = (b, n_s, n, n*a)
obs = obs.unsqueeze(1).unsqueeze(2).expand(batch_size, self.sample_size, self.n_, self.n_, self.obs_dim) # shape = (b, n, o) -> (b, 1, n, o) -> (b, 1, 1, n, o) -> (b, n_s, n, n, o)
obs = obs.contiguous().view(batch_size, self.sample_size, self.n_, self.n_*self.obs_dim) # shape = (b, n_s, n, n, o) -> (b, n_s, n, n*o)
inp = th.cat((obs, act), dim=-1) # shape = (b, n_s, n, n*o+n*a)
inp = inp.contiguous().view(batch_size*self.sample_size, self.n_, -1) # shape = (b*n_s, n, n*o+n*a)
# add agent id
if self.args.agent_id:
agent_ids = th.eye(self.n_).unsqueeze(0).repeat(batch_size*self.sample_size, 1, 1).to(self.device) # shape = (b*n_s, n, n)
inp = th.cat( (inp, agent_ids), dim=-1 ) # shape = (b*n_s, n, n*o+n*a+n)
if self.args.shared_params:
inputs = inp.contiguous().view( batch_size*self.sample_size*self.n_, -1 ) # shape = (b*n_s*n, n*o+n*a/n*o+n*a+n)
agent_value = self.value_dicts[0]
values, _ = agent_value(inputs, None)
else:
inputs = inp
values = []
for i, agent_value in enumerate(self.value_dicts):
value, _ = agent_value(inputs[:, i, :], None)
values.append(value)
values = th.stack(values, dim=1)
values = values.contiguous().view(batch_size, self.sample_size, self.n_, 1) # shape = (b, n_s, n, 1)
return values
def value(self, obs, act):
return self.marginal_contribution(obs, act)
def get_actions(self, state, status, exploration, actions_avail, target=False, last_hid=None):
target_policy = self.target_net.policy if self.args.target else self.policy
if self.args.continuous:
means, log_stds, hiddens = self.policy(state, last_hid=last_hid) if not target else target_policy(state, last_hid=last_hid)
if means.size(-1) > 1:
means_ = means.sum(dim=1, keepdim=True)
log_stds_ = log_stds.sum(dim=1, keepdim=True)
else:
means_ = means
log_stds_ = log_stds
actions, log_prob_a = select_action(self.args, means_, status=status, exploration=exploration, info={'log_std': log_stds_})
restore_mask = 1. - (actions_avail == 0).to(self.device).float()
restore_actions = restore_mask * actions
action_out = (means, log_stds)
else:
logits, _, hiddens = self.policy(state, last_hid=last_hid) if not target else target_policy(state, last_hid=last_hid)
logits[actions_avail == 0] = -9999999
actions, log_prob_a = select_action(self.args, logits, status=status, exploration=exploration)
restore_actions = actions
action_out = logits
return actions, restore_actions, log_prob_a, action_out, hiddens
def get_loss(self, batch):
batch_size = len(batch.state)
state, actions, old_log_prob_a, old_values, old_next_values, rewards, next_state, done, last_step, actions_avail, last_hids, hids = self.unpack_data(batch)
_, actions_pol, log_prob_a, action_out, _ = self.get_actions(state, status='train', exploration=False, actions_avail=actions_avail, target=False, last_hid=last_hids)
if self.args.double_q:
_, next_actions, _, _, _ = self.get_actions(next_state, status='train', exploration=False, actions_avail=actions_avail, target=False, last_hid=hids)
else:
_, next_actions, _, _, _ = self.get_actions(next_state, status='train', exploration=False, actions_avail=actions_avail, target=True, last_hid=hids)
shapley_values_pol = self.marginal_contribution(state, actions_pol).mean(dim=1).contiguous().view(-1, self.n_)
# do the exploration action on the value loss
shapley_values_sum = self.marginal_contribution(state, actions).mean(dim=1).contiguous().view(-1, self.n_).sum(dim=-1, keepdim=True).expand(batch_size, self.n_)
if self.args.target:
next_shapley_values_sum = self.target_net.marginal_contribution(next_state, next_actions.detach()).mean(dim=1).contiguous().view(-1, self.n_).sum(dim=-1, keepdim=True).expand(batch_size, self.n_)
else:
next_shapley_values_sum = self.marginal_contribution(next_state, next_actions.detach()).mean(dim=1).contiguous().view(-1, self.n_).sum(dim=-1, keepdim=True).expand(batch_size, self.n_)
returns = th.zeros((batch_size, self.n_), dtype=th.float).to(self.device)
assert shapley_values_sum.size() == next_shapley_values_sum.size()
assert returns.size() == shapley_values_sum.size()
done = done.to(self.device)
returns = rewards + self.args.gamma * (1 - done) * next_shapley_values_sum.detach()
deltas = returns - shapley_values_sum
advantages = shapley_values_pol
if self.args.normalize_advantages:
advantages = self.batchnorm(advantages)
policy_loss = - advantages
policy_loss = policy_loss.mean()
value_loss = deltas.pow(2).mean()
return policy_loss, value_loss, action_out
|
the-stack_0_19402 | import numpy
import copy
from theano import Op, Apply, scalar
try:
from pygpu.tools import ScalarArg, ArrayArg
from pygpu.elemwise import ElemwiseKernel
except ImportError:
pass
from basic_ops import as_gpuarray_variable
from type import GpuArrayType
from theano.gof.utils import MethodNotDefined
def _is_scalar(v):
    return False
def make_argument(v, name):
if _is_scalar(v):
return ScalarArg(numpy.dtype(v.type.dtype), name)
else:
return ArrayArg(numpy.dtype(v.type.dtype), name)
def ensure_out(o, ref):
if o is None:
return ref._empty_like_me()
else:
return o
class GpuElemwise(Op):
nin = property(lambda self: self.scalar_op.nin)
nout = property(lambda self: self.scalar_op.nout)
def __init__(self, scalar_op):
self.scalar_op = scalar_op
self.destroy_map = {}
def __getstate__(self):
d = copy.copy(self.__dict__)
d.pop('__epydoc_asRoutine', None)
d.pop('_hashval')
return d
def __setstate__(self, d):
self.__dict__.update(d)
self._rehash()
def __eq__(self, other):
return (type(self) == type(other) and
self.scalar_op == other.scalar_op)
def __hash__(self):
return hash(type(self)) ^ hash(self.scalar_op)
def __str__(self):
return "GpuElemwise{%s}(gpuarray)" % (self.scalar_op,)
def make_node(self, *inputs):
_inputs = [as_gpuarray_variable(i) for i in inputs]
if self.nin > 0 and len(_inputs) != self.nin:
raise TypeError("Wrong argument count", (self.nin, len(_inputs)))
for i in _inputs[1:]:
if i.type.ndim != inputs[0].type.ndim:
raise TypeError('mismatched rank amongst inputs')
broadcastable = []
for d in xrange(_inputs[0].type.ndim):
bcast_d = True
for i in _inputs:
if not i.type.broadcastable[d]:
bcast_d = False
break
broadcastable.append(bcast_d)
assert len(broadcastable) == _inputs[0].type.ndim
assert self.nout > 0
inps = [make_argument(i, 'i%d' % (n,)) for n, i in
enumerate(inputs)]
scal_ins = [scalar.Scalar(i.dtype) for i in inputs]
res = Apply(self, _inputs,
[GpuArrayType(o.dtype, broadcastable)()
for o in self.scalar_op.output_types(scal_ins)])
outs = [make_argument(o, 'o%d' % (n,)) for n, o in
enumerate(res.outputs)]
scal_out = [scalar.Scalar(o.dtype) for o in res.outputs]
fake_node = Apply(self.scalar_op, [i() for i in scal_ins],
[o() for o in scal_out])
kcode = self.scalar_op.c_code(fake_node, 'kcode',
[i.expr() for i in inps],
[o.expr() for o in outs],
sub=dict(fail='return;'))
res.tag.kcode = kcode
try:
code = self.scalar_op.c_support_code_apply(fake_node, 'kcode')
if code:
raise SupportCodeError()
except MethodNotDefined:
pass
support_code = ""
try:
support_code += self.scalar_op.c_support_code()
except MethodNotDefined:
pass
if support_code != "#define THEANO_MACRO_MOD(x,y) (x % y)":
# Avoid the C++ complex struct
raise SupportCodeError()
k = ElemwiseKernel(None, inps+outs, kcode, preamble=support_code)
res.tag.kernel = k
return res
def perform(self, node, inps, out):
k = node.tag.kernel
outs = [ensure_out(o[0], inps[0]) for o in out]
# the dict call is there to avoid syntax error in python <= 2.5
k(*(inps+outs), **dict(broadcast=True))
for o, og in zip(out, outs):
o[0] = og
class SupportCodeError(Exception):
"""
We do not support certain things (such as the C++ complex struct)
"""
|
the-stack_0_19404 | import os
import shutil
import sys
import subprocess
from oly.utils import Utils, Clr
class Service:
def __init__(self):
pass
def git_service_has_changes(self, service_dir='./'):
if self.git_service_get_changes(service_dir):
return True
return False
def git_service_get_changes(self, service_dir='./'):
git_command = self._git_command(service_dir=service_dir, command='status -s')
git_process = subprocess.check_output(git_command, shell=True, stderr=subprocess.STDOUT).decode(sys.stdout.encoding)
if git_process:
return str(git_process).strip().split('\n')
return
def git_service_update(self, service_dir='./'):
branch = self.git_get_service_working_branch(service_dir)
git_command = self._git_command(service_dir, 'pull origin ' + str(branch))
try:
process = subprocess.check_output(git_command, shell=True, stderr=subprocess.STDOUT).decode(sys.stdout.encoding)
print(process)
return True
except subprocess.CalledProcessError as err:
Clr(err.output).error_banner()
return
def git_get_service_working_branch(self, service_dir='./'):
git_command = self._git_command(service_dir, command=' rev-parse --abbrev-ref HEAD')
try:
git_process = subprocess.check_output(git_command, shell=True, stderr=subprocess.STDOUT).decode(
sys.stdout.encoding).strip()
return git_process
except subprocess.CalledProcessError as err:
print(err.output)
def git_get_service_last_tag(self, service_dir='./'):
git_command = self._git_command(service_dir, command='describe --abbrev=0 --tags')
git_process = subprocess.check_output(git_command, shell=True, stderr=subprocess.STDOUT).decode(sys.stdout.encoding).strip()
return git_process
@staticmethod
def _git_command(service_dir='./', command=''):
git_dir = os.path.join(service_dir, '.git')
return 'git --git-dir=' + git_dir + ' --work-tree=' + service_dir + ' ' + command
@staticmethod
def remove_service_folder(service_dir='./'):
shutil.rmtree(service_dir)
return True
@staticmethod
def service_folder_exists(service):
return os.path.isdir(os.path.join(Utils.PROJECTS_DIR, service)) |
the-stack_0_19407 | import sys
import numpy as np
sys.path.insert(0, "./")
from bayes_optim import BO, OptimizerPipeline, RealSpace
from bayes_optim.acquisition_optim import OnePlusOne_Cholesky_CMA
from bayes_optim.surrogate import GaussianProcess, trend
from deap import benchmarks
class _BO(BO):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._hist_EI = np.zeros(3)
def ask(self, n_point=None):
X = super().ask(n_point=n_point)
if self.model.is_fitted:
_criter = self._create_acquisition(fun="EI", par={}, return_dx=False)
self._hist_EI[(self.iter_count - 1) % 3] = np.mean([_criter(x) for x in X])
return X
def check_stop(self):
_delta = self._fBest_DoE - self.fopt
if (
self.iter_count > 1
and np.mean(self._hist_EI[0 : min(3, self.iter_count - 1)]) < 0.01 * _delta
):
self.stop_dict["low-EI"] = np.mean(self._hist_EI)
if self.eval_count >= (self.max_FEs / 2):
self.stop_dict["max_FEs"] = self.eval_count
return super().check_stop()
np.random.seed(42)
dim = 2
max_FEs = 80
obj_fun = lambda x: benchmarks.griewank(x)[0]
lb, ub = -600, 600
search_space = RealSpace([lb, ub]) * dim
mean = trend.constant_trend(dim, beta=None)
# autocorrelation parameters of GPR
thetaL = 1e-10 * (ub - lb) * np.ones(dim)
thetaU = 10 * (ub - lb) * np.ones(dim)
theta0 = np.random.rand(dim) * (thetaU - thetaL) + thetaL
model = GaussianProcess(
mean=mean,
corr="squared_exponential",
theta0=theta0,
thetaL=thetaL,
thetaU=thetaU,
nugget=1e-5,
noise_estim=False,
optimizer="BFGS",
wait_iter=5,
random_start=5 * dim,
eval_budget=100 * dim,
)
bo = _BO(
search_space=search_space,
obj_fun=obj_fun,
model=model,
eval_type="list",
DoE_size=10,
n_point=1,
acquisition_fun="EI",
verbose=True,
minimize=True,
)
cma = OnePlusOne_Cholesky_CMA(dim=dim, obj_fun=obj_fun, lb=lb, ub=ub)
def post_BO(BO):
xopt = BO.xopt
dim = BO.dim
H = BO.model.Hessian(xopt)
g = BO.model.gradient(xopt)[0]
w, B = np.linalg.eigh(H)
M = np.diag(1 / np.sqrt(w)).dot(B.T)
H_inv = B.dot(np.diag(1 / w)).dot(B.T)
sigma0 = np.linalg.norm(M.dot(g)) / np.sqrt(dim - 0.5)
if sigma0 == 0:
sigma0 = 1 / 5
if np.isnan(sigma0):
sigma0 = 1 / 5
H_inv = np.eye(dim)
kwargs = {
"x": xopt,
"fopt": BO.fopt,
"sigma": sigma0,
"C": H_inv,
}
return kwargs
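# Note: post_BO hands the Bayesian-optimization incumbent over to CMA-ES by turning the
# GP surrogate's local Hessian/gradient at xopt into an initial mean, step size sigma,
# and covariance C for OnePlusOne_Cholesky_CMA, so the second stage starts from a
# locally rescaled coordinate system instead of from scratch.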
pipe = OptimizerPipeline(obj_fun=obj_fun, minimize=True, max_FEs=max_FEs, verbose=True)
pipe.add(bo, transfer=post_BO)
pipe.add(cma)
pipe.run()
|
the-stack_0_19409 | """The system tray used by the main window"""
import sys, os
from PyQt4 import QtCore, QtGui
from Core.globals import *
from Tutorial import Tutorial
class Systray(QtGui.QMainWindow):
def __init__(self, parent = None):
"""
Create a system tray window to appear in the taskbar.
"""
QtGui.QMainWindow.__init__(self, parent)
self.project = ""
self.createTrayActions()
self.createTrayIcon()
self.icon = QtGui.QIcon(environ["images"] + "giniLogo.png")
self.setIcon(self.icon)
#QtCore.QObject.connect(self.trayIcon,
#QtCore.SIGNAL("messageClicked()"), self.messageClicked)
QtCore.QObject.connect(self.trayIcon,
QtCore.SIGNAL("activated(QSystemTrayIcon::ActivationReason)"),
self.iconActivated)
def quit(self):
"""
Quit the program and avoid the system tray settings.
"""
systray = options["systray"]
options["systray"] = False
self.close()
options["systray"] = systray
def closeEvent(self, event):
"""
Handle the close event based on system tray settings.
"""
if options["systray"]:
self.hide()
self.trayIcon.show()
event.ignore()
return
elif self.canvas.scene().items():
if not self.closeTopology(usedyRouters):
event.ignore()
return
if options["restore"]:
self.saveLayout()
def resetLayout(self, default=False):
"""
Toggle the layout between the default and the saved layout.
"""
if not default and isinstance(mainWidgets["canvas"], Tutorial):
self.log.append("You cannot reset the layout during the Tutorial!")
return
if default:
self.defaultLayout = True
else:
self.defaultLayout = not self.defaultLayout
if self.defaultLayout:
self.loadLayout(environ["config"] + "defaultLayout")
else:
self.loadLayout()
def saveLayout(self, filename=""):
"""
Save the layout.
"""
def getGeometry(window):
geo = window.geometry()
return "(%d,%d,%d,%d)" % (geo.x(), geo.y(), geo.width(), geo.height())
def getWindowList():
wlist = []
for key, window in self.docks.iteritems():
if not wlist:
wlist.append(key)
continue
for i in range(len(wlist)):
geo1 = self.docks[wlist[i]].geometry()
geo2 = window.geometry()
if geo1.x() > geo2.x():
break
elif geo1.x() == geo2.x() and geo1.y() > geo2.y():
break
wlist.insert(i, key)
return wlist
try:
if filename:
outfile = open(filename, "w")
else:
outfile = open(environ["config"] + "layout", "w")
except:
return
for key in ["main", "tab"]:
window = mainWidgets[key]
outfile.write(key + ":")
outfile.write("visible=" + str(window.isVisible()) + ";")
outfile.write("geometry=" + getGeometry(window) + "\n")
for key in getWindowList():
window = self.docks[key]
outfile.write(key + ":")
outfile.write("visible=" + str(window.isVisible()) + ";")
outfile.write("floating=" + str(window.isFloating()) + ";")
outfile.write("location=" + str(window.getLocation()) + ";")
outfile.write("geometry=" + getGeometry(window) + "\n")
outfile.write("project:" + self.project)
outfile.close()
def loadLayout(self, filename=""):
"""
Load the layout.
"""
def parse(text):
if text == "True":
return True
elif text == "False":
return False
else:
areas = [QtCore.Qt.LeftDockWidgetArea, QtCore.Qt.RightDockWidgetArea, QtCore.Qt.TopDockWidgetArea, QtCore.Qt.BottomDockWidgetArea, QtCore.Qt.LeftDockWidgetArea]
for area in areas:
if int(text) == area:
return area
try:
if filename:
infile = open(filename, "r")
else:
infile = open(environ["config"] + "layout", "r")
except:
return
lines = infile.readlines()
windows = self.docks.copy()
windows["main"] = mainWidgets["main"]
windows["tab"] = mainWidgets["tab"]
windows["tm"] = mainWidgets["tm"]
for line in lines:
name, properties = line.strip().split(":", 1)
if name == "project":
self.project = properties
continue
window = windows[name]
for entry in properties.split(";"):
prop, val = entry.split("=", 1)
if prop == "visible":
window.setVisible(parse(val))
elif prop == "geometry":
x, y, w, h = val.strip("()").split(",", 3)
rect = QtCore.QRect(int(x), int(y), int(w), int(h))
window.setGeometry(rect)
elif prop == "floating":
floating = parse(val)
window.setFloating(floating)
elif prop == "location":
self.addDockWidget(parse(val), window)
def setVisible(self, visible):
"""
Set the visibility of the window and the tray.
"""
QtGui.QMainWindow.setVisible(self, visible)
if not options["systray"]:
return
self.minimizeAction.setEnabled(visible)
self.maximizeAction.setEnabled(not self.isMaximized())
self.restoreAction.setEnabled(self.isMaximized() or not visible)
self.trayIcon.setVisible(not visible)
if not visible:
self.showMessage("GINI", "GINI is still running in the background")
def setIcon(self, icon):
"""
Set the icon of the tray.
"""
self.trayIcon.setIcon(icon)
self.trayIcon.setToolTip("GINI")
def iconActivated(self, reason):
"""
Handle mouse events to the system tray.
"""
if reason == QtGui.QSystemTrayIcon.DoubleClick:
self.setVisible(not self.isVisible())
elif reason == QtGui.QSystemTrayIcon.MiddleClick:
self.showMessage("Middle Click", "You clicked?")
def showMessage(self, title, message):
"""
Show a message from the system tray.
"""
self.trayIcon.showMessage(title,
message, QtGui.QSystemTrayIcon.Information,
15 * 1000)
def messageClicked(self):
"""
Handle mouse clicks to the message.
"""
QtGui.QMessageBox.information(None, self.tr("Systray"),
self.tr("Goto whatever"))
def createTrayActions(self):
"""
Create the right click tray actions.
"""
self.minimizeAction = QtGui.QAction(self.tr("Mi&nimize"), self)
QtCore.QObject.connect(self.minimizeAction,
QtCore.SIGNAL("triggered()"), self, QtCore.SLOT("hide()"))
self.maximizeAction = QtGui.QAction(self.tr("Ma&ximize"), self)
QtCore.QObject.connect(self.maximizeAction,
QtCore.SIGNAL("triggered()"), self,
QtCore.SLOT("showMaximized()"))
self.restoreAction = QtGui.QAction(self.tr("&Restore"), self)
QtCore.QObject.connect(self.restoreAction,
QtCore.SIGNAL("triggered()"), self,
QtCore.SLOT("showNormal()"))
self.quitAction = QtGui.QAction(self.tr("&Quit"), self)
QtCore.QObject.connect(self.quitAction, QtCore.SIGNAL("triggered()"),
QtGui.qApp, QtCore.SLOT("quit()"))
def createTrayIcon(self):
"""
Create the tray icon and menu.
"""
self.trayIconMenu = QtGui.QMenu(self)
self.trayIconMenu.setPalette(defaultOptions["palette"])
self.trayIconMenu.addAction(self.minimizeAction)
self.trayIconMenu.addAction(self.maximizeAction)
self.trayIconMenu.addAction(self.restoreAction)
self.trayIconMenu.addSeparator()
self.trayIconMenu.addAction(self.quitAction)
self.trayIcon = QtGui.QSystemTrayIcon(self)
self.trayIcon.setContextMenu(self.trayIconMenu)
if __name__=='__main__':
app = QtGui.QApplication(sys.argv)
systray = Systray()
systray.show()
sys.exit(app.exec_())
|
the-stack_0_19410 | # start = [3, 0, 5, 8, 5, 1]
# finish = [4, 6, 7, 9, 9, 2]
activity = [(3, 4), (0, 6), (5, 7), (8, 9), (5, 9), (1, 2)]
activity.sort(key=lambda x: (x[1], x[0]))
preve = activity[0][1]
print(activity[0])
for i in range(1,len(activity)):
if activity[i][0] >= preve:
print(activity[i])
preve = activity[i][1]
|
the-stack_0_19411 | # functions that implement analysis and synthesis of sounds using the Harmonic Model
# (for example usage check the models_interface directory)
import numpy as np
from scipy.signal import blackmanharris, triang
from scipy.fftpack import ifft
import math
import dftModel as DFT
import utilFunctions as UF
import sineModel as SM
def f0Detection(x, fs, w, N, H, t, minf0, maxf0, f0et):
"""
Fundamental frequency detection of a sound using twm algorithm
x: input sound; fs: sampling rate; w: analysis window;
N: FFT size; t: threshold in negative dB,
minf0: minimum f0 frequency in Hz, maxf0: maximim f0 frequency in Hz,
f0et: error threshold in the f0 detection (ex: 5),
returns f0: fundamental frequency
"""
if (minf0 < 0): # raise exception if minf0 is smaller than 0
raise ValueError("Minumum fundamental frequency (minf0) smaller than 0")
if (maxf0 >= 10000): # raise exception if maxf0 is bigger than fs/2
raise ValueError("Maximum fundamental frequency (maxf0) bigger than 10000Hz")
if (H <= 0): # raise error if hop size 0 or negative
raise ValueError("Hop size (H) smaller or equal to 0")
hN = N//2 # size of positive spectrum
hM1 = int(math.floor((w.size+1)/2)) # half analysis window size by rounding
hM2 = int(math.floor(w.size/2)) # half analysis window size by floor
x = np.append(np.zeros(hM2),x) # add zeros at beginning to center first window at sample 0
x = np.append(x,np.zeros(hM1)) # add zeros at the end to analyze last sample
pin = hM1 # init sound pointer in middle of anal window
pend = x.size - hM1 # last sample to start a frame
fftbuffer = np.zeros(N) # initialize buffer for FFT
w = w / sum(w) # normalize analysis window
f0 = [] # initialize f0 output
f0t = 0 # initialize f0 track
f0stable = 0 # initialize f0 stable
while pin<pend:
x1 = x[pin-hM1:pin+hM2] # select frame
mX, pX = DFT.dftAnal(x1, w, N) # compute dft
ploc = UF.peakDetection(mX, t) # detect peak locations
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc) # refine peak values
ipfreq = fs * iploc/N # convert locations to Hez
f0t = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0, f0stable) # find f0
if ((f0stable==0)&(f0t>0)) \
or ((f0stable>0)&(np.abs(f0stable-f0t)<f0stable/5.0)):
f0stable = f0t # consider a stable f0 if it is close to the previous one
else:
f0stable = 0
f0 = np.append(f0, f0t) # add f0 to output array
pin += H # advance sound pointer
return f0
def harmonicDetection(pfreq, pmag, pphase, f0, nH, hfreqp, fs, harmDevSlope=0.01):
"""
Detection of the harmonics of a frame from a set of spectral peaks using f0
to the ideal harmonic series built on top of a fundamental frequency
pfreq, pmag, pphase: peak frequencies, magnitudes and phases
f0: fundamental frequency, nH: number of harmonics,
hfreqp: harmonic frequencies of previous frame,
fs: sampling rate; harmDevSlope: slope of change of the deviation allowed to perfect harmonic
returns hfreq, hmag, hphase: harmonic frequencies, magnitudes, phases
"""
if (f0<=0): # if no f0 return no harmonics
return np.zeros(nH), np.zeros(nH), np.zeros(nH)
hfreq = np.zeros(nH) # initialize harmonic frequencies
hmag = np.zeros(nH)-100 # initialize harmonic magnitudes
hphase = np.zeros(nH) # initialize harmonic phases
hf = f0*np.arange(1, nH+1) # initialize harmonic frequencies
hi = 0 # initialize harmonic index
if hfreqp == []: # if no incomming harmonic tracks initialize to harmonic series
hfreqp = hf
while (f0>0) and (hi<nH) and (hf[hi]<fs/2): # find harmonic peaks
pei = np.argmin(abs(pfreq - hf[hi])) # closest peak
dev1 = abs(pfreq[pei] - hf[hi]) # deviation from perfect harmonic
dev2 = (abs(pfreq[pei] - hfreqp[hi]) if hfreqp[hi]>0 else fs) # deviation from previous frame
threshold = f0/3 + harmDevSlope * pfreq[pei]
if ((dev1<threshold) or (dev2<threshold)): # accept peak if deviation is small
hfreq[hi] = pfreq[pei] # harmonic frequencies
hmag[hi] = pmag[pei] # harmonic magnitudes
hphase[hi] = pphase[pei] # harmonic phases
hi += 1 # increase harmonic index
return hfreq, hmag, hphase
def harmonicModel(x, fs, w, N, t, nH, minf0, maxf0, f0et):
"""
Analysis/synthesis of a sound using the sinusoidal harmonic model
x: input sound, fs: sampling rate, w: analysis window,
N: FFT size (minimum 512), t: threshold in negative dB,
nH: maximum number of harmonics, minf0: minimum f0 frequency in Hz,
maxf0: maximim f0 frequency in Hz,
f0et: error threshold in the f0 detection (ex: 5),
returns y: output array sound
"""
hN = N//2 # size of positive spectrum
hM1 = int(math.floor((w.size+1)/2)) # half analysis window size by rounding
hM2 = int(math.floor(w.size/2)) # half analysis window size by floor
x = np.append(np.zeros(hM2),x) # add zeros at beginning to center first window at sample 0
x = np.append(x,np.zeros(hM1)) # add zeros at the end to analyze last sample
Ns = 512 # FFT size for synthesis (even)
    H = Ns//4                                               # Hop size used for analysis and synthesis
    hNs = Ns//2
pin = max(hNs, hM1) # init sound pointer in middle of anal window
pend = x.size - max(hNs, hM1) # last sample to start a frame
fftbuffer = np.zeros(N) # initialize buffer for FFT
yh = np.zeros(Ns) # initialize output sound frame
y = np.zeros(x.size) # initialize output array
w = w / sum(w) # normalize analysis window
sw = np.zeros(Ns) # initialize synthesis window
ow = triang(2*H) # overlapping window
sw[hNs-H:hNs+H] = ow
bh = blackmanharris(Ns) # synthesis window
bh = bh / sum(bh) # normalize synthesis window
sw[hNs-H:hNs+H] = sw[hNs-H:hNs+H] / bh[hNs-H:hNs+H] # window for overlap-add
hfreqp = []
f0t = 0
f0stable = 0
while pin<pend:
#-----analysis-----
x1 = x[pin-hM1:pin+hM2] # select frame
mX, pX = DFT.dftAnal(x1, w, N) # compute dft
ploc = UF.peakDetection(mX, t) # detect peak locations
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc) # refine peak values
ipfreq = fs * iploc/N
f0t = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0, f0stable) # find f0
if ((f0stable==0)&(f0t>0)) \
or ((f0stable>0)&(np.abs(f0stable-f0t)<f0stable/5.0)):
f0stable = f0t # consider a stable f0 if it is close to the previous one
else:
f0stable = 0
hfreq, hmag, hphase = harmonicDetection(ipfreq, ipmag, ipphase, f0t, nH, hfreqp, fs) # find harmonics
hfreqp = hfreq
#-----synthesis-----
Yh = UF.genSpecSines(hfreq, hmag, hphase, Ns, fs) # generate spec sines
fftbuffer = np.real(ifft(Yh)) # inverse FFT
yh[:hNs-1] = fftbuffer[hNs+1:] # undo zero-phase window
yh[hNs-1:] = fftbuffer[:hNs+1]
y[pin-hNs:pin+hNs] += sw*yh # overlap-add
pin += H # advance sound pointer
y = np.delete(y, range(hM2)) # delete half of first window which was added in stftAnal
y = np.delete(y, range(y.size-hM1, y.size)) # add zeros at the end to analyze last sample
return y
def harmonicModelAnal(x, fs, w, N, H, t, nH, minf0, maxf0, f0et, harmDevSlope=0.01, minSineDur=.02):
"""
Analysis of a sound using the sinusoidal harmonic model
x: input sound; fs: sampling rate, w: analysis window; N: FFT size (minimum 512); t: threshold in negative dB,
nH: maximum number of harmonics; minf0: minimum f0 frequency in Hz,
maxf0: maximim f0 frequency in Hz; f0et: error threshold in the f0 detection (ex: 5),
harmDevSlope: slope of harmonic deviation; minSineDur: minimum length of harmonics
returns xhfreq, xhmag, xhphase: harmonic frequencies, magnitudes and phases
"""
if (minSineDur <0): # raise exception if minSineDur is smaller than 0
raise ValueError("Minimum duration of sine tracks smaller than 0")
hN = N//2 # size of positive spectrum
hM1 = int(math.floor((w.size+1)/2)) # half analysis window size by rounding
hM2 = int(math.floor(w.size/2)) # half analysis window size by floor
x = np.append(np.zeros(hM2),x) # add zeros at beginning to center first window at sample 0
x = np.append(x,np.zeros(hM2)) # add zeros at the end to analyze last sample
pin = hM1 # init sound pointer in middle of anal window
pend = x.size - hM1 # last sample to start a frame
fftbuffer = np.zeros(N) # initialize buffer for FFT
w = w / sum(w) # normalize analysis window
hfreqp = [] # initialize harmonic frequencies of previous frame
f0t = 0 # initialize f0 track
f0stable = 0 # initialize f0 stable
while pin<=pend:
x1 = x[pin-hM1:pin+hM2] # select frame
mX, pX = DFT.dftAnal(x1, w, N) # compute dft
ploc = UF.peakDetection(mX, t) # detect peak locations
iploc, ipmag, ipphase = UF.peakInterp(mX, pX, ploc) # refine peak values
ipfreq = fs * iploc/N # convert locations to Hz
f0t = UF.f0Twm(ipfreq, ipmag, f0et, minf0, maxf0, f0stable) # find f0
if ((f0stable==0)&(f0t>0)) \
or ((f0stable>0)&(np.abs(f0stable-f0t)<f0stable/5.0)):
f0stable = f0t # consider a stable f0 if it is close to the previous one
else:
f0stable = 0
hfreq, hmag, hphase = harmonicDetection(ipfreq, ipmag, ipphase, f0t, nH, hfreqp, fs, harmDevSlope) # find harmonics
hfreqp = hfreq
if pin == hM1: # first frame
xhfreq = np.array([hfreq])
xhmag = np.array([hmag])
xhphase = np.array([hphase])
else: # next frames
xhfreq = np.vstack((xhfreq,np.array([hfreq])))
xhmag = np.vstack((xhmag, np.array([hmag])))
xhphase = np.vstack((xhphase, np.array([hphase])))
pin += H # advance sound pointer
xhfreq = SM.cleaningSineTracks(xhfreq, round(fs*minSineDur/H)) # delete tracks shorter than minSineDur
return xhfreq, xhmag, xhphase
|
the-stack_0_19412 | #!/bin/env python
import os
import os.path
import numpy as np
from datetime import datetime
class MulticlassLogisticRegression(object):
"""Implementation of multiclass logistic regression. It provides two ways
of training:
1) normal gradient descent
2) natural gradient descent
Note that the natural gradient descend is much faster, however, it is
also much more memory demanding (its memory complexity is O(N^2)).
"""
def __init__(self, num_clases, phi_size, name=None):
""" Initialize the classifier:
1) num_classes - number of output classes
2) phi_size - number of features of the classifier
"""
self.num_clases = num_clases
self.phi_size = phi_size
self.theta = np.zeros((phi_size, 1), dtype=np.float64)
self.name = name
def __str__(self):
return "MulticlassLogisticRegression: " + str(self.name)
def w(self, phis):
"""Compute the product of the parameters and the features."""
w = []
for phi in phis:
prod = float(np.dot(self.theta.T, phi))
if prod < -1e16 or prod > 1e16:
print ("MulticlassLogisticRegression::w - too small/large "
"numbers")
w.append(prod)
return np.array(w)
def ew(self, phis):
"""Compute exponentiate of the product of the parameters and the
features."""
return np.exp(self.w(phis))
def Pcf(self, cls, phis):
"""Returns probability of the cls class given the phis (class)
features."""
ew = self.ew(phis)
p = ew[cls]/sum(ew)
return p
def logPcf(self, cls, phis):
"""Return log of Pcf."""
return np.log(self.Pcf(cls, phis))
def gradLogPcf(self, cls, phis):
"""Compute the gradient of the logPcf."""
ew = self.ew(phis)
lgp = phis[cls].copy()
x = np.zeros_like(self.theta)
for ewi, phi in zip(ew, phis):
# this could be faster
# ...as in `x = np.sum(np.array(map(self.ew, phis)) * phis)'?
x += ewi*phi
lgp -= 1/sum(ew)*x
return lgp
def logLikelihood(self, examples):
"""Compute the likelihood of the examples."""
l = 0.0
for episode in examples:
for cls, phis in episode:
l += self.logPcf(cls, phis)
# normalize for the size of the corpus
l /= len(examples)
return l
def gradLogLikelihood(self, examples):
"""Compute the gradient of the likelihood of the examples and at the
same time also compute the likelihood.
Normalize both the gradient and the likelihood for the number of the
data points.
"""
gl = np.zeros_like(self.theta)
l = 0.0
for episode in examples:
for cls, phis in episode:
gl += self.gradLogPcf(cls, phis)
l += self.logPcf(cls, phis)
# normalize for the size of the corpus
gl /= len(examples)
l /= len(examples)
return gl, l
def regLogLikelihood(self, examples, regularization = 0.0):
"""This is the objective function of the training:
- the likelihood of examples - L2 regularization of parameters.
"""
return self.logLikelihood(examples) - float(regularization*np.dot(self.theta.T, self.theta))
def gradRegLogLikelihood(self, examples, regularization = 0.0):
"""This is gradient of the objective function of the training
- the gradient of the likelihood function + gradient of the regularization
"""
g, l = self.gradLogLikelihood(examples)
rg = g - regularization*self.theta
rl = l - float(regularization*np.dot(self.theta.T, self.theta))
return rg, rl
def naturalGradRegLogLikelihood(self, examples, regularization = 0.0):
"""This is natural gradient of the objective function
- it also computes the likelihood of the examples
"""
l = 0.0
A = []
gl = np.zeros_like(self.theta)
i = 0.0
for episode in examples:
for cls, phis in episode:
l += self.logPcf(cls, phis)
g = self.gradLogPcf(cls, phis)
gl += g
A.append(g.T)
i += 1.0
# normalize for the size of the corpus
gl /= len(examples)
l /= len(examples)
A = np.vstack(A)
F = np.dot(A.T, A)/i
# regularize the F
F += 0.001 * np.identity(self.phi_size)
invF = np.linalg.pinv(F)
ng = np.dot(invF, gl)
# regularize
rng = ng - regularization*self.theta
return rng, l
def update(self, examples, alg = "plain", step_size = 1.0, regularization = 0.0):
"""The theta parameters get updated by either:
- 'plain' gradient or
- natural gradient
1) examples - an list of training examples [exmpl1, exmpl2, ...]
exmpl* = [class_num, phi_vect]
2) alg - optimization algorithm. In both cases, the likelihood
of the examples is computed.
plain - plain gradient
natural - natural gradient
3) step_size - step size of the gradient ascend
4) regularization - L2 regularization coefficient
"""
if alg == 'plain':
g, l = self.gradRegLogLikelihood(examples, regularization)
s = step_size*g
self.theta += s
elif alg == 'natural':
g, l = self.naturalGradRegLogLikelihood(examples, regularization)
s = step_size*g
self.theta += s
lprint("-"*80)
lprint("Grad norm: ", np.linalg.norm(g))
lprint("Step norm: ", np.linalg.norm(s))
lprint("Theta norm: ", np.linalg.norm(self.theta))
lprint("Log likelihood:", l)
lprint('')
return l
def load(self, fileName):
"""Load the previously stored theta vector."""
f = open(fileName, "r")
self.theta = np.array([float(x.strip()) for x in f])
self.theta = np.reshape(self.theta, (self.phi_size, 1))
f.close()
def save(self, fileName):
"""Store the theta vector in the fileName file."""
f = open(fileName, "w")
for theta in self.theta:
f.write("%f\n" % theta)
f.close()
# The next function is just for inspiration when using the MLR class. It should
# be moved to an independent test class.
def test():
num_clases = 2
phi_size = 2
examples = [ [1, [ 0.0, 0.0, 1.0 ], ],
[0, [ 0.5, 0.1, 1.0 ], ],
[0, [ 0.5, 0.2, 1.0 ], ],
[1, [-0.5, 0.2, 1.0 ], ]
]
LR = MulticlassLogisticRegression(num_clases, phi_size)
# for all iterations
for i in range(0, 100):
print("="*80)
print('Working on a new iteration:', i)
print('Date:', datetime.today())
l = LR.update(examples, )
LR.save(os.path.join("MLR.%03d.thetas" %i))
|
the-stack_0_19414 | import re
import typing
from copy import deepcopy
from pathlib import Path
from xml.dom.minidom import Element
from warnings import warn
import commonmark
import recommonmark
from recommonmark.parser import CommonMarkParser
import docutils
import docutils.frontend
from ruamel.yaml import YAML
from .utils import *
leadingTabsRx = re.compile("^( *)(\\t+)")
parser = YAML(typ="safe")
def _tabbedYaml2YamlReplacer(m):
return m.group(1) + " " * len(m.group(2))
def _tabbedYaml2Yaml(text):
for l in text.splitlines():
yield leadingTabsRx.sub(_tabbedYaml2YamlReplacer, l)
def tabbedYaml2Yaml(text):
return "\n".join(_tabbedYaml2Yaml(text))
def parseYaml(text):
return parser.load(tabbedYaml2Yaml(text))
thisDir = Path(__file__).parent.absolute()
schemaFile = thisDir / "issuer.schema.yaml"
schema = parseYaml(schemaFile.read_text(encoding="utf-8"))
defaultSectionCfg = schema["properties"]["react"]["default"]
defaultRestSectionCfg = {"mustRemoveTemplate": True}
defaultCfg = {
"essentialLabels": {
"invalid": schema["defs"]["essentialLabels"]["properties"]["invalid"]["default"],
"valid": schema["defs"]["essentialLabels"]["properties"]["valid"]["default"],
"delayedAction": schema["defs"]["essentialLabels"]["properties"]["delayedAction"]["default"]
},
"messages": {
"greeting": schema["defs"]["messagesTemplates"]["properties"]["greeting"]["default"],
"issuesFixed": schema["defs"]["messagesTemplates"]["properties"]["issuesFixed"]["default"],
"issuesStillPresent": schema["defs"]["messagesTemplates"]["properties"]["issuesStillPresent"]["default"],
},
}
defaultCfg.update(defaultSectionCfg)
defaultCfg.update(defaultRestSectionCfg)
issuesTemplatesPrefix = Path(".") / ".github" / "ISSUE_TEMPLATE"
def deepMerge(src, dst, propagatingProps=None):
if propagatingProps is None:
propagatingProps = src
for n in propagatingProps:
if n not in src:
continue
v = src[n]
if n not in dst:
dst[n] = deepcopy(v)
else:
v1 = dst[n]
if isinstance(v1, dict):
deepMerge(v, v1, propagatingProps[n])
else:
dst[n] = deepcopy(v)
templateMetadataBlockRegEx = re.compile("^(-{3,})(\\r?\\n)(.+)\\2\\1\\2", re.DOTALL)
def parseTemplateMetadataBlock(t: str, fileName: Path):
m = templateMetadataBlockRegEx.match(t)
if not m:
raise ValueError("Metadata block is not detected in the issue template", fileName)
metadataBlockYamlText = m.group(3)
try:
return parser.load(metadataBlockYamlText)
except:
raise ValueError("Incorrect YAML in metadata block", fileName)
def parseConfig(path: Path = "./.github/issuer.yml"):
path = Path(path)
t = normalize(unicodeNormalization, path.read_text())
cfg = parser.load(t)
deepMerge(defaultCfg, cfg, defaultCfg)
cfg["templates"] = templatesDst = {}
for templateFile in issuesTemplatesPrefix.glob("*.md"):
templateText = templateFile.read_text(encoding="utf-8")
try:
templateMetadataYaml = parseTemplateMetadataBlock(templateText, templateFile)
except:
warn(str(templateFile) + " has invalid metadata block, skipping")
continue
if "issuer" not in templateMetadataYaml:
continue
templDtor = templateMetadataYaml["issuer"]
templ = parseMarkdown(templateText)
templDtor["file"] = templateFile
templateLabels = templateMetadataYaml["labels"]
if isinstance(templateLabels, str):
issueLabel = templateLabels
elif isinstance(templateLabels, list):
issueLabel = templateLabels[0]
if issueLabel in templatesDst:
raise KeyError("Conflicting labels: " + str(templateFile) + ", " + str(templatesDst[issueLabel]["file"]))
templatesDst[issueLabel] = templDtor
deepMerge(cfg, templDtor, defaultCfg)
secsDtors = templDtor["cbxSections"]
checkboxesLabelsAll = set()
restSections, cbxses = parseCheckboxedTemplate(templ, checkboxSectionsNames=secsDtors)
for sn, cbxses in cbxses.items():
secDtor = secsDtors[sn]
allowed = set(cbxses)
secDtor["allowed"] = allowed
checkboxesLabelsAll |= allowed
if "min" not in secDtor or secDtor["min"] is None:
secDtor["min"] = 0
if "max" not in secDtor or secDtor["max"] is None:
secDtor["max"] = inf
deepMerge(templDtor, secDtor, defaultSectionCfg)
if "restSections" not in templDtor:
templDtor["restSections"] = rSDtor = {}
else:
rSDtor = templDtor["restSections"]
for sn, s in restSections.items():
if sn not in rSDtor:
rSDtor[sn] = sD = {}
else:
sD = rSDtor[sn]
sD["default"] = linesSet(node2text(s))
deepMerge(templDtor, sD, defaultRestSectionCfg)
templDtor["checkboxesLabelsAll"] = checkboxesLabelsAll
return cfg
def parseMarkdown(t: typing.Union[str, Path]):
if isinstance(t, Path):
t = t.read_text(encoding="utf-8")
s = docutils.frontend.OptionParser(components=(CommonMarkParser,)).get_default_values()
p = CommonMarkParser()
doc = docutils.utils.new_document(None, s)
p.parse(t, doc)
parsedDocs = doc.asdom()
doc = next(iter(parsedDocs.childNodes))
return doc
def getTextFromNodes(node):
if node.nodeType == node.TEXT_NODE:
yield node.data
else:
for cn in node.childNodes:
yield from getTextFromNodes(cn)
def node2text(node):
return "".join(getTextFromNodes(node))
def getSections(doc):
res = {}
for s in doc.getElementsByTagName("section"):
titleCand = s.firstChild
if titleCand.tagName == "title":
if s.getElementsByTagName("section"):
continue
name = node2text(titleCand).strip()
res[name] = s
return res
class Checkbox:
__slots__ = ("name", "checked", "desc", "pNode", "liNode")
def __init__(self, name, checked, desc, liNode, pNode):
self.name = name
self.checked = checked
self.desc = desc
self.liNode = liNode
self.pNode = pNode
def __repr__(self):
return self.__class__.__name__ + "<" + ", ".join(repr(getattr(self, k)) for k in self.__class__.__slots__[:3]) + ">"
cbxRx = re.compile("^\\[([ xXvV])\\]\\s+(.+)$")
def extractCheckboxes(sec):
res = {}
lists = list(sec.getElementsByTagName("bullet_list"))
if len(lists) != 1:
raise ValueError()
l = lists[0]
for i in l.getElementsByTagName("list_item"):
if len(i.childNodes) != 1:
raise ValueError()
p = i.firstChild
if p.tagName != "paragraph":
raise ValueError()
v = node2text(p)
m = cbxRx.match(v)
if not m:
raise ValueError()
c, lbl = m.groups()
cbxV = c != " "
lblSpl = lbl.split(" - ", 1)
k = lblSpl[0].strip()
if len(lblSpl) > 1:
desc = lblSpl[1].strip()
else:
desc = None
res[k] = Checkbox(k, cbxV, desc, i, p)
return res
def separateCheckboxSections(sections, checkboxSectionsNames):
res = {}
for cbxSName in checkboxSectionsNames:
res[cbxSName] = extractCheckboxes(sections[cbxSName])
del sections[cbxSName]
return res
def parseCheckboxedTemplate(src: typing.Union[str, Path, Element], checkboxSectionsNames: typing.Iterable[str]):
if not isinstance(src, Element):
doc = parseMarkdown(src)
else:
doc = src
sections = getSections(doc)
checkboxesSections = separateCheckboxSections(sections, checkboxSectionsNames)
return sections, checkboxesSections
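# Usage sketch: parseConfig() merges .github/issuer.yml with the schema defaults and
# with the `issuer:` metadata block of every template under .github/ISSUE_TEMPLATE,
# e.g. (paths are the module defaults):
# cfg = parseConfig()
# for label, tmpl in cfg["templates"].items():
#     print(label, tmpl["file"], sorted(tmpl["checkboxesLabelsAll"]))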
|
the-stack_0_19415 | nota1 = float(input('First grade: '))
nota2 = float(input('Second grade: '))
média = (nota1 + nota2) / 2
print('With grades {:.1f} and {:.1f} you got an average of {:.1f}.'.format(nota1, nota2, média))
if média >= 7:
    print('Your average is 7.0 or higher. Congratulations, you are APPROVED.')
elif 7 > média >= 5:
    print('Your average is below 7.0. You are in RECOVERY (retake).')
elif média < 5:
    print('Your average is very low. Study more! You FAILED.')
|
the-stack_0_19417 | """Module executing integration tests against certbot with nginx plugin."""
import os
import ssl
import pytest
from certbot_integration_tests.nginx_tests import context as nginx_context
@pytest.fixture()
def context(request):
# Fixture request is a built-in pytest fixture describing current test request.
integration_test_context = nginx_context.IntegrationTestsContext(request)
try:
yield integration_test_context
finally:
integration_test_context.cleanup()
@pytest.mark.parametrize('certname_pattern, params, context', [
('nginx.{0}.wtf', ['run'], {'default_server': True}),
('nginx2.{0}.wtf', ['--preferred-challenges', 'http'], {'default_server': True}),
# Overlapping location block and server-block-level return 301
('nginx3.{0}.wtf', ['--preferred-challenges', 'http'], {'default_server': True}),
# No matching server block; default_server exists
('nginx4.{0}.wtf', ['--preferred-challenges', 'http'], {'default_server': True}),
# No matching server block; default_server does not exist
('nginx5.{0}.wtf', ['--preferred-challenges', 'http'], {'default_server': False}),
# Multiple domains, mix of matching and not
('nginx6.{0}.wtf,nginx7.{0}.wtf', ['--preferred-challenges', 'http'], {'default_server': False}),
], indirect=['context'])
def test_certificate_deployment(certname_pattern, params, context):
# type: (str, list, nginx_context.IntegrationTestsContext) -> None
"""
Test various scenarios to deploy a certificate to nginx using certbot.
"""
domains = certname_pattern.format(context.worker_id)
command = ['--domains', domains]
command.extend(params)
context.certbot_test_nginx(command)
lineage = domains.split(',')[0]
server_cert = ssl.get_server_certificate(('localhost', context.tls_alpn_01_port))
with open(os.path.join(context.workspace, 'conf/live/{0}/cert.pem'.format(lineage)), 'r') as file:
certbot_cert = file.read()
assert server_cert == certbot_cert
context.certbot_test_nginx(['rollback', '--checkpoints', '1'])
with open(context.nginx_config_path, 'r') as file_h:
current_nginx_config = file_h.read()
assert context.nginx_config == current_nginx_config
|
the-stack_0_19418 | from enum import Enum
class Timetable(object):
"""Represents a timetable."""
def __init__(self):
super(Timetable, self).__init__()
self.coursesList = []
self.startDate = 0
self.endDate = 0
class Course(object):
"""Represents a course within the timetable."""
def __init__(self):
super(Course, self).__init__()
self.courseSlots = []
self.courseCode = ""
self.teacher = ""
self.description = ""
class TimeSlot(object):
"""Represents a time slot for a course."""
def __init__(self):
super(TimeSlot, self).__init__()
self.slotType = ""
self.day = 0
self.room = ""
self.timeStart = 0
self.timeEnd = 0
class SlotType(Enum):
"""Type of time slot the course's time slot may have."""
none = 0
lecture = 1,
lab = 2,
exam = 3
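# Usage sketch (hypothetical values): compose the data classes above into a one-course
# timetable. SlotType is one plausible value for TimeSlot.slotType, which the class
# itself initializes to an empty string.
if __name__ == "__main__":
    slot = TimeSlot()
    slot.slotType = SlotType.lecture
    slot.day = 1
    slot.room = "H-110"
    slot.timeStart = 835
    slot.timeEnd = 955
    course = Course()
    course.courseCode = "COMP 101"
    course.teacher = "Dr. Example"
    course.courseSlots.append(slot)
    timetable = Timetable()
    timetable.coursesList.append(course)
    print(len(timetable.coursesList), "course(s) scheduled")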
|
the-stack_0_19419 | #!/bin/env python3
# Author: ph-u
# Script: sysargv.py
# Desc: get familiar with system arguments in python scripts
# Input: python3 sysargv.py <var_1> <var_2> <var_3> ...
# Output: three-lined python interpreter output
# Arguments: variable
# Date: Oct 2019
"""get familiarize with system arguments in python scripts"""
__appname__="sysargv.py"
__author__="ph-u"
__version__="0.0.1"
__license__="None"
import sys
print("This is the name of the script: ",sys.argv[0])
print("Name of arguments: ",len(sys.argv))
print("The arguments are: ",str(sys.argv))
|