filename | text
---|---|
the-stack_0_19591 | __author__ = 'Neil Butcher'
from PyQt4 import QtCore, QtGui
from widget_duration import SingleDurationWidget
from Rota_System.UI.widget_addDel_combo import AddDelComboWidget
from model_durations import DurationsModel
class DurationsWidget(QtGui.QWidget):
commandIssued = QtCore.pyqtSignal(QtGui.QUndoCommand)
criticalCommandIssued = QtCore.pyqtSignal()
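    # undo commands raised by the child duration widget and the durations model
    # are relayed through these signals so a parent undo framework (e.g. the
    # MasterUndoModel used in main() below) can collect them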
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.layout = QtGui.QVBoxLayout(self)
self.comboWidget = AddDelComboWidget(self)
self.layout.addWidget(self.comboWidget)
self.singleDurationWidget = SingleDurationWidget(self)
self.layout.addWidget(self.singleDurationWidget)
self.addComandContributer(self.singleDurationWidget)
self.comboWidget.objectSelected.connect(self.singleDurationWidget.setDuration)
def setPopulationModel(self, model):
self.singleDurationWidget.setPopulationModel(model)
def setInstitution(self, institution):
m = DurationsModel(institution)
self._set_model(m)
def _set_model(self, model):
self.comboWidget.setModel(model)
if len(model.durations) > 0:
self.singleDurationWidget.setDuration(model.durations[0])
self.addComandContributer(model)
@QtCore.pyqtSlot(QtGui.QUndoCommand)
def emitCommand(self, command):
self.commandIssued.emit(command)
@QtCore.pyqtSlot()
def emitCriticalCommand(self):
self.criticalCommandIssued.emit()
def addComandContributer(self, otherModel):
otherModel.commandIssued.connect(self.emitCommand)
otherModel.criticalCommandIssued.connect(self.emitCriticalCommand)
import sys
from Rota_System.Roles import Role, GlobalRoleList
from Rota_System.Institution import Institution
from Rota_System.UI.model_undo import MasterUndoModel
from Rota_System.UI.People.model_population import PopulationModel
def main():
GlobalRoleList.add_role(Role('Baker', 'B', 2))
GlobalRoleList.add_role(Role('Singer', 'S', 9))
GlobalRoleList.add_role(Role('Fisherman', 'F', 7))
m = MasterUndoModel()
i = Institution(None)
p = PopulationModel(i)
app = QtGui.QApplication(sys.argv)
w = DurationsWidget(None)
w.setPopulationModel(p)
w.setInstitution(i)
m.add_command_contributer(w)
w.show()
v = QtGui.QUndoView(None)
v.setStack(m.undoStack)
v.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main() |
the-stack_0_19594 | import ast, astunparse, copy
from polyrec.witnesstuples import WitnessTuple
class AnalyzeTreeReads(ast.NodeVisitor):
def __init__(self):
self.treereads = set([])
def visit_Attribute(self, node: ast.Attribute):
if node.attr == 'l' or node.attr == 'r':
self.treereads.add(node.attr)
self.generic_visit(node)
class AnalyzeArrayReads(ast.NodeVisitor):
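    # collects constant index offsets read through subscripts, e.g.
    # a[i] -> 0, a[i+1] -> +1, a[i-1] -> -1 (assumes pre-3.9 ast.Index nodes)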
def __init__(self):
self.arrayreads = set([])
def visit_Subscript(self, node: ast.Subscript):
if isinstance(node.slice, ast.Index):
if isinstance(node.slice.value, ast.BinOp):
if isinstance(node.slice.value.op, ast.Add):
self.arrayreads.add(node.slice.value.right.n)
elif isinstance(node.slice.value.op, ast.Sub):
self.arrayreads.add(-node.slice.value.right.n)
if isinstance(node.slice.value, ast.Name):
self.arrayreads.add(0)
class AnalyzeTreeWrites(ast.NodeVisitor):
def __init__(self):
self.treewrites = set([])
def visit_Attribute(self, node: ast.Attribute):
if node.attr == 'l' or node.attr == 'r':
self.treewrites.add(node.attr)
self.generic_visit(node)
class AnalyzeArrayWrites(ast.NodeVisitor):
def __init__(self):
self.arraywrites = set([])
def visit_Subscript(self, node: ast.Subscript):
if isinstance(node.slice, ast.Index):
if isinstance(node.slice.value, ast.BinOp):
if isinstance(node.slice.value.op, ast.Add):
self.arraywrites.add(node.slice.value.right.n)
elif isinstance(node.slice.value.op, ast.Sub):
self.arraywrites.add(-node.slice.value.right.n)
if isinstance(node.slice.value, ast.Name):
self.arraywrites.add(0)
class AnalyzeSelfCall(ast.NodeVisitor):
def __init__(self, fname: str):
self.fname = fname
self.rcall = 0
def visit_Call(self, node: ast.Call):
if node.func.id == self.fname:
self.rcall += 1
class AnalyzeInductionVar(ast.NodeVisitor):
def __init__(self, tags: list):
self.tags = tags
self.indvars = {}
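        # filled in by visit_arguments: dimension tag -> formal argument node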
def visit_arguments(self, node: ast.arguments):
assert len(self.tags) == len(node.args)
self.indvars = dict(zip(self.tags, copy.deepcopy(node.args)))
class AnalyzeCollection(ast.NodeVisitor):
def __init__(self):
self.functions = {}
self.dims = 0
def visit_FunctionDef(self, node: ast.FunctionDef):
self.dims += 1
self.functions[self.dims] = node
class AnalyzeFunction(ast.NodeVisitor):
def __init__(self, dim: int, fname: str, loop: bool):
self.dim = dim
self.fname = fname
self.loop = loop # true or false
self.alp = ['e']
self.ord = ['e']
self.guard = {} # label: g<dim>
self.rcall = {} # label: r<dim><label>
self.tcall = {} # label: t<dim>
self.work = {} # label: s1
def visit_If(self, node: ast.If):
if isinstance(node.body[0], ast.Return):
self.guard['g'+str(self.dim)] = copy.deepcopy(node.test)
def visit_Call(self, node: ast.Call):
if node.func.id == self.fname:
if self.loop:
label = "r"+str(self.dim)
self.ord.append(label)
self.rcall[label] = copy.deepcopy(node)
else:
if isinstance(node.args[self.dim-1], ast.Attribute):
label = "r"+str(self.dim)+node.args[self.dim-1].attr
self.ord.append(label)
self.rcall[label] = copy.deepcopy(node)
else:
label = "t"+str(self.dim)
self.ord.append(label)
self.tcall[label] = copy.deepcopy(node)
def visit_Assign(self, node: ast.Assign):
self.ord.append('s1')
self.work['s1'] = copy.deepcopy(node)
def set_alp(self):
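        # alphabet order: 'e' first, then recursive-call labels, then
        # transfer-call / work labels, all taken from the traversal order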
rec = []
trs = []
for s in self.ord:
if s[0] == 'r':
rec.append(s)
elif s[0] == 't' or s[0] == 's':
trs.append(s)
self.alp = self.alp + rec + trs
def constructf(self):
ret_node = ast.FunctionDef(name=self.fname, args=[],
decorator_list=[], returns=ast.NameConstant(None))
body_node = [ast.If(test=copy.deepcopy(self.guard['g'+str(self.dim)]),
body=[ast.Return(None)], orelse=[])]
for label in self.ord[1:]:
if label[0] == "t":
body_node.append(ast.Expr(copy.deepcopy(self.tcall[label])))
elif label[0] == "r":
body_node.append(ast.Expr(copy.deepcopy(self.rcall[label])))
else:
body_node.append(copy.deepcopy(self.work[label]))
ret_node.body = body_node
return ret_node
def cgen(self):
body = []
bound = astunparse.unparse(self.guard['g'+str(self.dim)]).strip().replace('or', '||').rstrip()
termination = "if " + bound + "{\nreturn;\n}"
body.append(termination)
for label in self.ord[1:]:
if label[0] == "t":
s = astunparse.unparse(ast.Expr(self.tcall[label])).rstrip()
s += ';'
body.append(s)
elif label[0] == "r":
s = astunparse.unparse(ast.Expr(self.rcall[label])).rstrip()
s += ';'
body.append(s)
else:
s = astunparse.unparse(ast.Expr(self.work[label])).replace('.', '->').rstrip()
s += ';'
body.append(s)
return body
class Analyze:
def __init__(self, tree):
self.tree = tree # module tree
self.dims = 0 # dimensions
self.indvars = {} # induction variables
self.representation = {} # map: dimension -> function reps
self.deps = set([])
def collect(self):
collectionWalk = AnalyzeCollection()
collectionWalk.visit(self.tree)
self.dims = collectionWalk.dims
functions = collectionWalk.functions
indvarWalk = AnalyzeInductionVar(range(1, self.dims+1))
indvarWalk.visit(functions[1])
self.indvars = indvarWalk.indvars
for i in range(1, self.dims+1):
self.func(i, functions[i])
def func(self, dim: int, node: ast.FunctionDef):
rcallWalk = AnalyzeSelfCall(node.name)
rcallWalk.visit(node)
loop = (rcallWalk.rcall == 1)
funcWalk = AnalyzeFunction(dim, node.name, loop)
funcWalk.visit(node)
funcWalk.set_alp()
self.representation[dim] = funcWalk
def getdim(self):
return self.dims
def getdimtype(self):
dim = self.dims
dim_type = []
for f in range(1, dim+1):
dim_type.append(len(self.representation[f].rcall))
return dim_type
def getord(self):
dim = self.dims
order = []
for f in range(1, dim+1):
order.append(self.representation[f].ord)
return order
def getalp(self):
dim = self.dims
alph = []
for f in range(1, dim+1):
alph.append(self.representation[f].alp)
return alph
def getindvar(self):
dim = self.dims
indvar = []
for f in range(1, dim+1):
indvar.append(self.indvars[f].arg)
return indvar
def constructfs(self):
dims = self.dims
args = []
for d in range(1, self.dims+1):
args.append(self.indvars[d])
fs = []
for t in range(1, dims+1):
fnode = self.representation[t].constructf()
fnode.args = ast.arguments(args=args, vararg=None,
kwonlyargs=[], kw_defaults=[],
kwarg=None, defaults=[])
fs.append(fnode)
return fs
def codegen(self):
s = ""
for f in self.constructfs():
s += astunparse.unparse(f)
return s
def cgen(self):
dims = self.dims
args = self.getindvar()
s_arg = []
for d, arg in enumerate(args):
if self.representation[d+1].loop:
s_arg.append("int " + arg)
else:
s_arg.append("Node * " + arg)
ss_arg = s_arg[0]
for s in s_arg[1:]:
ss_arg += ", " + s
s = ''
for d in range(1, dims+1):
sfunc = "void " + self.representation[d].fname + "(" + ss_arg + "){\n"
stmts = self.representation[d].cgen()
for st in stmts:
sfunc += "" + st + "\n"
sfunc += "}\n\n"
s += sfunc
return s
def depanalyze(self):
dims = self.dims
stmt = self.representation[dims].work['s1']
# print(ast.dump(stmt))
# Reads
treads = AnalyzeTreeReads()
areads = AnalyzeArrayReads()
treads.visit(stmt.value)
areads.visit(stmt.value)
# Writes
twrites = AnalyzeTreeWrites()
awrites = AnalyzeArrayWrites()
for t in stmt.targets:
twrites.visit(t)
awrites.visit(t)
#print("read array: ", areads.arrayreads)
#print("read tree: ", treads.treereads)
#print("write array: ", awrites.arraywrites)
#print("write tree: ", twrites.treewrites)
for x in areads.arrayreads:
            if x > 0:
rgx1 = [['t1'],['s1']]
rgx2 = [['t1'],['s1']]
if 0 in areads.arrayreads and 0 in awrites.arraywrites:
rgx2[0].insert(0, '(r1)*')
for _ in range(x):
rgx2[0].insert(0, 'r1')
wt = WitnessTuple(self.getdim(), self.getdimtype(), self.getalp(), self.getord(), rgx1, rgx2)
wt.set_fsa()
self.deps.add(wt)
for l in treads.treereads:
rgx1 = [['t1'],['s1']]
rgx2 = [['t1'],['s1']]
for r in self.representation[2].rcall:
if r[2:] == l:
if self.representation[2].ord.index(r) < self.representation[2].ord.index('s1'):
rgx1[1].insert(0, r)
else:
rgx2[1].insert(0, r)
wt = WitnessTuple(self.getdim(), self.getdimtype(), self.getalp(), self.getord(), rgx1, rgx2)
wt.set_fsa()
self.deps.add(wt)
def getdeps(self):
ret = []
for i, dep in enumerate(self.deps):
ret.append((i, dep.regex1, dep.regex2))
return ret
if __name__ == "__main__":
with open("examples/sources/loop-rec.py", "r") as source:
tree = ast.parse(source.read())
analyze = Analyze(tree)
analyze.collect()
print(analyze.getdim())
print(analyze.getdimtype())
print(analyze.getalp())
print(analyze.getord())
print(analyze.getindvar())
print(analyze.codegen())
#print(analyze.cgen())
analyze.depanalyze()
print(analyze.getdeps())
|
the-stack_0_19596 | import sys
sys.path.insert(0, "..")
from pyfirmata import ArduinoMega, util
from core.utils import detect_arduino_usb_serial
from core.sensor_processing.sampling import mean_sample
from settings import PH_SENSOR_PIN, TDS_SENSOR_PIN, WATER_LEVEL_PIN, NUTRIENT_LEVEL, PH_DOWNER_LEVEL_PIN
from pprint import pprint
print(f"[UART][INFO] Connecting Arduino Mega Board ...")
board = ArduinoMega(detect_arduino_usb_serial())
it = util.Iterator(board)
it.start()
for pin in [PH_SENSOR_PIN, TDS_SENSOR_PIN, WATER_LEVEL_PIN, NUTRIENT_LEVEL, PH_DOWNER_LEVEL_PIN]:
board.analog[pin].enable_reporting()
print(f"[UART][OK] Connected to this board: {board}")
def read_tds(board: ArduinoMega, _pipeline_dict: dict, _temperature=25) -> dict:
d = mean_sample(board=board, pin=TDS_SENSOR_PIN)
# temperature compensation formula: fFinalResult(25^C) = fFinalResult(current)/(1.0+0.02*(fTP-25.0));
temperature_compensation = 1.0 + 0.02 * (_temperature - 25.0)
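    # e.g. at 30 degC the divisor is 1.0 + 0.02 * (30 - 25) = 1.10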
# temperature compensation
voltage_compensation = d["voltage"] / temperature_compensation
tds = (
133.42 * voltage_compensation * voltage_compensation * voltage_compensation
- 255.86 * voltage_compensation * voltage_compensation
+ 857.39 * voltage_compensation
) * 0.5 # convert voltage value to tds value
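    # the cubic maps the compensated voltage to conductivity; the trailing 0.5
    # is the commonly used EC-to-TDS conversion factor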
_pipeline_dict["tds_average_adc"] = d["adc"]
_pipeline_dict["tds_average_voltage"] = d["voltage"]
_pipeline_dict["tds_average_ppm"] = tds
return _pipeline_dict
while True:
d = {}
pprint(read_tds(board, d))
|
the-stack_0_19597 | from warnings import warn
from tqdm import tqdm
import torch
import numpy as np
from torch.optim import Adam
from torch.optim.lr_scheduler import StepLR
from torch.nn import MSELoss
from odl.contrib.torch import OperatorModule
from dival import IterativeReconstructor
from dliplib.utils.losses import poisson_loss, tv_loss
from dliplib.utils.models import get_skip_model
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
MIN = -1000
MAX = 1000
class DeepImagePriorReconstructor(IterativeReconstructor):
HYPER_PARAMS = {
'lr':
{'default': 1e-3,
'range': [1e-5, 1e-1]},
'gamma':
{'default': 1e-4,
'range': [1e-7, 1e-0],
'grid_search_options': {'num_samples': 20}},
'scales':
{'default': 4,
'choices': [3, 4, 5, 6, 7]},
'channels':
{'default': [128] * 5},
'skip_channels':
{'default': [4] * 5},
'iterations':
{'default': 5000,
'range': [1, 50000]},
'loss_function':
{'default': 'mse',
'choices': ['mse', 'poisson']}
}
"""
Deep Image Prior reconstructor similar to the one introduced in (cf. [1])
References
----------
.. [1] V. Lempitsky, A. Vedaldi, and D. Ulyanov, 2018,
"Deep Image Prior".
IEEE/CVF Conference on Computer Vision and Pattern Recognition.
`doi:10.1109/CVPR.2018.00984
<https://doi.org/10.1109/CVPR.2018.00984>`_
"""
def __init__(self, ray_trafo, hyper_params=None, callback=None,
callback_func=None, callback_func_interval=100, **kwargs):
"""
Parameters
----------
ray_trafo : `odl.tomo.operators.RayTransform`
The forward operator
"""
super().__init__(
reco_space=ray_trafo.domain, observation_space=ray_trafo.range,
hyper_params=hyper_params, callback=callback, **kwargs)
self.callback_func = callback_func
self.ray_trafo = ray_trafo
self.ray_trafo_module = OperatorModule(self.ray_trafo)
self.domain_shape = ray_trafo.domain.shape
self.callback_func = callback_func
self.callback_func_interval = callback_func_interval
def get_activation(self, layer_index):
return self.model.layer_output(self.net_input, layer_index)
def _reconstruct(self, observation, *args, **kwargs):
torch.random.manual_seed(10)
lr = self.hyper_params['lr']
gamma = self.hyper_params['gamma']
scales = self.hyper_params['scales']
channels = self.hyper_params['channels']
iterations = self.hyper_params['iterations']
skip_channels = self.hyper_params['skip_channels']
loss_function = self.hyper_params['loss_function']
output_depth = 1
input_depth = 1
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.net_input = 0.1 * \
torch.randn(input_depth, *self.reco_space.shape)[None].to(device)
self.model = get_skip_model(
input_depth,
output_depth,
channels=channels[:scales],
skip_channels=skip_channels[:scales]).to(device)
self.optimizer = Adam(self.model.parameters(), lr=lr)
y_delta = torch.tensor(observation.asarray(), dtype=torch.float32)
y_delta = y_delta.view(1, 1, *y_delta.shape)
y_delta = y_delta.to(device)
if loss_function == 'mse':
criterion = MSELoss()
elif loss_function == 'poisson':
criterion = poisson_loss
else:
warn('Unknown loss function, falling back to MSE')
criterion = MSELoss()
best_loss = np.infty
best_output = self.model(self.net_input).detach()
# scheduler = StepLR(self.optimizer, 500, 0.5)
for i in tqdm(range(iterations), total=iterations):
self.optimizer.zero_grad()
output = self.model(self.net_input)
loss = criterion(self.ray_trafo_module(output),
y_delta) + gamma * tv_loss(output)
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1)
self.optimizer.step()
# scheduler.step()
for p in self.model.parameters():
p.data.clamp_(MIN, MAX)
if loss.item() < best_loss:
best_loss = loss.item()
best_output = output.detach()
if (i % self.callback_func_interval == 0 or i == iterations-1) and self.callback_func is not None:
self.callback_func(
iteration=i, reconstruction=best_output[0, 0, ...].cpu().numpy(), loss=best_loss)
if self.callback is not None:
self.callback(self.reco_space.element(
best_output[0, 0, ...].cpu().numpy()))
return self.reco_space.element(best_output[0, 0, ...].cpu().numpy())
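# Illustrative usage sketch (assumptions: `ray_trafo` is an odl RayTransform and
# `observation` an element of its range, both provided by the surrounding
# dival/odl pipeline; `reconstruct` is inherited from dival's reconstructor API
# and ends up calling the `_reconstruct` method defined above):
#
#     reconstructor = DeepImagePriorReconstructor(
#         ray_trafo, hyper_params={'iterations': 2000, 'loss_function': 'mse'})
#     reconstruction = reconstructor.reconstruct(observation)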
|
the-stack_0_19599 | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class LocalName:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'en_us': 'str',
'zh_cn': 'str'
}
attribute_map = {
'en_us': 'en_us',
'zh_cn': 'zh_cn'
}
def __init__(self, en_us=None, zh_cn=None):
"""LocalName - a model defined in huaweicloud sdk"""
self._en_us = None
self._zh_cn = None
self.discriminator = None
if en_us is not None:
self.en_us = en_us
if zh_cn is not None:
self.zh_cn = zh_cn
@property
def en_us(self):
"""Gets the en_us of this LocalName.
        The English name of the availability zone.
:return: The en_us of this LocalName.
:rtype: str
"""
return self._en_us
@en_us.setter
def en_us(self, en_us):
"""Sets the en_us of this LocalName.
        The English name of the availability zone.
:param en_us: The en_us of this LocalName.
:type: str
"""
self._en_us = en_us
@property
def zh_cn(self):
"""Gets the zh_cn of this LocalName.
        The Chinese name of the availability zone.
:return: The zh_cn of this LocalName.
:rtype: str
"""
return self._zh_cn
@zh_cn.setter
def zh_cn(self, zh_cn):
"""Sets the zh_cn of this LocalName.
        The Chinese name of the availability zone.
:param zh_cn: The zh_cn of this LocalName.
:type: str
"""
self._zh_cn = zh_cn
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, LocalName):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_0_19600 | import typing
from typing import AbstractSet
from ..datalog.instance import MapInstance
from ..expressions import Constant
from ..logic import Implication
from .cplogic.program import CPLogicProgram
from .expression_processing import (
construct_within_language_succ_result,
is_within_language_prob_query,
within_language_succ_query_to_intensional_rule,
)
from .expressions import Condition
def _solve_within_language_prob_query(
cpl: CPLogicProgram,
rule: Implication,
succ_prob_solver: typing.Callable,
marg_prob_solver: typing.Callable,
) -> Constant[AbstractSet]:
query = within_language_succ_query_to_intensional_rule(rule)
if isinstance(rule.antecedent, Condition):
provset = marg_prob_solver(query, cpl)
else:
provset = succ_prob_solver(query, cpl)
relation = construct_within_language_succ_result(provset, rule)
return relation
def _solve_for_probabilistic_rule(
cpl: CPLogicProgram,
rule: Implication,
succ_prob_solver: typing.Callable,
):
provset = succ_prob_solver(rule, cpl)
relation = Constant[AbstractSet](
provset.value,
auto_infer_type=False,
verify_type=False,
)
return relation
def compute_probabilistic_solution(
det_edb,
pfact_edb,
pchoice_edb,
prob_idb,
succ_prob_solver,
marg_prob_solver,
):
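    # builds a CP-Logic program from the deterministic, probabilistic-fact and
    # probabilistic-choice databases, then solves each rule of the probabilistic
    # intensional database, handling within-language SUCC queries separately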
solution = MapInstance()
cpl = _build_probabilistic_program(
det_edb, pfact_edb, pchoice_edb, prob_idb
)
for rule in prob_idb.formulas:
if is_within_language_prob_query(rule):
relation = _solve_within_language_prob_query(
cpl, rule, succ_prob_solver, marg_prob_solver
)
else:
relation = _solve_for_probabilistic_rule(
cpl, rule, succ_prob_solver
)
solution[rule.consequent.functor] = Constant[AbstractSet](
relation.value.to_unnamed()
)
return solution
def _build_probabilistic_program(det_edb, pfact_edb, pchoice_edb, prob_idb):
cpl = CPLogicProgram()
db_to_add_fun = [
(det_edb, cpl.add_extensional_predicate_from_tuples),
(pfact_edb, cpl.add_probabilistic_facts_from_tuples),
(pchoice_edb, cpl.add_probabilistic_choice_from_tuples),
]
for database, add_fun in db_to_add_fun:
for pred_symb, ra_set in database.items():
add_fun(pred_symb, ra_set.value.unwrap())
cpl.walk(prob_idb)
return cpl
|
the-stack_0_19601 | import os
import numpy as np
columns = [('image', object), ('label', int),
('x', float), ('y', float), ('z', float),
('rx', float), ('ry', float), ('rz', float),
('timestamp', object)]
def base_datasetname(dir, year_base, year_curr, offset_base, offset_curr):
return dir + "basepos-{0}-{1}-{2}m-{3}m.txt".format(year_base, year_curr, offset_base, offset_curr)
def curr_datasetname(dir, year_base, year_curr, offset_base, offset_curr):
return dir + "livepos-{0}-{1}-{2}m-{3}m.txt".format(year_base, year_curr, offset_base, offset_curr)
def save_dataset(data, dataset):
sample_file = open(dataset, "w")
sample_file.write("image label x y z rx ry rz timestamp\n")
for i in range(len(data)):
sample_file.write("{0} {1} {2} {3} {4} {5} {6} {7} {8}\n".format(
data['image'][i], data['label'][i],
data['x'][i], data['y'][i], data['z'][i],
data['rx'][i], data['ry'][i], data['rz'][i],
data['timestamp'][i])
)
sample_file.close()
def concat_dataset(output_dir, trainfile, basefile, datasets, offset_base, offset_curr):
datasetname_out = curr_datasetname(output_dir, trainfile, basefile, offset_base, offset_curr)
for i in range(0, len(datasets)):
datasetname_in = curr_datasetname(output_dir,basefile, datasets[i], offset_base, offset_curr)
data_base = np.genfromtxt(datasetname_in, delimiter=' ', names=True, dtype=np.dtype(columns))
if i == 0:
data_all = data_base
else:
data_all = np.concatenate((data_all, data_base))
data_all = np.sort(data_all, axis=0, order=['label', 'timestamp'])
save_dataset(data_all, datasetname_out)
if __name__ == '__main__':
input_dir = '/dados/baidu/camerapos/IDA/'
output_dir = '/dados/baidu/camerapos/IDA/'
offset_base = 5
offset_curr = 5
#os.system('rm -rf ' + output_dir + '*')
datasets = ['front-20190918143332', 'front-20190924124848','front-20191014142530','front-20191021162130','front-20191025104732','front-20191130112819']
dataset_valid = 'front-20191216123346'
dataset_test = 'front-20191225153609'
dataset_name = 'BAIDU-TRAIN-LAP'
valid_name = 'BAIDU-VALID-LAP'
test_name = 'BAIDU-TEST-LAP'
concat_dataset(output_dir, dataset_name, 'BASE', datasets, offset_base, offset_curr)
os.system('cp ' + curr_datasetname(output_dir, 'BASE', dataset_test, offset_base, offset_curr) + ' ' + curr_datasetname(output_dir, test_name, dataset_test, offset_base, offset_curr))
os.system('cp ' + curr_datasetname(output_dir, 'BASE', dataset_valid, offset_base, offset_curr) + ' ' + curr_datasetname(output_dir, valid_name, dataset_valid, offset_base, offset_curr))
|
the-stack_0_19602 | """ Tests for zmq shell / display publisher. """
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import unittest
from queue import Queue
from threading import Thread
import zmq
from jupyter_client.session import Session
from traitlets import Int
from ipykernel.zmqshell import ZMQDisplayPublisher
class NoReturnDisplayHook:
"""
A dummy DisplayHook which allows us to monitor
the number of times an object is called, but which
does *not* return a message when it is called.
"""
call_count = 0
def __call__(self, obj):
self.call_count += 1
class ReturnDisplayHook(NoReturnDisplayHook):
"""
A dummy DisplayHook with the same counting ability
as its base class, but which also returns the same
message when it is called.
"""
def __call__(self, obj):
super().__call__(obj)
return obj
class CounterSession(Session):
"""
This is a simple subclass to allow us to count
the calls made to the session object by the display
publisher.
"""
send_count = Int(0)
def send(self, *args, **kwargs):
"""
A trivial override to just augment the existing call
with an increment to the send counter.
"""
self.send_count += 1
super().send(*args, **kwargs)
class ZMQDisplayPublisherTests(unittest.TestCase):
"""
Tests the ZMQDisplayPublisher in zmqshell.py
"""
def setUp(self):
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PUB)
self.session = CounterSession()
self.disp_pub = ZMQDisplayPublisher(session=self.session, pub_socket=self.socket)
def tearDown(self):
"""
We need to close the socket in order to proceed with the
tests.
TODO - There is still an open file handler to '/dev/null',
presumably created by zmq.
"""
self.disp_pub.clear_output()
self.socket.close()
self.context.term()
def test_display_publisher_creation(self):
"""
Since there's no explicit constructor, here we confirm
that keyword args get assigned correctly, and override
the defaults.
"""
assert self.disp_pub.session == self.session
assert self.disp_pub.pub_socket == self.socket
def test_thread_local_hooks(self):
"""
Confirms that the thread_local attribute is correctly
initialised with an empty list for the display hooks
"""
assert self.disp_pub._hooks == []
def hook(msg):
return msg
self.disp_pub.register_hook(hook)
assert self.disp_pub._hooks == [hook]
q = Queue()
def set_thread_hooks():
q.put(self.disp_pub._hooks)
t = Thread(target=set_thread_hooks)
t.start()
thread_hooks = q.get(timeout=10)
assert thread_hooks == []
def test_publish(self):
"""
Publish should prepare the message and eventually call
`send` by default.
"""
data = dict(a=1)
assert self.session.send_count == 0
self.disp_pub.publish(data)
assert self.session.send_count == 1
def test_display_hook_halts_send(self):
"""
If a hook is installed, and on calling the object
it does *not* return a message, then we assume that
the message has been consumed, and should not be
processed (`sent`) in the normal manner.
"""
data = dict(a=1)
hook = NoReturnDisplayHook()
self.disp_pub.register_hook(hook)
assert hook.call_count == 0
assert self.session.send_count == 0
self.disp_pub.publish(data)
assert hook.call_count == 1
assert self.session.send_count == 0
def test_display_hook_return_calls_send(self):
"""
If a hook is installed and on calling the object
it returns a new message, then we assume that this
is just a message transformation, and the message
should be sent in the usual manner.
"""
data = dict(a=1)
hook = ReturnDisplayHook()
self.disp_pub.register_hook(hook)
assert hook.call_count == 0
assert self.session.send_count == 0
self.disp_pub.publish(data)
assert hook.call_count == 1
assert self.session.send_count == 1
def test_unregister_hook(self):
"""
Once a hook is unregistered, it should not be called
during `publish`.
"""
data = dict(a=1)
hook = NoReturnDisplayHook()
self.disp_pub.register_hook(hook)
assert hook.call_count == 0
assert self.session.send_count == 0
self.disp_pub.publish(data)
assert hook.call_count == 1
assert self.session.send_count == 0
#
# After unregistering the `NoReturn` hook, any calls
# to publish should *not* got through the DisplayHook,
# but should instead hit the usual `session.send` call
# at the end.
#
# As a result, the hook call count should *not* increase,
# but the session send count *should* increase.
#
first = self.disp_pub.unregister_hook(hook)
self.disp_pub.publish(data)
self.assertTrue(first)
assert hook.call_count == 1
assert self.session.send_count == 1
#
# If a hook is not installed, `unregister_hook`
# should return false.
#
second = self.disp_pub.unregister_hook(hook)
self.assertFalse(second)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_19603 | import codecs
from collections import defaultdict
import csv
from django.conf import settings
from django.contrib.admin.models import LogEntry, CHANGE
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.management.base import BaseCommand
import progressbar
import pymarc
from mep.books.models import Work
from mep.books.oclc import SRUSearch
class Command(BaseCommand):
"""Associate library items with OCLC entries via WordCat Search API"""
help = __doc__
mode = None
sru_search = None
#: fields to be included in CSV export
csv_fieldnames = [
# details from local db
'Title', 'Date', 'Creators',
# details from OCLC
'OCLC Title', 'OCLC Author', 'OCLC Date', 'OCLC URI',
'Work URI', '# matches',
# db notes last
'Notes']
#: summary message string for each mode
summary_message = {
'report': 'Processed %(count)d works, found matches for %(found)d',
'update': 'Processed %(count)d works, updated %(updated)d, no matches for %(no_match)d, %(error)d error(s)',
}
progbar = None
#: notes indicator for reconciliation attempted but no match found
oclc_no_match = "OCLCNoMatch"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.stats = defaultdict(int)
self.script_user = User.objects.get(username=settings.SCRIPT_USERNAME)
self.work_content_type = ContentType.objects.get_for_model(Work).pk
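        # the script user and Work content type are needed to attach LogEntry
        # records documenting automated changes made in update mode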
def add_arguments(self, parser):
parser.add_argument('mode', choices=['report', 'update'])
parser.add_argument(
'--no-progress', action='store_true',
help='Do not display progress bar')
parser.add_argument(
'-o', '--output', help='Filename for the report to be generated')
def handle(self, *args, **kwargs):
"""Loop through Works in the database and look for matches in OCLC"""
# store operating mode
self.mode = kwargs['mode']
# initialize OCLC search client
self.sru_search = SRUSearch()
# filter out works with problems that we don't expect to be
# able to match reliably
# only include works that do not already have a work URI
works = Work.objects.exclude(notes__contains='GENERIC') \
.exclude(notes__contains='PROBLEM') \
.exclude(notes__contains='OBSCURE') \
.exclude(notes__contains='ZERO') \
.exclude(notes__contains=self.oclc_no_match) \
.filter(uri__exact='') \
.exclude(title__endswith='*')
# report on total to process
total = works.count()
self.stdout.write('%d works to reconcile' % total)
# bail out if there is nothing to do
if not total:
return
if not kwargs['no_progress'] and total > 5:
self.progbar = progressbar.ProgressBar(redirect_stdout=True,
max_value=total)
if self.mode == 'report':
# use output name specified in args, with a default fallback
outfilename = kwargs.get('output', None) or 'works-oclc.csv'
self.report(works, outfilename)
elif self.mode == 'update':
self.update_works(works)
if self.progbar:
self.progbar.finish()
# summarize what was done for the current mode
self.stdout.write(self.summary_message[self.mode] % self.stats)
def tick(self):
'''Increase count by one and update progress bar if there is one'''
self.stats['count'] += 1
if self.progbar:
self.progbar.update(self.stats['count'])
def report(self, works, outfilename):
'''Generate an CSV file to report on OCLC matches found'''
with open(outfilename, 'w') as csvfile:
# write utf-8 byte order mark at the beginning of the file
csvfile.write(codecs.BOM_UTF8.decode())
# initialize csv writer
writer = csv.DictWriter(csvfile, fieldnames=self.csv_fieldnames)
writer.writeheader()
for work in works:
info = {
'Title': work.title,
'Date': work.year,
'Creators': ';'.join([str(person) for person in work.creators.all()]),
'Notes': work.notes
}
info.update(self.oclc_info(work))
writer.writerow(info)
# keep track of how many records found any matches
if info.get('# matches', None):
self.stats['found'] += 1
self.tick()
def update_works(self, works):
'''Search for Works in OCLC and update in the database if
a match is found.'''
for work in works:
error = False
log_message = None
try:
worldcat_entity = self.oclc_search_record(work)
except ConnectionError as err:
self.stderr.write('Error: %s' % err)
worldcat_entity = None
self.stats['error'] += 1
error = True
if worldcat_entity:
work.populate_from_worldcat(worldcat_entity)
work.save()
# message for log entry to document the change
log_message = 'Updated from OCLC %s' % worldcat_entity.work_uri
self.stats['updated'] += 1
# if no match was found but there was no connection error,
# make a note and log the change
elif not error:
# add no match indicator to work notes
work.notes = '\n'.join([txt for txt in (work.notes, self.oclc_no_match)
if txt])
work.save()
# message for log entry to document the change
log_message = 'No OCLC match found'
self.stats['no_match'] += 1
# create a log entry if a message was set
            # (either update or no match found)
if log_message:
LogEntry.objects.log_action(
user_id=self.script_user.id,
content_type_id=self.work_content_type,
object_id=work.pk,
object_repr=str(work),
change_message=log_message,
action_flag=CHANGE)
self.tick()
def oclc_search(self, work):
"""Search for an work in OCLC by title, author, date, and
material type if noted as a Periodical. Filters by
english language and material type not Internet Resource
(i.e. electronic edition). Returns :class:`~mep.books.oclc.SRWResponse`.
"""
search_opts = {}
# search by title if known
if work.title:
search_opts['title__exact'] = work.title
# search by first author if there is one
if work.authors:
search_opts['author__all'] = str(work.authors[0])
# search by year if known
if work.year:
search_opts['year'] = work.year
# search year by range based on first documented event for this book
else:
first_date = work.first_known_interaction
if first_date:
# range search ending with first known event date
search_opts['year'] = "-%s" % first_date.year
# filter by material type; assume work is a book unless
# notes indicate periodical
search_opts['material_type__exact'] = 'periodical' \
if 'PERIODICAL' in work.notes else 'book'
# add filters that apply to all S&co content
# restrict to english language content
# (nearly all are english, handful that are not will be handled manually)
search_opts['language_code__exact'] = 'eng'
# exclude electronic books
search_opts['material_type__notexact'] = 'Internet Resource'
return self.sru_search.search(**search_opts)
def oclc_info(self, work):
"""Search for an work in OCLC by title, author, date.
Returns dictionary with details found for inclusion in CSV.
"""
result = self.oclc_search(work)
# report number of matches so 0 is explicit/obvious
oclc_info = {'# matches': result.num_records}
if result.num_records:
# assume first record is best match (seems to be true)
marc_record = result.marc_records[0]
try:
worldcat_rdf = self.sru_search.get_worldcat_rdf(marc_record)
oclc_info.update({
'OCLC Title': marc_record.title(),
'OCLC Author': marc_record.author(),
'OCLC Date': marc_record.pubyear(),
'OCLC URI': worldcat_rdf.work_uri,
'Work URI': worldcat_rdf.work_uri
})
except ConnectionError as err:
self.stderr.write('Error: %s' % err)
return oclc_info
def oclc_search_record(self, work):
"""Search for an work in OCLC by title, author, date.
Returns :class:`~mep.books.oclc.WorldCatResource` for the first
match.'''
"""
result = self.oclc_search(work)
if result and result.num_records:
return self.sru_search.get_worldcat_rdf(result.marc_records[0])
|
the-stack_0_19604 | import pytest
from webu.utils.events import (
construct_event_topic_set,
)
EVENT_1_ABI = {
"anonymous": False,
"inputs": [
{"indexed": False, "name": "arg0", "type": "uint256"},
{"indexed": True, "name": "arg1", "type": "uint256"},
{"indexed": True, "name": "arg2", "type": "uint256"},
{"indexed": False, "name": "arg3", "type": "uint256"},
{"indexed": True, "name": "arg4", "type": "uint256"},
{"indexed": False, "name": "arg5", "type": "uint256"},
],
"name": "Event_1",
"type": "event",
}
EVENT_1_TOPIC = '0xa7144ed450ecab4a6283d3b1e290ff6c889232d922b84d88203eb7619222fb32'
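# topic0 for the event: the keccak-256 hash of its canonical signature
# "Event_1(uint256,uint256,uint256,uint256,uint256,uint256)"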
def hex_and_pad(i):
unpadded_hex_value = hex(i).rstrip('L')
return '0x' + unpadded_hex_value[2:].zfill(64)
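# indexed uint256 arguments appear in log topics as 0x-prefixed, 32-byte
# (64 hex character) left-zero-padded values, which is what hex_and_pad builds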
@pytest.mark.parametrize(
'event_abi,arguments,expected',
(
(
EVENT_1_ABI,
{},
[[EVENT_1_TOPIC]],
),
(
EVENT_1_ABI,
{'arg0': 1},
[[EVENT_1_TOPIC]],
),
(
EVENT_1_ABI,
{'arg0': 1, 'arg3': [1, 2]},
[[EVENT_1_TOPIC]],
),
(
EVENT_1_ABI,
{'arg1': 1},
[
[EVENT_1_TOPIC, hex_and_pad(1), None, None],
],
),
(
EVENT_1_ABI,
{'arg1': [1, 2]},
[
[EVENT_1_TOPIC, hex_and_pad(1), None, None],
[EVENT_1_TOPIC, hex_and_pad(2), None, None],
],
),
(
EVENT_1_ABI,
{'arg1': [1], 'arg2': [2]},
[
[EVENT_1_TOPIC, hex_and_pad(1), hex_and_pad(2), None],
],
),
(
EVENT_1_ABI,
{'arg1': [1, 3], 'arg2': [2, 4]},
[
[EVENT_1_TOPIC, hex_and_pad(1), hex_and_pad(2), None],
[EVENT_1_TOPIC, hex_and_pad(1), hex_and_pad(4), None],
[EVENT_1_TOPIC, hex_and_pad(3), hex_and_pad(2), None],
[EVENT_1_TOPIC, hex_and_pad(3), hex_and_pad(4), None],
],
),
)
)
def test_construct_event_topics(event_abi, arguments, expected):
actual = construct_event_topic_set(event_abi, arguments)
assert actual == expected
|
the-stack_0_19606 | # -*- coding: utf-8 -*-
import os
import sys
import xbmc
import xbmcaddon
if sys.version_info >= (2, 7):
import json
else:
import simplejson as json
# Import the common settings
from resources.lib.settings import log
from resources.lib.settings import Settings
from resources.lib.backend import TunesBackend
ADDON = xbmcaddon.Addon(id='service.tvtunes')
CWD = ADDON.getAddonInfo('path')
LIB_DIR = xbmc.translatePath(os.path.join(CWD, 'resources', 'lib'))
# Class to detect when something in the system has changed
class TvTunesMonitor(xbmc.Monitor):
def onSettingsChanged(self):
log("TvTunesMonitor: Notification of settings change received")
Settings.reloadSettings()
##################################
# Main of the TvTunes Service
##################################
if __name__ == '__main__':
log("Starting TvTunes Service %s" % ADDON.getAddonInfo('version'))
# Check if the settings mean we want to reset the volume on startup
startupVol = Settings.getStartupVolume()
if startupVol < 0:
log("TvTunesService: No Volume Change Required")
else:
log("TvTunesService: Setting volume to %s" % startupVol)
        xbmc.executebuiltin('SetVolume(%d)' % startupVol, True)
# Make sure the user wants to play themes
if Settings.isThemePlayingEnabled():
log("TvTunesService: Theme playing enabled")
# Create a monitor so we can reload the settings if they change
systemMonitor = TvTunesMonitor()
# Start looping to perform the TvTune theme operations
main = TunesBackend()
# Start the themes running
main.runAsAService()
del main
del systemMonitor
else:
log("TvTunesService: Theme playing disabled")
|
the-stack_0_19607 | # -*- coding: utf-8 -*- #
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ml-engine versions set-default command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.ml_engine import versions_api
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.ml_engine import flags
from googlecloudsdk.command_lib.ml_engine import versions_util
def _AddSetDefaultArgs(parser):
flags.GetModelName(positional=False, required=True).AddToParser(parser)
flags.VERSION_NAME.AddToParser(parser)
class SetDefault(base.DescribeCommand):
"""Sets an existing Cloud ML Engine version as the default for its model."""
@staticmethod
def Args(parser):
_AddSetDefaultArgs(parser)
def Run(self, args):
return versions_util.SetDefault(versions_api.VersionsClient(),
args.version,
model=args.model)
_DETAILED_HELP = {
'DESCRIPTION': """\
Sets an existing Cloud ML Engine version as the default for its model.
*{command}* sets an existing Cloud ML Engine version as the default for its
model. Only one version may be the default for a given version.
"""
}
SetDefault.detailed_help = _DETAILED_HELP
|
the-stack_0_19614 | import datetime
import logging
import os
import tempfile
import threading
import time
from errno import EISDIR, ENOENT, EPERM
from queue import Queue
from stat import S_IFDIR, S_IFREG
from fuse import FuseOSError
from .dms import WtDmsGirderFS
from .utils import _convert_time, _lstrip_path, logger
class UploadThread(threading.Thread):
def __init__(self, queue, fs, cli):
threading.Thread.__init__(self)
self.daemon = True
self.queue = queue
self.fs = fs
self.cli = cli
def run(self):
while True:
(path, fdict) = self.queue.get(True)
self._upload(path, fdict)
def _upload(self, path, fdict):
obj = fdict["obj"]
# this will break if anything writes to the file during upload
# same if anything deletes the file. For now, let it fail with an error.
# TODO: cancel uploads when a file is opened w/a
fp = open(fdict["path"], "a+b")
fp.seek(0, os.SEEK_END)
size = fp.tell()
fp.seek(0, os.SEEK_SET)
if logger.isEnabledFor(logging.DEBUG):
logger.debug("-> _upload({}, size={})".format(obj, size))
if self._is_item(obj):
obj = self._get_file(obj)
self.cli.uploadFileContents(obj["_id"], fp, size)
fp.close()
self.cli.get("dm/fs/%s/evict" % fdict["obj"]["_id"])
def _is_item(self, obj):
return "folderId" in obj
def _get_file(self, obj):
files = list(self.cli.listFile(obj["_id"]))
if len(files) != 1:
raise Exception("Expected a single file in item %s" % obj["_id"])
return files[0]
class WtHomeGirderFS(WtDmsGirderFS):
# At this point this is a mess and should be re-implemented as a write-back cache
# for both data and metadata
def __init__(self, folderId, gc):
WtDmsGirderFS.__init__(self, folderId, gc)
# override root cache entry built by the DMS FS
self._invalidate_root(folderId)
self.uploadQueue = Queue()
self.uploadThread = UploadThread(self.uploadQueue, self, self.girder_cli)
self.uploadThread.start()
def _invalidate_root(self, id: str):
if not self.cache.delete(id):
logger.error('Object not in cache "%s" (%s)' % (id, type(id)))
def _load_root(self):
return None
def _get_root_listing(self):
return None
def statfs(self, path):
# default results in a read-only fs
return {"f_flag": os.ST_NOATIME + os.ST_NODEV + os.ST_NODIRATIME + os.ST_NOSUID}
def chmod(self, path, mode):
logger.debug("-> chmod({}, {})".format(path, mode))
path = _lstrip_path(path)
self._set_metadata(path, {"permissions": mode})
def _set_metadata(self, path, dict):
obj, objType = self._get_object_from_root(path)
self._set_object_metadata(obj, objType, dict)
def _set_object_metadata(self, obj, objType, dict):
self.girder_cli.put("dm/fs/%s/setProperties" % obj["_id"], json=dict)
obj.update(dict)
return obj
def getattr(self, path, fh=None):
logger.debug("-> getattr({})".format(path))
if path == "/":
now = _convert_time(str(datetime.datetime.now()))
return dict(
st_mode=(S_IFDIR | self._get_perm(path, True)),
st_nlink=2,
st_ctime=now,
st_atime=now,
st_mtime=now,
)
obj, objType = self._get_object_from_root(_lstrip_path(path))
if objType == "folder":
mode = S_IFDIR | self._get_prop(
obj, "permissions", self._get_perm(path, True)
)
nlinks = 2
else:
mode = S_IFREG | self._get_prop(
obj, "permissions", self._get_perm(path, False)
)
nlinks = 1
stat = dict(st_mode=mode, st_nlink=nlinks)
ctime = _convert_time(obj["created"])
try:
mtime = _convert_time(obj["updated"])
except KeyError:
mtime = ctime
size = obj["size"]
with self.flock:
if path in self.openFiles:
fdict = self.openFiles[path]
if "localsize" in fdict:
# assume local file is always the most current if it exists
size = fdict["localsize"]
stat.update(
dict(
st_ctime=ctime,
st_mtime=mtime,
st_blocks=1,
st_size=size,
st_atime=time.time(),
)
)
return stat
def _get_prop(self, obj, name, default):
if name in obj:
return obj[name]
else:
return default
def create(self, pathstr, mode, fi=None):
logger.debug("-> create({}, {})".format(pathstr, mode))
path = _lstrip_path(pathstr)
parentId = self._get_parent_id(path)
# See atomicity comment on mkdir()
try:
obj, objType = self._get_object_from_root(path)
except FuseOSError as ex:
if ex.errno == ENOENT:
objType = None
else:
raise
if objType == "folder":
raise FuseOSError(EISDIR)
# TODO: fix race
if objType == "file":
# Object exists and is a file. Truncate.
return self._truncate(path, 0, close=False)
else:
# item does not exist
obj = self._create(path, parentId, mode)
self._cache_add_file(obj, path)
# Confusingly, "mode" is used both for permissions (as in mkdir) and
# for open type (as in open)
# I'm assuming here that create() is meant to implement what creat() does
# and therefore it implies open(..., O_CREAT|O_WRONLY|O_TRUNC).
#
# create an empty file locally
fdict = self._ensure_fdict(pathstr, obj)
with tempfile.NamedTemporaryFile(prefix="wtdm", delete=False) as tmp:
fdict["path"] = tmp.name
self._mark_dirty(pathstr)
return self.open(pathstr, mode=os.O_CREAT + os.O_WRONLY + os.O_TRUNC)
    def _get_object_id_by_path(self, path):
        if len(path.parts) == 0:
            return self.folder_id
        obj, _ = self._get_object_from_root(path)
        return obj["_id"]
def _cache_get_parent_id(self, path):
return self._get_object_id_by_path(path.parent)
def _cache_add_file(self, obj, path):
self._cache_add_obj(obj, path, "files")
def _cache_add_dir(self, obj, path):
self._cache_add_obj(obj, path, "folders")
def _cache_add_obj(self, obj, path, type):
with self.flock:
parentId = str(self._get_parent_id(path))
dict = self.cache[parentId]
lst = dict[type]
lst.append(obj)
self.cache[parentId] = dict
def _cache_remove_dir(self, path):
self._cache_remove_obj(path, "folders")
def _cache_remove_file(self, path):
self._cache_remove_obj(path, "files")
def _cache_remove_obj(self, path, type):
with self.flock:
parentId = str(self._get_parent_id(path))
obj = self.cache[parentId]
lst = obj[type]
for i in range(len(lst)):
if lst[i]["name"] == path.name:
lst.pop(i)
self.cache[parentId] = obj
return
raise Exception("Could not remove object from cache: %s" % path)
def _mark_dirty(self, path):
# sets a flag that will trigger an upload when the file is closed
fdict = self.openFiles[path]
fdict["dirty"] = True
def _create(self, path, parentId, perms):
logger.debug("-> _create({}, {}, {})".format(path, parentId, perms))
item = self.girder_cli.createItem(parentId, path.name)
print(item)
file = self.girder_cli.post(
"file",
parameters={
"parentType": "item",
"parentId": item["_id"],
"name": path.name,
"size": 0,
},
)
self._set_object_metadata(file, "file", {"permissions": perms})
return item
def flush(self, path, fh):
return 0
def fsync(self, path, datasync, fh):
return 0
def fsyncdir(self, path, datasync, fh):
return 0
def mkdir(self, path, mode):
logger.debug("-> mkdir({}, {})".format(path, mode))
path = _lstrip_path(path)
parentId = self._get_parent_id(path)
# It's somewhat silly that you can't set other folder parameters with this call.
# The two-step solution is a race waiting to happen and possibly a security issue
        # since there is no way to atomically create a locked-down directory.
# The better thing may be to abandon REST and add an API call that allows this to be
# done in one step. Although I suspect this will be a small problem once we
# think about having multiple clients syncing to the same home-dir.
obj = self.girder_cli.post(
"folder", parameters={"parentId": parentId, "name": path.name}
)
self._set_metadata(path, {"permissions": mode})
self._cache_add_dir(obj, path)
def _get_parent_id(self, path):
if len(path.parent.parts) == 0:
return self.folder_id
else:
obj, objType = self._get_object_from_root(path.parent)
return obj["_id"]
def rmdir(self, path):
logger.debug("-> rmdir({})".format(path))
path = _lstrip_path(path)
if len(path.parts) == 0:
raise FuseOSError(EPERM)
obj, objType = self._get_object_from_root(path)
# should probably check if it's empty
self.girder_cli.delete("%s/%s" % (objType, obj["_id"]))
self._cache_remove_dir(path)
def mknod(self, path, mode, dev):
raise FuseOSError(EPERM)
def rename(self, old, new):
logger.debug("-> rename({}, {})".format(old, new))
path = _lstrip_path(old)
if len(path.parts) == 0:
raise FuseOSError(EPERM)
obj, objType = self._get_object_from_root(path)
self.girder_cli.put("%s/%s" % (objType, obj["_id"]), parameters={"name": new})
obj["name"] = new
def truncate(self, path, length, fh=None):
logger.debug("-> truncate({}, {}, {})".format(path, length, fh))
# so fh=None means truncate() whereas fh != None means ftruncate
# the basic workflow is to do i/o locally and commit when all of the following are true:
# - no active downloads
# - the file is not open
pathObj = _lstrip_path(path)
obj, objType = self._get_object_from_root(pathObj)
if objType == "folder":
raise FuseOSError(EISDIR)
if fh is None:
self._truncate(path, length)
else:
self._ftruncate(path, fh, length)
self._mark_dirty(path)
def _truncate(self, path, length, close=True):
fh = self.open(path, mode=os.O_RDWR + os.O_APPEND)
self._ftruncate(path, fh, length)
self._mark_dirty(path)
if close:
self.release(path, fh)
else:
return fh
def _ftruncate(self, path, fh, length):
logger.debug("-> _ftruncate({}, {}, {})".format(path, fh, length))
fp = None
with self.flock:
fdict = self.openFiles[path]
self._update_size(path, fdict, length)
if fdict["downloading"]:
# simply stop downloading after the limit
fdict["downloadThread"].setLimit(length)
self._mark_dirty(path)
return
else:
if fh in self.fobjs:
fp = self.fobjs[fh]
if fp is None:
fp = open(fdict["path"], "r+b")
fp.truncate(length)
self._mark_dirty(path)
def unlink(self, path):
logger.debug("-> unlink({})".format(path))
path = _lstrip_path(path)
obj, objType = self._get_object_from_root(path)
# should be the parent item
self.girder_cli.delete("item/%s" % (obj["_id"]))
self._cache_remove_file(path)
def write(self, path, data, offset, fh):
logger.debug("-> write({}, {}, {})".format(path, fh, offset))
fdict = self.openFiles[path]
size = len(data)
if fdict["downloading"]:
self._ensure_region_available(path, fdict, fh, offset, size)
if fh not in self.fobjs:
self.fobjs[fh] = open(fdict["path"], "a+b")
fp = self.fobjs[fh]
# should probably lock this to prevent seek+write sequences from racing
fp.seek(offset)
fp.write(data)
fp.seek(0, os.SEEK_END)
size = fp.tell()
self._update_size(path, fdict, size)
self._mark_dirty(path)
return len(data)
def _update_size(self, path, fdict, sz):
fdict["localsize"] = sz
def release(self, path, fh): # pylint: disable=unused-argument
logger.debug("-> release2({}, {})".format(path, fh))
with self.flock:
writers = self._remove_writer(path, fh)
fdict = self.openFiles[path]
if not fdict["downloading"] and writers == 0:
self._commit(path, fdict)
else:
# still downloading
pass
return WtDmsGirderFS.release(self, path, fh)
def downloadCompleted(self, path, fdict):
logger.debug("-> downloadCompleted(path=%s)" % (path))
with self.flock:
fdict["cached.locally"] = True
if len(fdict["fds"]) == 0:
self._commit(path, fdict)
def open(self, path, mode=os.O_RDONLY, **kwargs):
logger.debug("-> open(path=%s, mode=%s)" % (path, mode))
fd = WtDmsGirderFS.open(self, path, mode=mode, **kwargs)
fdict = self.openFiles[path]
if "fds" not in fdict:
fdict["fds"] = set()
return fd
def _add_writer(self, path, fd):
with self.flock:
fdict = self.openFiles[path]
fdict["fds"].add(fd)
def _remove_writer(self, path, fd):
self.flock.assertLocked()
fdict = self.openFiles[path]
if fd in fdict["fds"]:
fdict["fds"].remove(fd)
else:
# no writes actually happened in this open/close session
pass
return len(fdict["fds"])
def _commit(self, path, fdict):
logger.debug("-> _commit({}, {}".format(path, fdict))
fdict = self.openFiles[path]
if "dirty" in fdict and fdict["dirty"]:
self.flock.assertLocked()
fdict["uploading"] = True
self.uploadQueue.put((path, fdict))
fdict["dirty"] = False
def _wait_for_file(self, fdict):
# Override because, except for the first download, the local
# copy is always considered to be the latest
if "cached.locally" in fdict:
return
else:
WtDmsGirderFS._wait_for_file(self, fdict)
|
the-stack_0_19619 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/12/12 下午3:09
# @Author : MaybeShewill-CV
# @Site : https://github.com/MaybeShewill-CV/image-classification-tensorflow
# @File : ilsvrc_2012_provider.py
# @IDE: PyCharm
"""
ilsvrc dataset reader
"""
import os.path as ops
import tqdm
from data_provider import base_dataset_provider
from local_utils import config_utils
class IlsvrcDatasetProvider(base_dataset_provider.DataSetProvider):
"""
Ilsvrc dataset reader
"""
def __init__(self, cfg):
"""
"""
super(IlsvrcDatasetProvider, self).__init__(cfg=cfg)
def _load_train_val_image_index(self):
"""
:return:
"""
with open(self._train_image_index_file_path, 'r') as file:
for line in file:
info = line.rstrip('\r').rstrip('\n').strip(' ').split()
train_src_image_path = info[0]
label_id = info[1]
assert ops.exists(train_src_image_path), '{:s} not exist'.format(train_src_image_path)
self._train_label_image_infos.append([train_src_image_path, label_id])
with open(self._val_image_index_file_path, 'r') as file:
for line in file:
info = line.rstrip('\r').rstrip('\n').strip(' ').split()
val_src_image_path = info[0]
val_label_id = info[1]
assert ops.exists(val_src_image_path), '{:s} not exist'.format(val_src_image_path)
self._val_label_image_infos.append([val_src_image_path, val_label_id])
return
def get_provider(cfg):
"""
:return:
"""
return IlsvrcDatasetProvider(cfg=cfg)
def _test():
"""
:return:
"""
cfg = config_utils.get_config(config_file_path='./config/ilsvrc_2012_xception.yaml')
reader = IlsvrcDatasetProvider(cfg=cfg)
if not reader.successfully_initialized:
print('Dataset reader not successfully initialized')
return
train_dataset = reader.train_dataset
val_dataset = reader.val_dataset
for train_samples in tqdm.tqdm(train_dataset):
src_imgs = train_samples[0]
src_labels = train_samples[1]
if src_imgs is None or src_labels is None:
print('Meet None')
continue
if __name__ == '__main__':
"""
test code
"""
_test()
|
the-stack_0_19620 | """
This code monitors a given stock/crypto symbol.
"""
from harvest.algo import BaseAlgo
from harvest.trader import PaperTrader
class Watch(BaseAlgo):
def config(self):
self.watchlist = ["@BTC"]
self.interval = "1MIN"
def main(self):
print(self.get_asset_price())
if __name__ == "__main__":
t = PaperTrader()
t.set_algo(Watch())
t.start()
|
the-stack_0_19621 | # -*- coding: utf-8 -*-
"""
Created on Sun Aug 4 23:04:56 2019
@author: avi
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression
from lightgbm import LGBMClassifier,LGBMRegressor
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC
train_data= pd.read_csv("Train.csv")
test_data_= pd.read_csv("Test.csv")
samp_data= pd.read_csv("sample_submission.csv")
train_data.isnull().sum()
test_data_.isnull().sum()
train_data.dtypes
train_data["is_holiday"].value_counts()
train_data["is_holiday_num"]=train_data.is_holiday.astype("category").cat.codes
train_data["weather_type_num"]=train_data.weather_type.astype("category").cat.codes
train_data["weather_description_num"]=train_data.weather_description.astype("category").cat.codes
test_data_["is_holiday_num"]=test_data_.is_holiday.astype("category").cat.codes
test_data_["weather_type_num"]=test_data_.weather_type.astype("category").cat.codes
test_data_["weather_description_num"]=test_data_.weather_description.astype("category").cat.codes
train_data["is_holiday_num"].value_counts()
X_train=train_data[["air_pollution_index","humidity","wind_direction","clouds_all","rain_p_h","snow_p_h","is_holiday_num","weather_type_num","weather_description_num"]]
y_train = train_data["traffic_volume"]
X_test=test_data_[["air_pollution_index","humidity","wind_direction","clouds_all","rain_p_h","snow_p_h","is_holiday_num","weather_type_num","weather_description_num"]]
X_train11=train_data[["air_pollution_index","humidity","wind_direction","clouds_all","rain_p_h","snow_p_h","is_holiday_num","weather_type_num","weather_description_num","traffic_volume"]]
X_train11.corr()
clf=LGBMRegressor(boosting_type='gbdt', class_weight=None, colsample_bytree=1.0,
importance_type='split', learning_rate=0.111, max_depth=-1,
min_child_samples=20, min_child_weight=0.001, min_split_gain=0.0,
n_estimators=225, n_jobs=-1, num_leaves=31, objective=None,
random_state=None, reg_alpha=0.0, reg_lambda=0.0, silent=True,
subsample=1.0, subsample_for_bin=200000, subsample_freq=0)
model=clf.fit(X_train,y_train)
pred=model.predict(X_test)
test_data_["traffic_volume"]=pred
test_data_["traffic_volume"].value_counts()
op_file=test_data_[["date_time","traffic_volume"]]
op_file.to_csv("output.csv",index=False,header=True)
|
the-stack_0_19622 | import random
import pygame
from pygame import Rect
from DotStar_Emulator.emulator.vector2 import Vector2
from .flags import *
__all__ = ["Widget", ]
class Margin(object):
def __init__(self, top=None, right=None, bottom=None, left=None):
"""
        Stores information about the widget layout's margins.
:param top: top margin in pixels, default = 0
:param right: right margin in pixels, default = 0
        :param bottom: bottom margin in pixels, default = 0
:param left: left margin in pixels, default = 0
:return: None
"""
self.top = top if top else 0
self.bottom = bottom if bottom else 0
self.left = left if left else 0
self.right = right if right else 0
def set(self, top, right, bottom, left):
"""
        Set the margins. Argument order follows the CSS convention (top, right, bottom, left).
:param top: top margin in pixels
:param right: right margin in pixels
        :param bottom: bottom margin in pixels
:param left: left margin in pixels
:return: None
"""
if top:
self.top = top
if right:
self.right = right
if bottom:
self.bottom = bottom
if left:
self.left = left
@property
def topleft(self):
"""
Return a Vector2 class of the top left corner margins
:return: 'class pygame.math.Vector2' Vector2(left, top)
"""
return Vector2(self.left, self.top)
@property
def widthheight(self):
"""
Return a Vector2 class of the total combined margins for width and height.
width is left + right margins.
height is top + bottom margins.
:return: 'class pygame.math.Vector2' Vector2(width, height)
"""
return Vector2(self.left + self.right, self.top + self.bottom)
class WidgetLayout(object):
def __init__(self):
"""
Manage the widgets layout.
:return:
"""
        # Widgets are automatically sized by their parent panel (widget). self.requested_size can influence that
# automatic sizing.
self.requested_size = Vector2(0, 0)
# The size of the drawable widget. Does not include margins.
self.size = Vector2(0, 0)
# Offset of this widgets location within it's parents panel(widget) surface.
# Does not include its own margins. use self.total_offset to include margins
self.offset = Vector2(0, 0)
# pygame.Rect, ( total_offset.x, total_offset.y, size.x, size.y)
self.rect = Rect(0, 0, 0, 0)
        # base global_offset of the widget in screen space. You would still need to add this widget's total_offset to get
# correct screen space.
self.base_global_offset = Vector2(0, 0)
# pygame.Rect, with global_offset + total_offset for position, and self.size for width, height.
self.global_rect = Rect(0, 0, 0, 0)
# Information about the styles margins.
self.margin = Margin()
# data used during fit, which can be set by parent panel(widget). Useful so parent panel doesn't have to have
# its own way of storing information about the widget for automatic layout. Such as grid x, y position.
self.data = {}
@property
def total_offset(self):
"""
        :return: 'class pygame.math.Vector2' total offset of this widget's position within its parent's surface.
"""
return self.margin.topleft + self.offset
# @property
# def total_size(self):
# """
#
# :return: ;class pygame.math.Vector2' total width including margins
# """
# return self.size + self.margin.widthheight
def set_size(self, possible_size, offset, global_offset, flags):
"""
        Automatically calculate the widget's size and position. Size will never exceed possible_size, but may be
        smaller depending on flags. The calculation is also influenced by the widget's self.requested_size
FILLX
FILLY
CENTERX
CENTERY
:param possible_size: Vector2 of the maximum size the parent panel(widget) has allotted this widget.
        :param offset: Vector2 top left corner of where this widget will draw on the parent's surface
        :param global_offset: total screen global offset of where offset is actually located. This is needed
                to maintain screen-space rects of this widget; useful for mouse clicks.
:param flags: Positioning flags to influence the automatic fitting.
:return: None
"""
# Store base global_offset
self.base_global_offset = global_offset
# Store base offset
self.offset = offset
size = Vector2(0, 0)
if flags & FILLX:
if self.requested_size.x == 0 or self.requested_size.x == -1:
size.x = possible_size.x
else:
raise Exception("can not FILLX, as widget.style.x is already set")
else:
if self.requested_size.x == 0:
raise Exception("widget.style.x is equal to 0")
elif self.requested_size.x == -1:
# even if FILLX wasn't set by parent, fill out to parents possible_size
size.x = possible_size.x
else:
size.x = self.requested_size.x
if flags & FILLY:
if self.requested_size.y == 0 or self.requested_size.y == -1:
size.y = possible_size.y
else:
raise Exception("can not FILLY, as widget.style.y is already set")
else:
if self.requested_size.y == 0:
raise Exception("widget.style.y is equal to 0")
elif self.requested_size.y == -1:
# even if FILLY wasn't set by parent, fill out to parents possible_size
size.y = possible_size.y
else:
size.y = self.requested_size.y
# because size is the size of the widget's drawable surface, remove its own margins.
size -= self.margin.widthheight
self.size = size
# Once widgets size has been determined, we can center it within its parents space by adjusting the margins.
# TODO this will not work if the widgets are ever re-sized, as the margins are used to calculate the size in the
# step just above this.
if flags & CENTERX:
space = possible_size.x - size.x
self.margin.left = space / 2.0
self.margin.right = space / 2.0
if flags & CENTERY:
space = possible_size.y - size.y
self.margin.top = space / 2.0
self.margin.bottom = space / 2.0
# Cache Rectangle, topleft is offset + margin, size is size of drawable widget surface.
self.rect = Rect(self.total_offset, self.size)
# Cache Global Rect. Same as above but globally positioned. Useful for mouse events
self.global_rect = Rect(self.base_global_offset + self.total_offset, self.size)
class Widget(object):
def __init__(self, use_surface=True):
"""
A gui entity that can render itself to its own surface, and then draw that surface to a parent when
called to do so.
if use_surface is false, then the widget will not render it self to its own surface. But on_draw can be
used to draw anything directly to the parent.
The widget keeps track if it is dirty, and will only render itself if its dirty on a on_draw call. use
Widget.redraw() to notify if the widget is dirty. Simply setting Widget._dirty = True will not work, as that
would not notify the parent, and most likely Widget.on_draw will never be called.
:param use_surface: `bool`, default=True, use a surface to render the widget
:return:
"""
self.debug_draw = False
self.layout = WidgetLayout()
self._dirty = True # does the widget need to render itself?
self.use_surface = use_surface # does the widget use a surface to render it self to?
self._redraw_callback = None # stored function to call alongside self.redraw
self.surface = None # pygame surface for drawing the Widget too.
        self.surface_flags = 0  # flags that are passed to pygame.Surface when creating it
def update(self, elapsed):
"""
Called every game loop iteration independently of any draw or render requests.
        :param elapsed: milliseconds of the pygame clock since the last call
:return: None
"""
def set_redraw_callback(self, callback):
"""
        When a widget becomes dirty, the widget needs to tell its parent widget that it is dirty.
Widget.redraw() will call this function if it is set.
:param callback: function
:return: None
"""
self._redraw_callback = callback
def redraw(self):
"""
Set the widget as dirty, so next on_draw call will call render. Also notify parent if redraw_callback is set.
:return: None
"""
self._dirty = True
if self._redraw_callback:
self._redraw_callback()
def render(self):
"""
        Start to render the widget to its own Widget.surface.  This function will first create the surface if
        needed.  Then it will call Widget.on_render, which is the method that should be overridden by a subclass.
Then it will mark the Widget as no longer being dirty.
:return: None
"""
if not self.surface:
self.surface = pygame.Surface(self.layout.size, self.surface_flags)
self.on_render()
self._dirty = False
def fit(self, possible_size, offset, global_offset, flags):
"""
        Start to size the widget. This is almost an alias for WidgetLayout.set_size, but allows for the
        subsequent call to Widget.on_fit, since some widgets need to be notified when they have been
        sized.
:param possible_size: Vector2 of the maximum size the parent panel(widget) has allotted this widget.
        :param offset: Vector2 top left corner of where this widget will draw on the parent's surface
        :param global_offset: total screen global offset of where offset is actually located. This is needed
                to maintain screen-space rects of this widget; useful for mouse clicks.
:param flags: Positioning flags to influence the automatic fitting.
:return: None
"""
self.layout.set_size(possible_size, offset, global_offset, flags)
self.on_fit()
def on_fit(self):
"""
Called after a widget has been fit/sized.
:return: None
"""
pass
def on_render(self):
"""
        Called during the Widget.render call; this is where the actual drawing to the Widget.surface should be done.
        This should be overridden by the subclassing Widget.  This default implementation will fill the surface
        with a random color, which is useful for debugging and seeing what is drawn to the screen.
:return:
"""
self.surface = pygame.Surface(self.layout.size)
self.surface.fill(
(
random.randint(60, 255),
random.randint(60, 255),
random.randint(60, 255),
)
)
def on_draw(self, surface, g_offset):
"""
Blit the widget.surface to the passed surface.
If Widget.use_surface = False, then do nothing. In this case the subclass should override the
Widget.on_draw method.
:param surface: surface to blit the Widget.surface to.
:param g_offset: additional offset to add to the rendering. This is only needed when the parent widget
does not use a surface, so it has to pass along its total_offset.
:return: None
"""
if self.use_surface:
if self._dirty:
self.render()
pos = self.layout.total_offset
pos += g_offset
surface.blit(self.surface, pos)
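
# --- Example usage (an illustrative sketch, not part of the original module) ---
# The Widget docstrings above describe the contract: subclasses override on_render
# to draw, set layout.requested_size before fitting, and call redraw() when their
# state changes so the parent re-blits the surface. The class below is a made-up
# minimal subclass showing that pattern; the size and color choices are assumptions.
class SolidColorWidget(Widget):
    """Example widget that fills its drawable surface with a single color."""

    def __init__(self, color=(40, 40, 40)):
        super(SolidColorWidget, self).__init__()
        self.color = color
        # Request full parent width (-1) and a fixed 24px height.
        self.layout.requested_size = Vector2(-1, 24)

    def set_color(self, color):
        self.color = color
        self.redraw()  # mark dirty so the next on_draw call re-renders

    def on_render(self):
        # render() has already created self.surface sized to layout.size
        self.surface.fill(self.color)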
|
the-stack_0_19623 | import importlib
from adventofcode.util import (
get_day_id,
get_input,
get_latest_year,
get_year_id,
highlight
)
def run_solution(args) -> None:
year = args.year
day = args.day
if year is None:
year = get_latest_year()
solution_module_path = f'adventofcode.solutions.{get_year_id(year)}.{get_day_id(day)}'
solution_module = importlib.import_module(solution_module_path)
data = get_input(year, day)
print(f'Running solution for year {highlight(year)}, day {highlight(day)}.')
print()
answer1, answer2 = solution_module.run(data)
print()
print(highlight('Solutions found.', color='g'))
print()
print('Answer to puzzle 1:', highlight(answer1, color='g'))
print('Answer to puzzle 2:', highlight(answer2, color='g'))
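
# --- Illustrative sketch (hypothetical, not part of this package) ---
# run_solution() assumes each module at adventofcode.solutions.<year>.<day>
# exposes a run(data) function returning the two puzzle answers. The function
# below is a made-up example of that contract; the parsing and puzzle logic
# are purely illustrative.
def _example_solution_run(data: str):
    numbers = [int(line) for line in data.splitlines() if line.strip()]
    return sum(numbers), max(numbers)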
|
the-stack_0_19624 | """Docstring in public module."""
import os
import sys
import ujson as json
from tornado.testing import AsyncHTTPTestCase
from consoleme.config import config
APP_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.append(os.path.join(APP_ROOT, ".."))
class TestRoleLoginApi(AsyncHTTPTestCase):
def get_app(self):
from consoleme.routes import make_app
return make_app(jwt_validator=lambda x: {})
def test_role_api_fail(self):
headers = {
config.get("auth.user_header_name"): "[email protected]",
config.get("auth.groups_header_name"): "groupa,groupb,groupc",
}
response = self.fetch("/api/v2/role_login/role123", headers=headers)
self.assertEqual(response.code, 404)
self.assertEqual(
json.loads(response.body),
{
"type": "error",
"message": "You do not have any roles matching your search criteria. ",
},
)
def test_role_api_fail_multiple_matching_roles(self):
headers = {
config.get("auth.user_header_name"): "[email protected]",
config.get("auth.groups_header_name"): "group9,group3",
}
response = self.fetch("/api/v2/role_login/role", headers=headers)
self.assertEqual(response.code, 200)
response_j = json.loads(response.body)
self.assertEqual(
response_j["message"],
"You have more than one role matching your query. Please select one.",
)
self.assertEqual(response_j["reason"], "multiple_roles")
self.assertEqual(response_j["type"], "redirect")
self.assertIn("/?arn=role&warningMessage=", response_j["redirect_url"])
def test_role_api_success(self):
headers = {
config.get("auth.user_header_name"): "[email protected]",
config.get("auth.groups_header_name"): "[email protected]",
}
response = self.fetch("/api/v2/role_login/roleA", headers=headers)
self.assertEqual(response.code, 200)
response_j = json.loads(response.body)
self.assertEqual(response_j["type"], "redirect")
self.assertEqual(response_j["reason"], "console_login")
self.assertEqual(response_j["role"], "arn:aws:iam::123456789012:role/roleA")
self.assertIn(
"https://signin.aws.amazon.com/federation?Action=login&Issuer=YourCompany&Destination=https%3A%2F%2Fus-east-1.console.aws.amazon.com&SigninToken=",
response_j["redirect_url"],
)
|
the-stack_0_19626 | import tkinter as tk
from ui.pages.create_customer_page import CreateCustomerPage
from ui.pages.room_administration_page import RoomAdministrationPage
from ui.pages.customer_administration_page import CustomerAdministrationPage
def room_admin():
return RoomAdministrationPage()
class StartPage(tk.Frame):
def __init__(self, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
#p1 = CreateCustomerPage(self)
#p2 = RoomAdministrationPage(self)
w = tk.Label(self, text="Trash-Hotel Bearbeitungs_Zentrum", bg = "red")
w.pack()
buttonframe = tk.Frame(self)
container = tk.Frame(self)
buttonframe.pack(side="top", fill="x", expand=False)
container.pack(side="top", fill="both", expand=True)
b1 = tk.Button(buttonframe, text="CustomerPage")
b2 = tk.Button(buttonframe, text="RoomAdministration", command=room_admin)
b3 = tk.Button(buttonframe, text="CustomerAdministrationPage")
b1.grid(row=0, column=2, padx='10', pady='30', sticky='ew')
b2.grid(row=1, column=2, padx='10', pady='30', sticky='ew')
b3.grid(row=2, column=2, padx='10', pady='30', sticky='ew')
|
the-stack_0_19628 | #!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file pyre/meshio/Xdmf.py
##
## @brief Python class for Xdmf metadata file associated with an HDF5 file.
##
## Factory: xdmf
class Field(object):
"""
Python object for data associated with vertex or cell field in HDF5 file.
"""
DOMAIN_VERTICES = "Node"
DOMAIN_CELLS = "Cell"
groupToDomain = {
"vertex_fields": DOMAIN_VERTICES,
"cell_fields": DOMAIN_CELLS,
}
    domainToGroup = dict((v, k) for k, v in groupToDomain.items())
def __init__(self):
self.name = None
self.vectorFieldType = None
self.data = None
self.domain = None
return
# Xdmf class
class Xdmf(object):
"""
Python class for Xdmf metadata file associated with an HDF5 file.
"""
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self):
"""
Constructor.
"""
self.file = None
return
def write(self, filenameH5, filenameXdmf=None, verbose=True):
"""Write Xdmf file corresponding to given HDF5 file.
"""
if not filenameXdmf:
filenameXdmf = filenameH5.replace(".h5", ".xmf")
if verbose:
print("Generating %s..." % filenameXdmf)
import h5py
import os
if not os.path.isfile(filenameH5):
raise IOError("Cannot create Xdmf file for HDF5 file '%s'. File not found." % filenameH5)
self.h5 = h5py.File(filenameH5, "r")
if self._spaceDim() == 1:
print("WARNING: Xdmf grids are not defined for 1-D domains.\n"
"Skipping creation of Xdmf file for HDF5 file '%s'." % filenameH5)
self.h5.close()
return
self.file = open(filenameXdmf, "w")
# Header
self._openXdmf()
# Domain
cells = self.h5["/topology/cells"]
vertices = self.h5["/geometry/vertices"]
self._openDomain(cells, vertices)
timeStamps = self._getTimeStamps()
fields = self._getFields()
if not timeStamps is None:
self._openTimeCollection()
self._writeTimeStamps(timeStamps)
for iTime,timeStamp in enumerate(timeStamps):
self._openTimeGrid()
self._writeGridTopology(cells)
self._writeGridGeometry(vertices)
for field in fields:
if field.vectorFieldType in ["Tensor6", "Matrix"]:
numComponents = field.data.shape[-1]
for iComponent in range(numComponents):
self._writeGridFieldComponent(field, iTime, iComponent)
else:
self._writeGridField(field, iTime)
self._closeTimeGrid()
self._closeTimeCollection()
else:
iTime = None
self._openTimeGrid()
self._writeGridTopology(cells)
self._writeGridGeometry(vertices)
for field in fields:
if field.vectorFieldType in ["Tensor6", "Matrix"]:
numComponents = field.data.shape[-1]
for iComponent in range(numComponents):
self._writeGridFieldComponent(field, iTime, iComponent)
else:
self._writeGridField(field, iTime)
self._closeTimeGrid()
self._closeDomain()
self._closeXdmf()
self._close()
return
# PRIVATE METHODS ////////////////////////////////////////////////////
def _close(self):
self.h5.close()
self.file.close()
return
def _spaceDim(self):
vertices = self.h5["/geometry/vertices"]
assert(2 == len(vertices.shape))
return vertices.shape[1]
def _xdmfCellType(self, cells):
"""
Get type of cell.
"""
assert(2 == len(cells.shape))
numCells, numCorners = cells.shape
if "cell_dim" in cells.attrs:
cellDim = cells.attrs["cell_dim"]
else:
# Use space dimension as a proxy for cell dimension.
vertices = self.h5["/geometry/vertices"]
assert(2 == len(vertices.shape))
cellDim = vertices.shape[1]
if 0 == cellDim and 1 == numCorners:
cellType = "Polyvertex"
elif 1 == cellDim and 2 == numCorners:
cellType = "Polyline"
elif 2 == cellDim and 3 == numCorners:
cellType = "Triangle"
elif 2 == cellDim and 4 == numCorners:
cellType = "Quadrilateral"
elif 3 == cellDim and 4 == numCorners:
cellType = "Tetrahedron"
elif 3 == cellDim and 8 == numCorners:
cellType = "Hexahedron"
else:
cellType = "Unknown"
print("WARNING: Unknown cell type with %d vertices and dimension %d." % (numCorners, cellDim))
return cellType
def _xdmfVectorFieldType(self, vectorFieldString):
"""Get Xdmf vector field type.
"""
vtype = "Matrix"
if vectorFieldString.lower() == "scalar":
vtype = "Scalar"
elif vectorFieldString.lower() == "vector":
vtype = "Vector"
elif vectorFieldString.lower() == "tensor":
vtype = "Tensor6"
return vtype
def _getTimeStamps(self):
"""Get time stamps if they exist, otherwise return None.
"""
timeStamps = None
if "time" in self.h5:
timeStamps = self.h5["time"][:]
return timeStamps
def _getFields(self):
fields = []
for group in ["vertex_fields", "cell_fields"]:
if group in self.h5:
vfields = self.h5[group]
for name, dataset in vfields.items():
field = Field()
field.name = name
field.data = dataset[:]
field.domain = Field.groupToDomain[group]
if "vector_field_type" in dataset.attrs:
field.vectorFieldType = self._xdmfVectorFieldType(dataset.attrs["vector_field_type"])
else:
print("WARNING: Field '%s/%s' dataset missing 'vector_field_type' attribute. Guessing vector field type." % (group, name,))
field.vectorFieldType = "Matrix"
if len(dataset.shape) == 2 or len(dataset.shape) == 3:
numComponents = dataset.shape[-1]
if numComponents == 1:
field.vectorFieldType = "Scalar"
elif numComponents == 3:
field.vectorFieldType = "Vector"
fields.append(field)
return fields
def _openXdmf(self):
"""Write header and create Xdmf element.
"""
import os
filenameH5 = os.path.split(self.h5.filename)[-1]
self.file.write(
"<?xml version=\"1.0\" ?>\n"
"<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\" [\n"
"<!ENTITY HeavyData \"%s\">\n"
"]>\n"
"\n"
"<Xdmf>\n"
% (filenameH5,)
)
return
def _closeXdmf(self):
"""Close Xdmf element.
"""
self.file.write(
"</Xdmf>\n"
)
return
def _openDomain(self, cells, vertices):
self.file.write(" <Domain Name=\"domain\">\n")
# Cells
assert(2 == len(cells.shape))
numCells, numCorners = cells.shape
self.file.write(
" <DataItem Name=\"cells\" ItemType=\"Uniform\" Format=\"HDF\" NumberType=\"Float\" Precision=\"8\" Dimensions=\"%d %d\">\n"
" &HeavyData;:/topology/cells\n"
" </DataItem>\n"
% (numCells, numCorners,)
)
# Vertices
assert(2 == len(vertices.shape))
numVertices, spaceDim = vertices.shape
if 3 == spaceDim:
self.file.write(
" <DataItem Name=\"vertices\" ItemType=\"Uniform\" Format=\"HDF\" Dimensions=\"%d %d\">\n"
" &HeavyData;:/geometry/vertices\n"
" </DataItem>\n"
% (numVertices, spaceDim),
)
elif 2 == spaceDim:
# Form vector with 3 components using x and y components
# and then a fake z-component by multiplying the
# x-component by zero.
self.file.write(" <DataItem Name=\"vertices\" ItemType=\"Function\" Dimensions=\"%d 3\" Function=\"JOIN($0, $1, $2)\">\n" % numVertices)
# x component
self.file.write(
" <DataItem Name=\"verticesX\" ItemType=\"Hyperslab\" Type=\"HyperSlab\" Dimensions=\"%d 1\">\n"
" <DataItem Dimensions=\"3 2\" Format=\"XML\">\n"
" 0 0 1 1 %d 1\n"
" </DataItem>\n"
" <DataItem Dimensions=\"%d 1\" Format=\"HDF\">\n"
" &HeavyData;:/geometry/vertices\n"
" </DataItem>\n"
" </DataItem>\n"
% (numVertices, numVertices, numVertices,)
)
# y component
self.file.write(
" <DataItem Name=\"verticesY\" ItemType=\"Hyperslab\" Type=\"HyperSlab\" Dimensions=\"%d 1\">\n"
" <DataItem Dimensions=\"3 2\" Format=\"XML\">\n"
" 0 1 1 1 %d 1\n"
" </DataItem>\n"
" <DataItem Dimensions=\"%d 1\" Format=\"HDF\">\n"
" &HeavyData;:/geometry/vertices\n"
" </DataItem>\n"
" </DataItem>\n"
% (numVertices, numVertices, numVertices,))
# z component
self.file.write(
" <DataItem Name=\"verticesZ\" ItemType=\"Function\" Dimensions=\"%d 1\" Function=\"0*$0\">\n"
" <DataItem Reference=\"XML\">\n"
" /Xdmf/Domain/DataItem[@Name=\"vertices\"]/DataItem[@Name=\"verticesX\"]\n"
" </DataItem>\n"
" </DataItem>\n" % (numVertices,)
)
self.file.write(" </DataItem>\n")
else:
self._close()
raise ValueError("Unexpected spatial dimension %d when writing domain vertices." % spaceDim)
return
def _closeDomain(self):
"""Close domain element.
"""
self.file.write(
" </Domain>\n"
)
return
def _openTimeCollection(self):
"""Create Grid element for collection of time grids.
"""
self.file.write(
" <Grid Name=\"TimeSeries\" GridType=\"Collection\" CollectionType=\"Temporal\">\n"
)
return
def _closeTimeCollection(self):
"""Close Grid element for collection of time grids.
"""
self.file.write(
" </Grid>\n"
)
return
def _writeTimeStamps(self, tstamps):
"""Write time stamps.
"""
self.file.write(
" <Time TimeType=\"List\">\n"
" <DataItem Format=\"XML\" NumberType=\"Float\" Dimensions=\"%d\">\n"
" "
% (tstamps.shape[0],)
)
for t in tstamps:
self.file.write(" %16.8e" % t)
self.file.write(
"\n"
" </DataItem>\n"
" </Time>\n"
)
return
def _openTimeGrid(self):
"""Create Grid element for a single time step.
"""
self.file.write(
" <Grid Name=\"domain\" GridType=\"Uniform\">\n"
)
return
def _closeTimeGrid(self):
"""Close Grid element for a single time step.
"""
self.file.write(
" </Grid>\n"
)
return
def _writeGridTopology(self, cells):
"""Write topology information for current grid.
"""
cellType = self._xdmfCellType(cells)
assert(2 == len(cells.shape))
numCells = cells.shape[0]
self.file.write(
" <Topology TopologyType=\"%s\" NumberOfElements=\"%d\">\n"
" <DataItem Reference=\"XML\">\n"
" /Xdmf/Domain/DataItem[@Name=\"cells\"]\n"
" </DataItem>\n"
" </Topology>\n"
% (cellType, numCells,)
)
return
def _writeGridGeometry(self, vertices):
"""Write vertices information for current grid.
"""
self.file.write(
" <Geometry GeometryType=\"XYZ\">\n"
" <DataItem Reference=\"XML\">\n"
" /Xdmf/Domain/DataItem[@Name=\"vertices\"]\n"
" </DataItem>\n"
" </Geometry>\n"
)
return
def _writeGridFieldComponent(self, field, iTime, iComponent):
"""Write single component of field for current time step.
"""
if field.vectorFieldType == "Vector":
components = ["_x", "_y", "_z"]
componentName = field.name + components[iComponent]
elif field.vectorFieldType == "Tensor6":
spaceDim = self._spaceDim()
if spaceDim == 2:
components = ["_xx", "_yy", "_xy"]
elif spaceDim == 3:
components = ["_xx", "_yy", "_zz", "_xy", "_yz", "_xz"]
else:
self._close()
raise ValueError("Unexpected spatial dimension %d for field component names." % spaceDim)
componentName = field.name + components[iComponent]
elif field.vectorFieldType == "Matrix":
componentName = field.name + "_%d" % iComponent
else:
self._close()
raise ValueError("Unexpected vector field type '%s' for field component names." % field.vectorFieldType)
h5Name = "/" + Field.domainToGroup[field.domain] + "/" + field.name
if iTime is None:
assert(2 == len(field.data.shape))
numPoints, numComponents = field.data.shape
numTimeSteps = 1
else:
assert(3 == len(field.data.shape))
numTimeSteps, numPoints, numComponents = field.data.shape
self.file.write(
" <Attribute Name=\"%(componentName)s\" Type=\"Scalar\" Center=\"%(domain)s\">\n"
" <DataItem ItemType=\"HyperSlab\" Dimensions=\"1 %(numPoints)d 1\" Type=\"HyperSlab\">\n"
" <DataItem Dimensions=\"3 3\" Format=\"XML\">\n"
" %(iTime)d 0 %(iComponent)d 1 1 1 1 %(numPoints)d 1\n"
" </DataItem>\n"
" <DataItem DataType=\"Float\" Precision=\"8\" Dimensions=\"%(numTimeSteps)d %(numPoints)d %(numComponents)d\" Format=\"HDF\">\n"
" &HeavyData;:%(h5Name)s\n"
" </DataItem>\n"
" </DataItem>\n"
" </Attribute>\n"
% {"componentName": componentName,
"domain": field.domain,
"numPoints": numPoints,
"iTime": iTime,
"iComponent": iComponent,
"numTimeSteps": numTimeSteps,
"numComponents": numComponents,
"h5Name": h5Name,
}
)
return
def _writeGridField(self, field, iTime):
"""Write field for current time step.
"""
gridRef = "/Xdmf/Domain/Grid" if iTime is None else "/Xdmf/Domain/Grid/Grid[1]"
self.file.write(
" <Attribute Name=\"%s\" Type=\"%s\" Center=\"%s\">\n"
% (field.name, field.vectorFieldType, field.domain,)
)
h5Name = "/" + Field.domainToGroup[field.domain] + "/" + field.name
iStep = iTime
if iTime is None:
iStep = 0
if 2 == len(field.data.shape):
numPoints, numComponents = field.data.shape
numTimeSteps = 1
elif 3 == len(field.data.shape):
numTimeSteps, numPoints, numComponents = field.data.shape
else:
raise ValueError("Unexpected shape for dataset '%s'." % field.name)
else:
assert(3 == len(field.data.shape))
numTimeSteps, numPoints, numComponents = field.data.shape
if 2 == self._spaceDim() and field.vectorFieldType == "Vector":
self.file.write(
" <DataItem ItemType=\"Function\" Dimensions=\"%d 3\" Function=\"JOIN($0, $1, $2)\">\n"
% (numPoints,)
)
# x component
self.file.write(
" <DataItem ItemType=\"HyperSlab\" Dimensions=\"%(numPoints)d 1\" Type=\"HyperSlab\">\n"
" <DataItem Dimensions=\"3 3\" Format=\"XML\">\n"
" %(iStep)d 0 0 1 1 1 1 %(numPoints)d 1\n"
" </DataItem>\n"
" <DataItem DataType=\"Float\" Precision=\"8\" Dimensions=\"%(numTimeSteps)d %(numPoints)d %(numComponents)d\" Format=\"HDF\">\n"
" &HeavyData;:%(h5Name)s\n"
" </DataItem>\n"
" </DataItem>\n"
% {"numTimeSteps": numTimeSteps, "numPoints": numPoints, "iStep": iStep, "numComponents": numComponents, "h5Name": h5Name}
)
# y component
self.file.write(
" <DataItem ItemType=\"HyperSlab\" Dimensions=\"%(numPoints)d 1\" Type=\"HyperSlab\">\n"
" <DataItem Dimensions=\"3 3\" Format=\"XML\">\n"
" %(iStep)d 0 1 1 1 1 1 %(numPoints)d 1\n"
" </DataItem>\n"
" <DataItem DataType=\"Float\" Precision=\"8\" Dimensions=\"%(numTimeSteps)d %(numPoints)d %(numComponents)d\" Format=\"HDF\">\n"
" &HeavyData;:%(h5Name)s\n"
" </DataItem>\n"
" </DataItem>\n"
% {"numTimeSteps": numTimeSteps, "numPoints": numPoints, "iStep": iStep, "numComponents": numComponents, "h5Name": h5Name}
)
# z component
self.file.write(
" <DataItem ItemType=\"Function\" Dimensions=\"%(numPoints)d 1\" Function=\"0*$0\">\n"
" <DataItem Reference=\"XML\">\n"
" %(gridRef)s/Attribute[@Name=\"%(name)s\"]/DataItem[1]/DataItem[1]\n"
" </DataItem>\n"
" </DataItem>\n"
% {"numPoints": numPoints, "name": field.name, "gridRef": gridRef}
)
# close
self.file.write(
" </DataItem>\n"
" </Attribute>\n"
)
else:
self.file.write(
" <DataItem ItemType=\"HyperSlab\" Dimensions=\"1 %(numPoints)d %(numComponents)d\" Type=\"HyperSlab\">\n"
" <DataItem Dimensions=\"3 3\" Format=\"XML\">\n"
" %(iStep)d 0 0 1 1 1 1 %(numPoints)d %(numComponents)d\n"
" </DataItem>\n"
" <DataItem DataType=\"Float\" Precision=\"8\" Dimensions=\"%(numTimeSteps)d %(numPoints)d %(numComponents)d\" Format=\"HDF\">\n"
" &HeavyData;:%(h5Name)s\n"
" </DataItem>\n"
" </DataItem>\n"
" </Attribute>\n"
% {"numTimeSteps": numTimeSteps, "numPoints": numPoints, "iStep": iStep, "numComponents": numComponents, "h5Name": h5Name}
)
return
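
# Example usage (an illustrative sketch, not part of PyLith itself): point
# Xdmf.write() at a PyLith HDF5 output file and it writes the companion .xmf
# metadata file next to it. The filename below is a hypothetical placeholder.
if __name__ == "__main__":
    xdmf = Xdmf()
    xdmf.write("output/step01-domain.h5", verbose=True)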
# End of file
|
the-stack_0_19631 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from functools import partial
import itertools as it
from typing import Any, Callable, Dict
import jax
from jax.interpreters import partial_eval as pe
from jax.config import config
from jax import core
from jax._src.dtypes import dtype, float0
from jax.core import (Trace, Tracer, get_aval, call_p, Primitive, Literal,
raise_to_shaped)
from jax._src.ad_util import (add_jaxvals, add_jaxvals_p, zeros_like_jaxval,
zeros_like_aval, zeros_like_p, Zero)
from jax._src.util import (unzip2, safe_map, safe_zip, split_list,
wrap_name, as_hashable_function)
from jax.tree_util import register_pytree_node
from jax import linear_util as lu
from jax._src.api_util import flatten_fun, flatten_fun_nokwargs
from jax.tree_util import tree_flatten, tree_unflatten, Partial
from jax._src import source_info_util
zip = safe_zip
map = safe_map
def identity(x): return x
def jvp(fun: lu.WrappedFun, has_aux=False, instantiate=True) -> Any:
if not has_aux:
return jvpfun(jvp_subtrace(fun), instantiate)
else:
fun, aux = jvp_subtrace_aux(fun)
return jvpfun(fun, instantiate), aux
@lu.transformation
def jvpfun(instantiate, primals, tangents):
tangents = [Zero.from_value(t) if not isinstance(t, Zero)
and dtype(t) is float0 else t for t in tangents]
with core.new_main(JVPTrace) as main:
out_primals, out_tangents = yield (main, primals, tangents), {}
del main
if type(instantiate) is bool:
instantiate = [instantiate] * len(out_tangents)
out_tangents = [instantiate_zeros(t) if inst else t for t, inst
in zip(out_tangents, instantiate)]
yield out_primals, out_tangents
@lu.transformation
def jvp_subtrace(main, primals, tangents):
trace = JVPTrace(main, core.cur_sublevel())
for x in list(primals) + list(tangents):
if isinstance(x, Tracer):
assert x._trace.level < trace.level
in_tracers = [JVPTracer(trace, x, t) if type(t) is not Zero else x
for x, t in zip(primals, tangents)]
ans = yield in_tracers, {}
out_tracers = map(trace.full_raise, ans)
yield unzip2([(out_tracer.primal, out_tracer.tangent)
for out_tracer in out_tracers])
@lu.transformation_with_aux
def jvp_subtrace_aux(main, primals, tangents):
trace = JVPTrace(main, core.cur_sublevel())
for x in list(primals) + list(tangents):
if isinstance(x, Tracer):
assert x._trace.level < trace.level
ans, aux = yield map(partial(JVPTracer, trace), primals, tangents), {}
ans_tracers = map(trace.full_raise, ans)
out_primals, out_tangents = unzip2((t.primal, t.tangent) for t in ans_tracers)
aux_primals = [core.full_lower(x.primal)
if isinstance(x, JVPTracer) and x._trace.level == trace.level
else x for x in aux]
yield (out_primals, out_tangents), aux_primals
def linearize(traceable, *primals, **kwargs):
has_aux = kwargs.pop('has_aux', False)
if not has_aux:
jvpfun = jvp(traceable)
else:
jvpfun, aux = jvp(traceable, has_aux=True)
in_pvals = (tuple(pe.PartialVal.known(p) for p in primals)
+ tuple(pe.PartialVal.unknown(get_aval(p).at_least_vspace())
for p in primals))
_, in_tree = tree_flatten(((primals, primals), {}))
jvpfun_flat, out_tree = flatten_fun(jvpfun, in_tree)
jaxpr, out_pvals, consts = pe.trace_to_jaxpr(jvpfun_flat, in_pvals)
out_primals_pvals, out_tangents_pvals = tree_unflatten(out_tree(), out_pvals)
assert all(out_primal_pval.is_known() for out_primal_pval in out_primals_pvals)
_, out_primals_consts = unzip2(out_primals_pvals)
jaxpr.invars = jaxpr.invars[len(primals):]
jaxpr.outvars = jaxpr.outvars[len(out_primals_pvals):]
if not has_aux:
return out_primals_consts, out_tangents_pvals, jaxpr, consts
else:
return out_primals_consts, out_tangents_pvals, jaxpr, consts, aux()
def vjp(traceable, primals, has_aux=False, reduce_axes=()):
if not has_aux:
out_primals, pvals, jaxpr, consts = linearize(traceable, *primals)
else:
out_primals, pvals, jaxpr, consts, aux = linearize(traceable, *primals, has_aux=True)
def unbound_vjp(pvals, jaxpr, consts, *cts):
cts = tuple(map(ignore_consts, cts, pvals))
dummy_args = [UndefinedPrimal(v.aval) for v in jaxpr.invars]
arg_cts = backward_pass(jaxpr, reduce_axes, consts, dummy_args, cts)
return map(instantiate_zeros, arg_cts)
# Ensure that vjp_ is a PyTree so that we can pass it from the forward to the backward
# pass in a custom VJP.
vjp_ = Partial(partial(unbound_vjp, pvals, jaxpr), consts)
if not has_aux:
return out_primals, vjp_
else:
return out_primals, vjp_, aux
def ignore_consts(ct, pval):
aval, const = pval
if isinstance(aval, core.AbstractValue):
return ct
elif aval is None:
return core.unit
else:
raise TypeError(aval)
def unpair_pval(pval):
aval, const = pval
const_1, const_2 = const
if aval is None:
return (None, const_1), (None, const_2)
else:
aval_1, aval_2 = aval
return (aval_1, const_1), (aval_2, const_2)
def replace_float0s(primal, tangent):
if dtype(tangent) is float0:
return zeros_like_jaxval(primal)
else:
return tangent
def recast_to_float0(primal, tangent):
if core.primal_dtype_to_tangent_dtype(dtype(primal)) == float0:
return Zero(get_aval(primal).at_least_vspace())
else:
return tangent
# NOTE: The FIXMEs below are caused by primal/tangent mixups (type errors if you will)
def backward_pass(jaxpr: core.Jaxpr, reduce_axes, consts, primals_in, cotangents_in):
if all(type(ct) is Zero for ct in cotangents_in):
return map(lambda v: Zero(v.aval), jaxpr.invars)
def write_cotangent(prim, v, ct):
# assert v not in primal_env
assert ct is not Zero, (prim, v.aval) # check for an old harmless type error
if ct is None or type(v) is Literal:
return
if type(ct) is Zero:
# FIXME: This triggers a lot of failures!
# assert v.aval == ct.aval, (prim, v.aval, ct.aval)
return
axes_to_reduce = tuple(axis_name for axis_name in reduce_axes
if axis_name in core.get_aval(ct).named_shape
and axis_name not in v.aval.named_shape)
if axes_to_reduce:
ct = jax.lax.psum(ct, axis_name=axes_to_reduce)
ct_env[v] = add_tangents(ct_env[v], ct) if v in ct_env else ct
if config.jax_enable_checks:
ct_aval = core.get_aval(ct_env[v])
joined_aval = core.lattice_join(v.aval, ct_aval).strip_weak_type().strip_named_shape()
assert v.aval.strip_weak_type().strip_named_shape() == joined_aval, (prim, v.aval, ct_aval)
def read_cotangent(v):
return ct_env.pop(v, Zero(v.aval))
def read_primal(v):
if type(v) is Literal:
return v.val
else:
return primal_env.get(v, UndefinedPrimal(v.aval))
def write_primal(v, val):
if not is_undefined_primal(val):
primal_env[v] = val
primal_env: Dict[Any, Any] = {}
write_primal(core.unitvar, core.unit)
map(write_primal, jaxpr.constvars, consts)
# FIXME: invars can contain both primal and tangent values, and this line
# forces primal_in to contain UndefinedPrimals for tangent values!
map(write_primal, jaxpr.invars, primals_in)
ct_env: Dict[Any, Any] = {}
map(partial(write_cotangent, 'outvars'), jaxpr.outvars, cotangents_in)
for eqn in jaxpr.eqns[::-1]:
# FIXME: Some invars correspond to tangents
invals = map(read_primal, eqn.invars)
if eqn.primitive.multiple_results:
cts_in = map(read_cotangent, eqn.outvars)
else:
cts_in, = map(read_cotangent, eqn.outvars)
with source_info_util.user_context(eqn.source_info.traceback):
if eqn.primitive.call_primitive or eqn.primitive.map_primitive:
cts_in_avals = [v.aval for v in eqn.outvars]
params = dict(eqn.params)
call_jaxpr = params.pop('call_jaxpr')
cts_out = get_primitive_transpose(eqn.primitive)(
params, call_jaxpr, invals, cts_in, cts_in_avals, reduce_axes)
elif eqn.primitive in reducing_transposes:
cts_out = reducing_transposes[eqn.primitive](
reduce_axes, cts_in, *invals, **eqn.params)
else:
cts_out = get_primitive_transpose(eqn.primitive)(cts_in, *invals,
**eqn.params)
cts_out = [Zero(v.aval) for v in eqn.invars] if cts_out is Zero else cts_out
# FIXME: Some invars correspond to primals!
map(partial(write_cotangent, eqn.primitive), eqn.invars, cts_out)
cotangents_out = map(read_cotangent, jaxpr.invars)
return cotangents_out
def closed_backward_pass(jaxpr: core.ClosedJaxpr, reduce_axes, primals_in, cotangents_in):
return backward_pass(jaxpr.jaxpr, reduce_axes, jaxpr.consts, primals_in, cotangents_in)
class UndefinedPrimal:
__slots__ = ['aval']
def __init__(self, aval):
self.aval = aval
def __repr__(self):
return 'UndefinedPrimal({})'.format(self.aval)
def is_undefined_primal(x):
return type(x) is UndefinedPrimal
register_pytree_node(UndefinedPrimal,
lambda z: ((), z.aval),
lambda aval, _: UndefinedPrimal(aval))
def get_primitive_transpose(p):
try:
return primitive_transposes[p]
except KeyError as err:
raise NotImplementedError(
"Transpose rule (for reverse-mode differentiation) for '{}' "
"not implemented".format(p)) from err
@lu.transformation_with_aux
def nonzero_tangent_outputs(*args, **kwargs):
results = (_, tangents_out) = yield args, kwargs
yield results, [type(r) is not Zero for r in tangents_out]
class JVPTrace(Trace):
def pure(self, val):
tangent_zero = Zero(get_aval(val).at_least_vspace())
return JVPTracer(self, val, tangent_zero)
def lift(self, val):
tangent_zero = Zero(get_aval(val).at_least_vspace())
return JVPTracer(self, val, tangent_zero)
def sublift(self, val):
return JVPTracer(self, val.primal, val.tangent)
def process_primitive(self, primitive, tracers, params):
primals_in, tangents_in = unzip2((t.primal, t.tangent) for t in tracers)
jvp = primitive_jvps.get(primitive)
if not jvp:
msg = f"Differentiation rule for '{primitive}' not implemented"
raise NotImplementedError(msg)
primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
if primitive.multiple_results:
return [JVPTracer(self, x, t) for x, t in zip(primal_out, tangent_out)]
else:
return JVPTracer(self, primal_out, tangent_out)
def process_call(self, call_primitive, f: lu.WrappedFun, tracers, params):
assert call_primitive.multiple_results
primals, tangents = unzip2((t.primal, t.tangent) for t in tracers)
nonzero_tangents, tangent_tree_def = tree_flatten(tangents)
nz_tangents = [type(t) is not Zero for t in tangents]
if 'name' in params:
params = dict(params, name=wrap_name(params['name'], 'jvp'))
f_jvp = jvp_subtrace(f, self.main)
f_jvp, nz_tangents_out = nonzero_tangent_outputs(f_jvp)
if isinstance(call_primitive, core.MapPrimitive):
in_axes = params['in_axes']
tangent_in_axes = [ax for ax, nz in zip(in_axes, nz_tangents) if nz]
out_axes_thunk = params['out_axes_thunk']
# The new thunk depends deterministically on the old thunk and the wrapped function.
# Any caching already has to include the wrapped function as part of the key, so we
# only use the previous thunk for equality checks.
# NOTE: This assumes that the output tangents being zero is a deterministic
# function of which input tangents were zero.
@as_hashable_function(closure=(tuple(nz_tangents), out_axes_thunk))
def new_out_axes_thunk():
out_axes = out_axes_thunk()
return (*out_axes, *(ax for ax, nz in zip(out_axes, nz_tangents_out()) if nz))
params = dict(params,
in_axes=(*in_axes, *tangent_in_axes),
out_axes_thunk=new_out_axes_thunk)
f_jvp, out_tree_def = traceable(f_jvp, len(primals), tangent_tree_def)
update_params = call_param_updaters.get(call_primitive)
new_params = (update_params(params, nz_tangents, nz_tangents_out)
if update_params else params)
result = call_primitive.bind(f_jvp, *primals, *nonzero_tangents, **new_params)
primal_out, tangent_out = tree_unflatten(out_tree_def(), result)
return [JVPTracer(self, p, t) for p, t in zip(primal_out, tangent_out)]
def post_process_call(self, call_primitive, out_tracers, params):
primals, tangents = unzip2((t.primal, t.tangent) for t in out_tracers)
out, treedef = tree_flatten((primals, tangents))
tangents_nz = [type(t) is not Zero for t in tangents]
del primals, tangents
main = self.main
def todo(x):
primals, tangents = tree_unflatten(treedef, x)
trace = JVPTrace(main, core.cur_sublevel())
return map(partial(JVPTracer, trace), primals, tangents)
if call_primitive.map_primitive:
def out_axes_transform(out_axes):
return (*out_axes, *(ax for ax, nz in zip(out_axes, tangents_nz) if nz))
todo = (todo, out_axes_transform)
return out, todo
# The only difference between process_map and process_call is that
# the `in_axes` and `out_axes_thunk` params must be updated;
# that's handled in process_call.
process_map = process_call
post_process_map = post_process_call
def process_custom_jvp_call(self, _, __, f_jvp, tracers):
primals_in, tangents_in = unzip2((t.primal, t.tangent) for t in tracers)
primals_in = map(core.full_lower, primals_in)
tangents_in = map(instantiate_zeros, tangents_in)
# Cast float0 to zeros with the primal dtype because custom jvp rules don't
# currently handle float0s
tangents_in = map(replace_float0s, primals_in, tangents_in)
outs = f_jvp.call_wrapped(*it.chain(primals_in, tangents_in))
primals_out, tangents_out = split_list(outs, [len(outs) // 2])
tangents_out = map(recast_to_float0, primals_out, tangents_out)
return map(partial(JVPTracer, self), primals_out, tangents_out)
def post_process_custom_jvp_call(self, out_tracers, _):
raise CustomJVPException()
def process_custom_vjp_call(self, _, __, fwd, bwd, tracers, *, out_trees):
primals_in, tangents_in = unzip2((t.primal, t.tangent) for t in tracers)
tangents_in = map(instantiate_zeros, tangents_in)
res_and_primals_out = fwd.call_wrapped(*map(core.full_lower, primals_in))
out_tree, res_tree = out_trees()
res, primals_out = split_list(res_and_primals_out, [res_tree.num_leaves])
avals_out = [raise_to_shaped(core.get_aval(x)) for x in primals_out]
tangents_out = custom_lin_p.bind(
*res, *tangents_in, num_res=res_tree.num_leaves, bwd=bwd,
out_avals=avals_out)
tangents_out = map(recast_to_float0, primals_out, tangents_out)
return map(partial(JVPTracer, self), primals_out, tangents_out)
def post_process_custom_vjp_call(self, out_tracers, _):
raise CustomVJPException()
def join(self, xt, yt):
xz, yz = type(xt) is Zero, type(yt) is Zero
if xz == yz:
return xt, yt
elif yz and not xz:
return xt, zeros_like_jaxval(xt)
elif xz and not yz:
return zeros_like_jaxval(yt), yt
else:
raise TypeError((xt, yt))
class JVPTracer(Tracer):
__slots__ = ['primal', 'tangent']
def __init__(self, trace, primal, tangent):
if config.jax_enable_checks:
_primal_tangent_shapes_match(primal, tangent)
self._trace = trace
self.primal = primal
self.tangent = tangent
@property
def aval(self):
# TODO(dougalm): add epsilon ball
return get_aval(self.primal)
def full_lower(self):
if type(self.tangent) is Zero:
return core.full_lower(self.primal)
else:
return self
def _primal_tangent_shapes_match(primal, tangent):
if type(tangent) is not Zero:
primal_aval = raise_to_shaped(get_aval(primal), weak_type=False)
tangent_aval = raise_to_shaped(get_aval(tangent), weak_type=False)
assert primal_aval.shape == tangent_aval.shape, (primal_aval.shape, tangent_aval.shape)
expected_tangent_dtype = core.primal_dtype_to_tangent_dtype(primal_aval.dtype)
assert expected_tangent_dtype == tangent_aval.dtype, (expected_tangent_dtype, tangent_aval.dtype)
call_param_updaters: Dict[core.Primitive, Callable] = {}
call_transpose_param_updaters: Dict[core.Primitive, Callable] = {}
# -------------------- Primitives --------------------
primitive_jvps : Dict[core.Primitive, Callable] = {}
primitive_transposes: Dict[core.Primitive, Callable] = {}
# transpose rules that internally perform reductions over the given named axes
reducing_transposes: Dict[core.Primitive, Callable] = {}
def deflinear(primitive, transpose_rule):
primitive_jvps[primitive] = partial(linear_jvp, primitive)
primitive_transposes[primitive] = partial(linear_transpose, transpose_rule)
def linear_jvp(primitive, primals, tangents, **params):
val_out = primitive.bind(*primals, **params)
if all(type(tangent) is Zero for tangent in tangents):
return val_out, Zero.from_value(val_out)
else:
tangents = map(instantiate_zeros, tangents)
return val_out, primitive.bind(*tangents, **params)
def linear_transpose(transpose_rule, cotangent, *args, **kwargs):
return Zero if type(cotangent) is Zero else transpose_rule(cotangent, **kwargs)
def deflinear2(primitive, transpose_rule):
primitive_jvps[primitive] = partial(linear_jvp, primitive)
primitive_transposes[primitive] = partial(linear_transpose2, transpose_rule)
def linear_transpose2(transpose_rule, cotangent, *args, **kwargs):
return Zero if type(cotangent) is Zero else transpose_rule(cotangent, *args, **kwargs)
def defjvp(primitive, *jvprules):
assert isinstance(primitive, Primitive)
assert not primitive.multiple_results
primitive_jvps[primitive] = partial(standard_jvp, jvprules, primitive)
def standard_jvp(jvprules, primitive, primals, tangents, **params):
val_out = primitive.bind(*primals, **params)
tangents_out = [rule(t, *primals, **params) for rule, t in zip(jvprules, tangents)
if rule is not None and type(t) is not Zero]
return val_out, functools.reduce(add_tangents, tangents_out, Zero.from_value(val_out))
def defjvp2(primitive, *jvprules):
assert isinstance(primitive, Primitive)
assert not primitive.multiple_results
primitive_jvps[primitive] = partial(standard_jvp2, jvprules, primitive)
def standard_jvp2(jvprules, primitive, primals, tangents, **params):
val_out = primitive.bind(*primals, **params)
tangents_out = (rule(t, val_out, *primals, **params) for rule, t in zip(jvprules, tangents)
if rule is not None and type(t) is not Zero)
tangents_out = list(tangents_out)
return val_out, functools.reduce(add_tangents, tangents_out, Zero.from_value(val_out))
def add_tangents(x, y):
if type(x) is Zero:
return y
elif type(y) is Zero:
return x
else:
return add_jaxvals(x, y)
def defbilinear(prim, lhs_rule, rhs_rule):
assert isinstance(prim, Primitive)
lhs_jvp = lambda g, x, y, **kwargs: prim.bind(g, y, **kwargs)
rhs_jvp = lambda g, x, y, **kwargs: prim.bind(x, g, **kwargs)
defjvp(prim, lhs_jvp, rhs_jvp)
primitive_transposes[prim] = partial(bilinear_transpose, lhs_rule, rhs_rule)
def bilinear_transpose(lhs_rule, rhs_rule, cotangent, x, y, **kwargs):
assert is_undefined_primal(x) ^ is_undefined_primal(y)
if type(cotangent) is Zero:
return Zero
if is_undefined_primal(x):
out = lhs_rule(cotangent, y, **kwargs)
return Zero if out is Zero else (out, None)
else:
out = rhs_rule(cotangent, x, **kwargs)
return Zero if out is Zero else (None, out)
def defjvp_zero(primitive):
assert isinstance(primitive, Primitive)
primitive_jvps[primitive] = partial(zero_jvp, primitive)
def zero_jvp(primitive, primals, tangents, **params):
r = primitive.bind(*primals, **params)
return r, Zero.from_value(r)
deflinear2(zeros_like_p, lambda t, _: [Zero.from_value(t)])
deflinear2(add_jaxvals_p, lambda t, *args: (t, t))
def instantiate_zeros(tangent):
if type(tangent) is Zero:
if isinstance(tangent.aval, Tracer):
return tangent.aval
return zeros_like_aval(tangent.aval)
else:
return tangent
# This function seems similar to instantiate_zeros, but it is sometimes used
# to instantiate zero abstract units with a different aval
def instantiate_zeros_aval(aval, tangent):
if type(tangent) is Zero:
assert type(tangent.aval) is core.AbstractUnit or tangent.aval == aval
return zeros_like_aval(aval)
else:
return tangent
@lu.transformation_with_aux
def traceable(num_primals, in_tree_def, *primals_and_tangents):
new_primals = primals_and_tangents[:num_primals]
new_tangents = primals_and_tangents[num_primals:]
new_tangents = tree_unflatten(in_tree_def, new_tangents)
primal_out, tangent_out = yield (new_primals, new_tangents), {}
out_flat, tree_def = tree_flatten((primal_out, tangent_out))
yield out_flat, tree_def
def call_transpose(primitive, params, call_jaxpr, args, ct, _, reduce_axes):
all_args, in_tree_def = tree_flatten(((), args, ct)) # empty consts
fun = lu.hashable_partial(lu.wrap_init(backward_pass), call_jaxpr, reduce_axes)
fun, out_tree = flatten_fun_nokwargs(fun, in_tree_def)
new_params = dict(params, name=wrap_name(params['name'], 'transpose'))
update_params = call_transpose_param_updaters.get(primitive)
if update_params:
new_params = update_params(new_params, map(is_undefined_primal, args),
[type(x) is not Zero for x in ct])
out_flat = primitive.bind(fun, *all_args, **new_params)
return tree_unflatten(out_tree(), out_flat)
primitive_transposes[core.call_p] = partial(call_transpose, call_p)
def remat_transpose(params, call_jaxpr, primals_in, cotangents_in,
cotangent_in_avals, reduce_axes):
# backward_pass can only transpose linear computations, but the call_jaxpr embedded in
# remat contains primal (non-linear) equations too. Hence, we have to eliminate those
# (in this case via partial_eval) before we call into backward_pass again.
typed_call_jaxpr = core.ClosedJaxpr(call_jaxpr, [])
unknowns = map(is_undefined_primal, primals_in)
primal_jaxpr, tangent_jaxpr, out_unknowns = \
pe.partial_eval_jaxpr(typed_call_jaxpr, unknowns=unknowns, instantiate=True) # type: ignore
def do_transpose(primals_in, cotangents_in):
# NOTE: This is passing in undefined primals in place of tangent arguments, but it
# should all work out, because we're only computing the primal part here.
residuals = core.jaxpr_as_fun(primal_jaxpr)(*primals_in)[len(cotangents_in):]
# Now that we have a purely linear jaxpr, we can transpose it
cotangents_out = backward_pass(
tangent_jaxpr.jaxpr, reduce_axes, (), primals_in + residuals, cotangents_in)
# backward_pass will return cotangents computed for all invars, but some of them
# are residuals appended by partial eval, so we need to skip those before we return.
return cotangents_out[:len(primals_in)]
flat_args, in_tree_def = tree_flatten((primals_in, cotangents_in))
flat_do_transpose, out_tree = flatten_fun_nokwargs(lu.wrap_init(do_transpose), in_tree_def)
flat_cotangents_out = pe.remat_call_p.bind(flat_do_transpose, *flat_args, **params)
return tree_unflatten(out_tree(), flat_cotangents_out)
primitive_transposes[pe.remat_call_p] = remat_transpose
@lu.transformation_with_aux
def nonzero_outputs(*args, **kwargs):
results = yield args, kwargs
yield results, [type(r) is not Zero for r in results]
def map_transpose(primitive, params, call_jaxpr, args, ct, _, reduce_axes):
all_args, in_tree_def = tree_flatten(((), args, ct)) # empty consts
fun = lu.hashable_partial(lu.wrap_init(backward_pass), call_jaxpr, reduce_axes)
fun, nz_arg_cts = nonzero_outputs(fun)
fun, out_tree = flatten_fun_nokwargs(fun, in_tree_def)
# Preserve axis for primal arguments, skip tangents (represented as undefined primals).
in_axes, out_axes = params['in_axes'], params['out_axes']
new_in_axes = (*[axis for axis, x in zip(in_axes, args)
if not is_undefined_primal(x)],
*[axis for axis, x in zip(out_axes, ct)
if type(x) is not Zero])
# The interim strategy we use below (until avals-with-names) only works
# when all outputs are mapped.
assert all(out_axis is not None for out_axis in out_axes), out_axes
# NOTE: This assumes that the output cotangents being zero is a deterministic
# function of which input cotangents were zero.
@as_hashable_function(closure=(in_axes, tuple(type(c) is Zero for c in ct)))
def out_axes_thunk():
return tuple(axis or 0 for axis, nz in zip(in_axes, nz_arg_cts()) if nz)
new_params = dict(params, name=wrap_name(params['name'], 'transpose'),
in_axes=new_in_axes, out_axes_thunk=out_axes_thunk)
del new_params['out_axes']
update_params = call_transpose_param_updaters.get(primitive)
if update_params:
new_params = update_params(new_params, map(is_undefined_primal, args),
[type(x) is not Zero for x in ct])
out_flat = primitive.bind(fun, *all_args, **new_params)
arg_cts = tree_unflatten(out_tree(), out_flat)
# The freevars are being fanned out (not mapped). During transpose the
# dual of fan-out is fan-in-sum. We apply it to the unmapped invars.
assert len(in_axes) == len(arg_cts)
def unmap_zero(zero, in_axis):
return (zero if in_axis is None else
Zero(core.unmapped_aval(params['axis_size'], params['axis_name'], in_axis, zero.aval)))
arg_cts = (unmap_zero(arg_ct, in_axis) if type(arg_ct) is Zero else
arg_ct if in_axis is not None else
arg_ct.sum(0)
for arg_ct, in_axis in zip(arg_cts, in_axes))
return tuple(arg_cts)
def jvp_jaxpr(jaxpr, nonzeros, instantiate):
assert len(jaxpr.in_avals) == len(nonzeros)
f = lu.wrap_init(core.jaxpr_as_fun(jaxpr))
f_jvp, out_nonzeros = f_jvp_traceable(jvp(f, instantiate=instantiate), nonzeros)
tangent_avals = [aval for aval, nz in zip(jaxpr.in_avals, nonzeros) if nz]
avals_in = list(it.chain(jaxpr.in_avals, tangent_avals))
jaxpr_out, avals_out, literals_out = pe.trace_to_jaxpr_dynamic(f_jvp, avals_in)
return core.ClosedJaxpr(jaxpr_out, literals_out), out_nonzeros()
@lu.transformation_with_aux
def f_jvp_traceable(nonzeros, *primals_and_nztangents):
num_primals = len(nonzeros)
primals = list(primals_and_nztangents[:num_primals])
nonzero_tangents = iter(primals_and_nztangents[num_primals:])
tangents = [next(nonzero_tangents) if nz else Zero.from_value(p)
for p, nz in zip(primals, nonzeros)]
primals_out, tangents_out = yield (primals, tangents), {}
out_nonzeros = [type(t) is not Zero for t in tangents_out]
nonzero_tangents_out = [t for t in tangents_out if type(t) is not Zero]
yield list(primals_out) + nonzero_tangents_out, out_nonzeros
def rearrange_binders(jaxpr: core.ClosedJaxpr, primals_in, tangents_in, primals_out, tangents_out):
new_invars = _perm(primals_in, tangents_in, jaxpr.jaxpr.invars)
new_outvars = _perm(primals_out, tangents_out, jaxpr.jaxpr.outvars)
new_jaxpr = core.Jaxpr(jaxpr.jaxpr.constvars,
new_invars, new_outvars, jaxpr.jaxpr.eqns)
return core.ClosedJaxpr(new_jaxpr, jaxpr.consts)
def _perm(primal_counts, tangent_counts, lst):
n = sum(primal_counts)
primals, tangents = lst[:n], lst[n:]
primal_groups = split_list(primals, primal_counts[:-1])
tangent_groups = split_list(tangents, tangent_counts[:-1])
return _interleave(primal_groups, tangent_groups)
def _interleave(xs, ys):
assert len(xs) == len(ys)
return [e for pair in zip(xs, ys) for l in pair for e in l]
custom_lin_p: core.Primitive = core.Primitive('custom_lin')
custom_lin_p.def_abstract_eval(lambda *_, out_avals, **__: out_avals)
custom_lin_p.multiple_results = True
def _raise_custom_vjp_error_on_jvp(*_, **__):
raise TypeError("can't apply forward-mode autodiff (jvp) to a custom_vjp "
"function.")
custom_lin_p.def_impl(_raise_custom_vjp_error_on_jvp)
def _custom_lin_transpose(cts_out, *invals, num_res, bwd, out_avals):
res, _ = split_list(invals, [num_res])
cts_out = map(instantiate_zeros_aval, out_avals, cts_out)
cts_in = bwd.call_wrapped(*res, *cts_out)
return [None] * num_res + list(cts_in)
primitive_transposes[custom_lin_p] = _custom_lin_transpose
class CustomJVPException(Exception):
def __init__(self):
# TODO(mattjj): track source provenance on AD tracers, improve error
msg = ("Detected differentiation of a custom_jvp function with respect to "
"a closed-over value. That isn't supported because the custom JVP "
"rule only specifies how to differentiate the custom_jvp function "
"with respect to explicit input parameters. Try passing the "
"closed-over value into the custom_jvp function as an argument, and "
"adapting the custom_jvp rule.")
super().__init__(msg)
class CustomVJPException(Exception):
def __init__(self):
# TODO(mattjj): track source provenance on AD tracers, improve error
msg = ("Detected differentiation of a custom_vjp function with respect to "
"a closed-over value. That isn't supported because the custom VJP "
"rule only specifies how to differentiate the custom_vjp function "
"with respect to explicit input parameters. Try passing the "
"closed-over value into the custom_vjp function as an argument, and "
"adapting the custom_vjp fwd and bwd rules.")
super().__init__(msg)
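
# A minimal usage sketch (an assumption, not part of the original module): the
# machinery above is normally exercised through jax.jvp / jax.grad, which route
# through JVPTrace and backward_pass defined here. Guarded so it only runs when
# this file is executed directly.
if __name__ == "__main__":
  import jax.numpy as jnp
  print(jax.jvp(jnp.sin, (1.0,), (1.0,)))   # forward mode: (sin(1.0), cos(1.0) * 1.0)
  print(jax.grad(jnp.sin)(1.0))             # reverse mode via vjp: cos(1.0)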
|
the-stack_0_19632 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# openomics documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
import openomics
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.napoleon']
napoleon_google_docstring = True
napoleon_use_param = True
napoleon_use_ivar = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'OpenOmics'
copyright = u"2019, Nhat Tran"
author = u"Jonny Tran"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = openomics.__version__
# The full version, including alpha/beta/rc tags.
release = openomics.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'openomicsdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'openomics.tex',
u'OpenOmics Documentation',
u'Nhat Chau Tran', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'openomics',
u'OpenOmics Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'openomics',
u'OpenOmics Documentation',
author,
'openomics',
'One line description of project.',
'Miscellaneous'),
]
|
the-stack_0_19633 | import asyncio
import json
import re
import aiohttp
from holo_py.types import HoloResponse
class HoloClient:
_base_url = "http://discord-holo-api.ml/api/"
_loop = None
def __init__(self, loop: asyncio.AbstractEventLoop):
self._loop = loop
@classmethod
async def get_emote(cls, tag: str) -> HoloResponse:
"""
Gets an emote from the API - http://discord-holo-api.ml/api/
:param tag: name of the emotion tag to search for, e.g. "kiss"
:return: HoloResponse
:raise ValueError: if no tag is specified or the tag is shorter than 3 characters
"""
tag = re.sub(r"[^ -~]+", r"", tag)
if not tag or len(tag) < 3:
raise ValueError("tag length must be more or equal 3" if tag else "you must specify a tag")
async with aiohttp.ClientSession(loop=cls._loop) as session:
resp = await session.get(cls._base_url + tag)
try:
data = await resp.json()
except aiohttp.client_exceptions.ContentTypeError as err:
data = json.loads(await resp.read())
return HoloResponse(data)
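# Illustrative usage sketch (endpoint availability is an assumption; the
# classmethod can be awaited without constructing a client):
#
#   import asyncio
#
#   async def demo():
#       response = await HoloClient.get_emote("kiss")
#       print(response)
#
#   asyncio.run(demo())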
|
the-stack_0_19634 | """Module to handle the operations within the aggregate pipeline."""
import bisect
import collections
import copy
import datetime
import decimal
import itertools
import math
import numbers
import random
import warnings
from sentinels import NOTHING
import six
from six import moves
from mongomock import command_cursor
from mongomock import filtering
from mongomock import helpers
from mongomock import OperationFailure
try:
from bson import decimal128
decimal_support = True
except ImportError:
decimal_support = False
_random = random.Random()
group_operators = [
'$addToSet',
'$first',
'$last',
'$max',
'$min',
'$avg',
'$push',
'$sum',
'$stdDevPop',
'$stdDevSamp']
arithmetic_operators = [
'$abs',
'$add',
'$ceil',
'$divide',
'$exp',
'$floor',
'$ln',
'$log',
'$log10',
'$mod',
'$multiply',
'$pow',
'$sqrt',
'$subtract',
'$trunc',
]
project_operators = [
'$max',
'$min',
'$avg',
'$sum',
'$stdDevPop',
'$stdDevSamp',
'$arrayElemAt',
]
projection_operators = ['$map', '$let', '$literal']
date_operators = [
'$dayOfYear',
'$dayOfMonth',
'$dayOfWeek',
'$year',
'$month',
'$week',
'$hour',
'$minute',
'$second',
'$millisecond',
'$dateToString',
]
conditional_operators = ['$cond', '$ifNull']
array_operators = [
'$concatArrays',
'$filter',
'$isArray',
'$size',
'$slice',
]
text_search_operators = ['$meta']
string_operators = [
'$concat',
'$strcasecmp',
'$substr',
'$toLower',
'$toUpper',
]
comparison_operators = [
'$cmp',
'$eq',
'$ne',
] + list(filtering.SORTING_OPERATOR_MAP.keys())
boolean_operators = ['$and', '$or', '$not']
set_operators = [
'$in',
'$setEquals',
'$setIntersection',
'$setDifference',
'$setUnion',
'$setIsSubset',
'$anyElementTrue',
'$allElementsTrue',
]
type_convertion_operators = [
'$toString',
'$toInt',
'$toDecimal',
]
def _avg_operation(values):
values_list = list(v for v in values if isinstance(v, numbers.Number))
if not values_list:
return None
return sum(values_list) / float(len(list(values_list)))
def _group_operation(values, operator):
values_list = list(v for v in values if v is not None)
if not values_list:
return None
return operator(values_list)
def _sum_operation(values):
values_list = list()
if decimal_support:
for v in values:
if isinstance(v, numbers.Number):
values_list.append(v)
elif isinstance(v, decimal128.Decimal128):
values_list.append(v.to_decimal())
else:
values_list = list(v for v in values if isinstance(v, numbers.Number))
sum_value = sum(values_list)
return decimal128.Decimal128(sum_value) if isinstance(sum_value, decimal.Decimal) else sum_value
_GROUPING_OPERATOR_MAP = {
'$sum': _sum_operation,
'$avg': _avg_operation,
'$min': lambda values: _group_operation(values, min),
'$max': lambda values: _group_operation(values, max),
}
class _Parser(object):
"""Helper to parse expressions within the aggregate pipeline."""
def __init__(self, doc_dict, user_vars=None, ignore_missing_keys=False):
self._doc_dict = doc_dict
self._ignore_missing_keys = ignore_missing_keys
self._user_vars = user_vars or {}
def parse(self, expression):
"""Parse a MongoDB expression."""
if not isinstance(expression, dict):
return self._parse_basic_expression(expression)
if len(expression) > 1 and any(key.startswith('$') for key in expression):
raise OperationFailure(
'an expression specification must contain exactly one field, '
'the name of the expression. Found %d fields in %s'
% (len(expression), expression))
value_dict = {}
for k, v in six.iteritems(expression):
if k in arithmetic_operators:
return self._handle_arithmetic_operator(k, v)
if k in project_operators:
return self._handle_project_operator(k, v)
if k in projection_operators:
return self._handle_projection_operator(k, v)
if k in comparison_operators:
return self._handle_comparison_operator(k, v)
if k in date_operators:
return self._handle_date_operator(k, v)
if k in array_operators:
return self._handle_array_operator(k, v)
if k in conditional_operators:
return self._handle_conditional_operator(k, v)
if k in set_operators:
return self._handle_set_operator(k, v)
if k in string_operators:
return self._handle_string_operator(k, v)
if k in type_convertion_operators:
return self._handle_type_convertion_operator(k, v)
if k in boolean_operators + \
text_search_operators + projection_operators:
raise NotImplementedError(
"'%s' is a valid operation but it is not supported by Mongomock yet." % k)
if k.startswith('$'):
raise OperationFailure("Unrecognized expression '%s'" % k)
try:
value = self.parse(v)
except KeyError:
if self._ignore_missing_keys:
continue
raise
value_dict[k] = value
return value_dict
def parse_many(self, values):
for value in values:
try:
yield self.parse(value)
except KeyError:
if self._ignore_missing_keys:
yield None
def _parse_basic_expression(self, expression):
if isinstance(expression, six.string_types) and expression.startswith('$'):
if expression.startswith('$$'):
return helpers.get_value_by_dot(dict({
'ROOT': self._doc_dict,
'CURRENT': self._doc_dict,
}, **self._user_vars), expression[2:])
return helpers.get_value_by_dot(self._doc_dict, expression[1:], can_generate_array=True)
return expression
def _handle_arithmetic_operator(self, operator, values):
if operator == '$abs':
return abs(self.parse(values))
if operator == '$add':
return sum(self.parse(value) for value in values)
if operator == '$ceil':
return math.ceil(self.parse(values))
if operator == '$divide':
assert len(values) == 2, 'divide must have only 2 items'
return self.parse(values[0]) / self.parse(values[1])
if operator == '$exp':
return math.exp(self.parse(values))
if operator == '$floor':
return math.floor(self.parse(values))
if operator == '$ln':
return math.log(self.parse(values))
if operator == '$log':
assert len(values) == 2, 'log must have only 2 items'
return math.log(self.parse(values[0]), self.parse(values[1]))
if operator == '$log10':
return math.log10(self.parse(values))
if operator == '$mod':
assert len(values) == 2, 'mod must have only 2 items'
return math.fmod(self.parse(values[0]), self.parse(values[1]))
if operator == '$multiply':
return moves.reduce(
lambda x, y: x * y,
(self.parse(value) for value in values))
if operator == '$pow':
assert len(values) == 2, 'pow must have only 2 items'
return math.pow(self.parse(values[0]), self.parse(values[1]))
if operator == '$sqrt':
return math.sqrt(self.parse(values))
if operator == '$subtract':
assert len(values) == 2, 'subtract must have only 2 items'
value_0 = self.parse(values[0])
value_1 = self.parse(values[1])
if isinstance(value_0, datetime.datetime) and \
isinstance(value_1, (six.integer_types, float)):
value_1 = datetime.timedelta(milliseconds=value_1)
res = value_0 - value_1
if isinstance(res, datetime.timedelta):
return round(res.total_seconds() * 1000)
return res
if operator == '$trunc':
return math.trunc(self.parse(values))
# This should never happen: it is only a safe fallback if something went wrong.
raise NotImplementedError( # pragma: no cover
"Although '%s' is a valid aritmetic operator for the aggregation "
'pipeline, it is currently not implemented in Mongomock.' % operator)
def _handle_project_operator(self, operator, values):
if operator in _GROUPING_OPERATOR_MAP:
return _GROUPING_OPERATOR_MAP[operator](self.parse_many(values))
if operator == '$arrayElemAt':
key, index = values
array = self._parse_basic_expression(key)
return array[index]
raise NotImplementedError("Although '%s' is a valid project operator for the "
'aggregation pipeline, it is currently not implemented '
'in Mongomock.' % operator)
def _handle_projection_operator(self, operator, value):
if operator == '$literal':
return value
raise NotImplementedError("Although '%s' is a valid project operator for the "
'aggregation pipeline, it is currently not implemented '
'in Mongomock.' % operator)
def _handle_comparison_operator(self, operator, values):
assert len(values) == 2, 'Comparison requires two expressions'
a = self.parse(values[0])
b = self.parse(values[1])
if operator == '$eq':
return a == b
if operator == '$ne':
return a != b
if operator in filtering.SORTING_OPERATOR_MAP:
return filtering.bson_compare(filtering.SORTING_OPERATOR_MAP[operator], a, b)
raise NotImplementedError(
"Although '%s' is a valid comparison operator for the "
'aggregation pipeline, it is currently not implemented '
'in Mongomock.' % operator)
def _handle_string_operator(self, operator, values):
if operator == '$toLower':
parsed = self.parse(values)
return str(parsed).lower() if parsed is not None else ''
if operator == '$toUpper':
parsed = self.parse(values)
return str(parsed).upper() if parsed is not None else ''
if operator == '$concat':
parsed_list = list(self.parse_many(values))
return None if None in parsed_list else ''.join([str(x) for x in parsed_list])
if operator == '$substr':
if len(values) != 3:
raise OperationFailure('substr must have 3 items')
string = str(self.parse(values[0]))
first = self.parse(values[1])
length = self.parse(values[2])
if string is None:
return ''
if first < 0:
warnings.warn('Negative starting point given to $substr is accepted only until '
'MongoDB 3.7. This behavior will change in the future.')
return ''
if length < 0:
warnings.warn('Negative length given to $substr is accepted only until '
'MongoDB 3.7. This behavior will change in the future.')
second = len(string) if length < 0 else first + length
return string[first:second]
if operator == '$strcasecmp':
if len(values) != 2:
raise OperationFailure('strcasecmp must have 2 items')
a, b = str(self.parse(values[0])), str(self.parse(values[1]))
return 0 if a == b else -1 if a < b else 1
# This should never happen: it is only a safe fallback if something went wrong.
raise NotImplementedError( # pragma: no cover
"Although '%s' is a valid string operator for the aggregation "
'pipeline, it is currently not implemented in Mongomock.' % operator)
def _handle_date_operator(self, operator, values):
out_value = self.parse(values)
if operator == '$dayOfYear':
return out_value.timetuple().tm_yday
if operator == '$dayOfMonth':
return out_value.day
if operator == '$dayOfWeek':
return (out_value.isoweekday() % 7) + 1
if operator == '$year':
return out_value.year
if operator == '$month':
return out_value.month
if operator == '$week':
return int(out_value.strftime('%U'))
if operator == '$hour':
return out_value.hour
if operator == '$minute':
return out_value.minute
if operator == '$second':
return out_value.second
if operator == '$millisecond':
return int(out_value.microsecond / 1000)
if operator == '$dateToString':
if not isinstance(values, dict) or not {'format', 'date'} <= set(values):
raise OperationFailure(
'$dateToString operator must correspond to a dict '
'that has "format" and "date" fields.'
)
if '%L' in out_value['format']:
raise NotImplementedError(
'Although %L is a valid date format for the '
'$dateToString operator, it is currently not implemented '
'in Mongomock.'
)
if 'onNull' in values:
raise NotImplementedError(
'Although onNull is a valid field for the '
'$dateToString operator, it is currently not implemented '
'in Mongomock.'
)
if 'timezone' in values.keys():
raise NotImplementedError(
'Although timezone is a valid field for the '
'$dateToString operator, it is currently not implemented '
'in Mongomock.'
)
return out_value['date'].strftime(out_value['format'])
raise NotImplementedError(
"Although '%s' is a valid date operator for the "
'aggregation pipeline, it is currently not implemented '
'in Mongomock.' % operator)
def _handle_array_operator(self, operator, value):
if operator == '$size':
if isinstance(value, list):
if len(value) != 1:
raise OperationFailure('Expression $size takes exactly 1 arguments. '
'%d were passed in.' % len(value))
value = value[0]
array_value = self.parse(value)
if not isinstance(array_value, list):
raise OperationFailure('The argument to $size must be an array, '
'but was of type: %s' % type(array_value))
return len(array_value)
if operator == '$filter':
if not isinstance(value, dict):
raise OperationFailure('$filter only supports an object as its argument')
extra_params = set(value) - {'input', 'cond', 'as'}
if extra_params:
raise OperationFailure('Unrecognized parameter to $filter: %s' % extra_params.pop())
missing_params = {'input', 'cond'} - set(value)
if missing_params:
raise OperationFailure("Missing '%s' parameter to $filter" % missing_params.pop())
input_array = self.parse(value['input'])
fieldname = value.get('as', 'this')
cond = value['cond']
return [
item for item in input_array
if _Parser(
self._doc_dict,
dict(self._user_vars, **{fieldname: item}),
ignore_missing_keys=self._ignore_missing_keys,
).parse(cond)
]
if operator == '$slice':
if not isinstance(value, list):
raise OperationFailure('$slice only supports a list as its argument')
if len(value) < 2 or len(value) > 3:
raise OperationFailure('Expression $slice takes at least 2 arguments, and at most '
'3, but {} were passed in'.format(len(value)))
array_value = self.parse(value[0])
if not isinstance(array_value, list):
raise OperationFailure(
'First argument to $slice must be an array, but is of type: {}'
.format(type(array_value)))
for num, v in zip(('Second', 'Third'), value[1:]):
if not isinstance(v, six.integer_types):
raise OperationFailure(
'{} argument to $slice must be numeric, but is of type: {}'
.format(num, type(v)))
if len(value) > 2 and value[2] <= 0:
raise OperationFailure('Third argument to $slice must be '
'positive: {}'.format(value[2]))
start = value[1]
if start < 0:
if len(value) > 2:
stop = len(array_value) + start + value[2]
else:
stop = None
elif len(value) > 2:
stop = start + value[2]
else:
stop = start
start = 0
return array_value[start:stop]
raise NotImplementedError(
"Although '%s' is a valid array operator for the "
'aggregation pipeline, it is currently not implemented '
'in Mongomock.' % operator)
def _handle_type_convertion_operator(self, operator, values):
if operator == '$toString':
try:
parsed = self.parse(values)
except KeyError:
return None
if isinstance(parsed, bool):
return str(parsed).lower()
if isinstance(parsed, datetime.datetime):
return parsed.isoformat()[:-3] + 'Z'
return str(parsed)
if operator == '$toInt':
try:
parsed = self.parse(values)
except KeyError:
return None
if decimal_support:
if isinstance(parsed, decimal128.Decimal128):
return int(parsed.to_decimal())
return int(parsed)
raise NotImplementedError(
'You need to import the pymongo library to support decimal128 type.'
)
# Document: https://docs.mongodb.com/manual/reference/operator/aggregation/toDecimal/
if operator == '$toDecimal':
if not decimal_support:
raise NotImplementedError(
'You need to import the pymongo library to support decimal128 type.'
)
try:
parsed = self.parse(values)
except KeyError:
return None
if isinstance(parsed, bool):
parsed = '1' if parsed is True else '0'
decimal_value = decimal128.Decimal128(parsed)
elif isinstance(parsed, int):
decimal_value = decimal128.Decimal128(str(parsed))
elif isinstance(parsed, float):
exp = decimal.Decimal('.00000000000000')
decimal_value = decimal.Decimal(str(parsed)).quantize(exp)
decimal_value = decimal128.Decimal128(decimal_value)
elif isinstance(parsed, decimal128.Decimal128):
decimal_value = parsed
elif isinstance(parsed, str):
try:
decimal_value = decimal128.Decimal128(parsed)
except decimal.InvalidOperation:
raise OperationFailure(
"Failed to parse number '%s' in $convert with no onError value:"
'Failed to parse string to decimal' % parsed)
elif isinstance(parsed, datetime.datetime):
epoch = datetime.datetime.utcfromtimestamp(0)
string_micro_seconds = str((parsed - epoch).total_seconds() * 1000).split('.')[0]
decimal_value = decimal128.Decimal128(string_micro_seconds)
else:
raise TypeError("'%s' type is not supported" % type(parsed))
return decimal_value
def _handle_conditional_operator(self, operator, values):
if operator == '$ifNull':
field, fallback = values
try:
out_value = self.parse(field)
if out_value is not None:
return out_value
except KeyError:
pass
return self.parse(fallback)
if operator == '$cond':
if isinstance(values, list):
condition, true_case, false_case = values
elif isinstance(values, dict):
condition = values['if']
true_case = values['then']
false_case = values['else']
try:
condition_value = self.parse(condition)
except KeyError:
condition_value = False
expression = true_case if condition_value else false_case
return self.parse(expression)
# This should never happen: it is only a safe fallback if something went wrong.
raise NotImplementedError( # pragma: no cover
"Although '%s' is a valid conditional operator for the "
'aggregation pipeline, it is currently not implemented '
'in Mongomock.' % operator)
def _handle_set_operator(self, operator, values):
if operator == '$in':
expression, array = values
return self.parse(expression) in self.parse(array)
if operator == '$setUnion':
result = []
for set_value in values:
for value in self.parse(set_value):
if value not in result:
result.append(value)
return result
if operator == '$setEquals':
set_values = [set(self.parse(value)) for value in values]
for set1, set2 in itertools.combinations(set_values, 2):
if set1 != set2:
return False
return True
raise NotImplementedError(
"Although '%s' is a valid set operator for the aggregation "
'pipeline, it is currently not implemented in Mongomock.' % operator)
def _parse_expression(expression, doc_dict, ignore_missing_keys=False):
"""Parse an expression.
Args:
expression: an Aggregate Expression, see
https://docs.mongodb.com/manual/meta/aggregation-quick-reference/#aggregation-expressions.
doc_dict: the document on which to evaluate the expression.
ignore_missing_keys: if True, missing keys evaluated by the expression are ignored silently
when possible.
"""
return _Parser(doc_dict, ignore_missing_keys=ignore_missing_keys).parse(expression)
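# Illustrative examples (not part of the module), evaluated against plain dicts:
#   _parse_expression('$a', {'a': 3})                                      # -> 3
#   _parse_expression({'$add': ['$a', 4]}, {'a': 3})                       # -> 7
#   _parse_expression({'$cond': ['$flag', 'yes', 'no']}, {'flag': False})  # -> 'no'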
def _accumulate_group(output_fields, group_list):
doc_dict = {}
for field, value in six.iteritems(output_fields):
if field == '_id':
continue
for operator, key in six.iteritems(value):
values = []
for doc in group_list:
try:
values.append(_parse_expression(key, doc))
except KeyError:
continue
if operator in _GROUPING_OPERATOR_MAP:
doc_dict[field] = _GROUPING_OPERATOR_MAP[operator](values)
elif operator == '$first':
doc_dict[field] = values[0] if values else None
elif operator == '$last':
doc_dict[field] = values[-1] if values else None
elif operator == '$addToSet':
value = []
val_it = (val or None for val in values)
# Don't use set in case elt in not hashable (like dicts).
for elt in val_it:
if elt not in value:
value.append(elt)
doc_dict[field] = value
elif operator == '$push':
if field not in doc_dict:
doc_dict[field] = values
else:
doc_dict[field].extend(values)
elif operator in group_operators:
raise NotImplementedError(
'Although %s is a valid group operator for the '
'aggregation pipeline, it is currently not implemented '
'in Mongomock.' % operator)
else:
raise NotImplementedError(
'%s is not a valid group operator for the aggregation '
'pipeline. See http://docs.mongodb.org/manual/meta/'
'aggregation-quick-reference/ for a complete list of '
'valid operators.' % operator)
return doc_dict
def _fix_sort_key(key_getter):
def fixed_getter(doc):
key = key_getter(doc)
# Convert dictionaries to make sorted() work in Python 3.
if isinstance(key, dict):
return [(k, v) for (k, v) in sorted(key.items())]
return key
return fixed_getter
def _handle_lookup_stage(in_collection, database, options):
for operator in ('let', 'pipeline'):
if operator in options:
raise NotImplementedError(
"Although '%s' is a valid lookup operator for the "
'aggregation pipeline, it is currently not '
'implemented in Mongomock.' % operator)
for operator in ('from', 'localField', 'foreignField', 'as'):
if operator not in options:
raise OperationFailure(
"Must specify '%s' field for a $lookup" % operator)
if not isinstance(options[operator], six.string_types):
raise OperationFailure(
'Arguments to $lookup must be strings')
if operator in ('as', 'localField', 'foreignField') and \
options[operator].startswith('$'):
raise OperationFailure(
"FieldPath field names may not start with '$'")
if operator == 'as' and \
'.' in options[operator]:
raise NotImplementedError(
"Although '.' is valid in the 'as' "
'parameters for the lookup stage of the aggregation '
'pipeline, it is currently not implemented in Mongomock.')
foreign_name = options['from']
local_field = options['localField']
foreign_field = options['foreignField']
local_name = options['as']
foreign_collection = database.get_collection(foreign_name)
for doc in in_collection:
try:
query = helpers.get_value_by_dot(doc, local_field)
except KeyError:
query = None
if isinstance(query, list):
query = {'$in': query}
matches = foreign_collection.find({foreign_field: query})
doc[local_name] = [foreign_doc for foreign_doc in matches]
return in_collection
def _handle_graph_lookup_stage(in_collection, database, options):
if not isinstance(options.get('maxDepth', 0), six.integer_types):
raise OperationFailure(
"Argument 'maxDepth' to $graphLookup must be a number")
if not isinstance(options.get('restrictSearchWithMatch', {}), dict):
raise OperationFailure(
"Argument 'restrictSearchWithMatch' to $graphLookup must be a Dictionary")
if not isinstance(options.get('depthField', ''), six.string_types):
raise OperationFailure(
"Argument 'depthField' to $graphlookup must be a string")
if 'startWith' not in options:
raise OperationFailure(
"Must specify 'startWith' field for a $graphLookup")
for operator in ('as', 'connectFromField', 'connectToField', 'from'):
if operator not in options:
raise OperationFailure(
"Must specify '%s' field for a $graphLookup" % operator)
if not isinstance(options[operator], six.string_types):
raise OperationFailure(
"Argument '%s' to $graphLookup must be string" % operator)
if options[operator].startswith('$'):
raise OperationFailure("FieldPath field names may not start with '$'")
if operator in ('connectFromField', 'as') and \
'.' in options[operator]:
raise NotImplementedError(
"Although '.' is valid in the '%s' "
'parameter for the $graphLookup stage of the aggregation '
'pipeline, it is currently not implemented in Mongomock.' % operator)
foreign_name = options['from']
start_with = options['startWith']
connect_from_field = options['connectFromField']
connect_to_field = options['connectToField']
local_name = options['as']
max_depth = options.get('maxDepth', None)
depth_field = options.get('depthField', None)
restrict_search_with_match = options.get('restrictSearchWithMatch', {})
foreign_collection = database.get_collection(foreign_name)
out_doc = copy.deepcopy(in_collection) # TODO(pascal): speed the deep copy
def _find_matches_for_depth(query):
if isinstance(query, list):
query = {'$in': query}
matches = foreign_collection.find({connect_to_field: query})
new_matches = []
for new_match in matches:
if filtering.filter_applies(restrict_search_with_match, new_match) \
and new_match['_id'] not in found_items:
if depth_field is not None:
new_match = collections.OrderedDict(new_match, **{depth_field: depth})
new_matches.append(new_match)
found_items.add(new_match['_id'])
return new_matches
for doc in out_doc:
found_items = set()
depth = 0
result = _parse_expression(start_with, doc)
origin_matches = doc[local_name] = _find_matches_for_depth(result)
while origin_matches and (max_depth is None or depth < max_depth):
depth += 1
newly_discovered_matches = []
for match in origin_matches:
match_target = match.get(connect_from_field)
newly_discovered_matches += _find_matches_for_depth(match_target)
doc[local_name] += newly_discovered_matches
origin_matches = newly_discovered_matches
return out_doc
def _handle_group_stage(in_collection, unused_database, options):
grouped_collection = []
_id = options['_id']
if _id:
def _key_getter(doc):
try:
return _parse_expression(_id, doc)
except KeyError:
return None
def _sort_key_getter(doc):
return filtering.BsonComparable(_key_getter(doc))
# Sort the collection only for the itertools.groupby.
# $group does not order its output document.
sorted_collection = sorted(in_collection, key=_sort_key_getter)
grouped = itertools.groupby(sorted_collection, _key_getter)
else:
grouped = [(None, in_collection)]
for doc_id, group in grouped:
group_list = list(group)
doc_dict = _accumulate_group(options, group_list)
doc_dict['_id'] = doc_id
grouped_collection.append(doc_dict)
return grouped_collection
def _handle_bucket_stage(in_collection, unused_database, options):
unknown_options = set(options) - {'groupBy', 'boundaries', 'output', 'default'}
if unknown_options:
raise OperationFailure(
'Unrecognized option to $bucket: %s.' % unknown_options.pop())
if 'groupBy' not in options or 'boundaries' not in options:
raise OperationFailure(
"$bucket requires 'groupBy' and 'boundaries' to be specified.")
group_by = options['groupBy']
boundaries = options['boundaries']
if not isinstance(boundaries, list):
raise OperationFailure(
"The $bucket 'boundaries' field must be an array, but found type: %s"
% type(boundaries))
if len(boundaries) < 2:
raise OperationFailure(
"The $bucket 'boundaries' field must have at least 2 values, but "
'found %d value(s).' % len(boundaries))
if sorted(boundaries) != boundaries:
raise OperationFailure(
"The 'boundaries' option to $bucket must be sorted in ascending order")
output_fields = options.get('output', {'count': {'$sum': 1}})
default_value = options.get('default', None)
try:
is_default_last = default_value >= boundaries[-1]
except TypeError:
is_default_last = True
def _get_default_bucket():
try:
return options['default']
except KeyError:
raise OperationFailure(
'$bucket could not find a matching branch for '
'an input, and no default was specified.')
def _get_bucket_id(doc):
"""Get the bucket ID for a document.
Note that it actually returns a tuple with the first
param being a sort key to sort the default bucket even
if it's not the same type as the boundaries.
"""
try:
value = _parse_expression(group_by, doc)
except KeyError:
return (is_default_last, _get_default_bucket())
index = bisect.bisect_right(boundaries, value)
if index and index < len(boundaries):
return (False, boundaries[index - 1])
return (is_default_last, _get_default_bucket())
in_collection = ((_get_bucket_id(doc), doc) for doc in in_collection)
out_collection = sorted(in_collection, key=lambda kv: kv[0])
grouped = itertools.groupby(out_collection, lambda kv: kv[0])
out_collection = []
for (unused_key, doc_id), group in grouped:
group_list = [kv[1] for kv in group]
doc_dict = _accumulate_group(output_fields, group_list)
doc_dict['_id'] = doc_id
out_collection.append(doc_dict)
return out_collection
def _handle_sample_stage(in_collection, unused_database, options):
if not isinstance(options, dict):
raise OperationFailure('the $sample stage specification must be an object')
size = options.pop('size', None)
if size is None:
raise OperationFailure('$sample stage must specify a size')
if options:
raise OperationFailure('unrecognized option to $sample: %s' % set(options).pop())
shuffled = list(in_collection)
_random.shuffle(shuffled)
return shuffled[:size]
def _handle_sort_stage(in_collection, unused_database, options):
sort_array = reversed([{x: y} for x, y in options.items()])
sorted_collection = in_collection
for sort_pair in sort_array:
for sortKey, sortDirection in sort_pair.items():
sorted_collection = sorted(
sorted_collection,
key=lambda x: filtering.resolve_sort_key(sortKey, x),
reverse=sortDirection < 0)
return sorted_collection
def _handle_unwind_stage(in_collection, unused_database, options):
if not isinstance(options, dict):
options = {'path': options}
path = options['path']
if not isinstance(path, six.string_types) or path[0] != '$':
raise ValueError(
'$unwind failed: exception: field path references must be prefixed '
"with a '$' '%s'" % path)
path = path[1:]
should_preserve_null_and_empty = options.get('preserveNullAndEmptyArrays')
include_array_index = options.get('includeArrayIndex')
unwound_collection = []
for doc in in_collection:
try:
array_value = helpers.get_value_by_dot(doc, path)
except KeyError:
if should_preserve_null_and_empty:
unwound_collection.append(doc)
continue
if array_value is None:
if should_preserve_null_and_empty:
unwound_collection.append(doc)
continue
if array_value == []:
if should_preserve_null_and_empty:
new_doc = copy.deepcopy(doc)
# We just ran a get_value_by_dot so we know the value exists.
helpers.delete_value_by_dot(new_doc, path)
unwound_collection.append(new_doc)
continue
if isinstance(array_value, list):
iter_array = enumerate(array_value)
else:
iter_array = [(None, array_value)]
for index, field_item in iter_array:
new_doc = copy.deepcopy(doc)
new_doc = helpers.set_value_by_dot(new_doc, path, field_item)
if include_array_index:
new_doc = helpers.set_value_by_dot(new_doc, include_array_index, index)
unwound_collection.append(new_doc)
return unwound_collection
# TODO(pascal): Combine with the equivalent function in collection but check
# what are the allowed overriding.
def _combine_projection_spec(filter_list, original_filter, prefix=''):
"""Re-format a projection fields spec into a nested dictionary.
e.g: ['a', 'b.c', 'b.d'] => {'a': 1, 'b': {'c': 1, 'd': 1}}
"""
if not isinstance(filter_list, list):
return filter_list
filter_dict = collections.OrderedDict()
for key in filter_list:
field, separator, subkey = key.partition('.')
if not separator:
if isinstance(filter_dict.get(field), list):
other_key = field + '.' + filter_dict[field][0]
raise OperationFailure(
'Invalid $project :: caused by :: specification contains two conflicting paths.'
' Cannot specify both %s and %s: %s' % (
repr(prefix + field), repr(prefix + other_key), original_filter))
filter_dict[field] = 1
continue
if not isinstance(filter_dict.get(field, []), list):
raise OperationFailure(
'Invalid $project :: caused by :: specification contains two conflicting paths.'
' Cannot specify both %s and %s: %s' % (
repr(prefix + field), repr(prefix + key), original_filter))
filter_dict[field] = filter_dict.get(field, []) + [subkey]
return collections.OrderedDict(
(k, _combine_projection_spec(v, original_filter, prefix='%s%s.' % (prefix, k)))
for k, v in six.iteritems(filter_dict)
)
def _project_by_spec(doc, proj_spec, is_include):
output = {}
for key, value in six.iteritems(doc):
if key not in proj_spec:
if not is_include:
output[key] = value
continue
if not isinstance(proj_spec[key], dict):
if is_include:
output[key] = value
continue
if isinstance(value, dict):
output[key] = _project_by_spec(value, proj_spec[key], is_include)
elif isinstance(value, list):
output[key] = [_project_by_spec(array_value, proj_spec[key], is_include)
for array_value in value if isinstance(array_value, dict)]
elif not is_include:
output[key] = value
return output
def _handle_replace_root_stage(in_collection, unused_database, options):
if 'newRoot' not in options:
raise OperationFailure("Parameter 'newRoot' is missing for $replaceRoot operation.")
new_root = options['newRoot']
out_collection = []
for doc in in_collection:
try:
new_doc = _parse_expression(new_root, doc, ignore_missing_keys=True)
except KeyError:
new_doc = NOTHING
if not isinstance(new_doc, dict):
raise OperationFailure(
"'newRoot' expression must evaluate to an object, but resulting value was: {}"
.format(new_doc))
out_collection.append(new_doc)
return out_collection
def _handle_project_stage(in_collection, unused_database, options):
filter_list = []
method = None
include_id = options.get('_id')
# Compute new values for each field, except inclusion/exclusions that are
# handled in one final step.
new_fields_collection = None
for field, value in six.iteritems(options):
if method is None and (field != '_id' or value):
method = 'include' if value else 'exclude'
elif method == 'include' and not value and field != '_id':
raise OperationFailure(
'Bad projection specification, cannot exclude fields '
"other than '_id' in an inclusion projection: %s" % options)
elif method == 'exclude' and value:
raise OperationFailure(
'Bad projection specification, cannot include fields '
'or add computed fields during an exclusion projection: %s' % options)
if value in (0, 1, True, False):
if field != '_id':
filter_list.append(field)
continue
if not new_fields_collection:
new_fields_collection = [{} for unused_doc in in_collection]
for in_doc, out_doc in zip(in_collection, new_fields_collection):
try:
out_doc[field] = _parse_expression(value, in_doc, ignore_missing_keys=True)
except KeyError:
pass
if (method == 'include') == (include_id is not False and include_id != 0):
filter_list.append('_id')
if not filter_list:
return new_fields_collection
# Final steps: include or exclude fields and merge with newly created fields.
projection_spec = _combine_projection_spec(filter_list, original_filter=options)
out_collection = [
_project_by_spec(doc, projection_spec, is_include=(method == 'include'))
for doc in in_collection
]
if new_fields_collection:
return [
dict(a, **b)
for a, b in zip(out_collection, new_fields_collection)
]
return out_collection
def _handle_add_fields_stage(in_collection, unused_database, options):
if not options:
raise OperationFailure(
'Invalid $addFields :: caused by :: specification must have at least one field')
out_collection = [dict(doc) for doc in in_collection]
for field, value in six.iteritems(options):
for in_doc, out_doc in zip(in_collection, out_collection):
try:
out_value = _parse_expression(value, in_doc, ignore_missing_keys=True)
except KeyError:
continue
parts = field.split('.')
for subfield in parts[:-1]:
out_doc[subfield] = out_doc.get(subfield, {})
if not isinstance(out_doc[subfield], dict):
out_doc[subfield] = {}
out_doc = out_doc[subfield]
out_doc[parts[-1]] = out_value
return out_collection
def _handle_out_stage(in_collection, database, options):
# TODO(MetrodataTeam): should leave the origin collection unchanged
out_collection = database.get_collection(options)
if out_collection.count() > 0:
out_collection.drop()
if in_collection:
out_collection.insert_many(in_collection)
return in_collection
def _handle_count_stage(in_collection, database, options):
if not isinstance(options, str) or options == '':
raise OperationFailure('the count field must be a non-empty string')
elif options.startswith('$'):
raise OperationFailure('the count field cannot be a $-prefixed path')
elif '.' in options:
raise OperationFailure("the count field cannot contain '.'")
return [{options: len(in_collection)}]
def _handle_facet_stage(in_collection, database, options):
out_collection_by_pipeline = {}
for pipeline_title, pipeline in options.items():
out_collection_by_pipeline[pipeline_title] = list(process_pipeline(
in_collection, database, pipeline, None))
return [out_collection_by_pipeline]
_PIPELINE_HANDLERS = {
'$addFields': _handle_add_fields_stage,
'$bucket': _handle_bucket_stage,
'$bucketAuto': None,
'$collStats': None,
'$count': _handle_count_stage,
'$currentOp': None,
'$facet': _handle_facet_stage,
'$geoNear': None,
'$graphLookup': _handle_graph_lookup_stage,
'$group': _handle_group_stage,
'$indexStats': None,
'$limit': lambda c, d, o: c[:o],
'$listLocalSessions': None,
'$listSessions': None,
'$lookup': _handle_lookup_stage,
'$match': lambda c, d, o: [doc for doc in c if filtering.filter_applies(o, doc)],
'$merge': None,
'$out': _handle_out_stage,
'$planCacheStats': None,
'$project': _handle_project_stage,
'$redact': None,
'$replaceRoot': _handle_replace_root_stage,
'$replaceWith': None,
'$sample': _handle_sample_stage,
'$set': _handle_add_fields_stage,
'$skip': lambda c, d, o: c[o:],
'$sort': _handle_sort_stage,
'$sortByCount': None,
'$unset': None,
'$unwind': _handle_unwind_stage,
}
def process_pipeline(collection, database, pipeline, session):
if session:
raise NotImplementedError('Mongomock does not handle sessions yet')
for stage in pipeline:
for operator, options in six.iteritems(stage):
try:
handler = _PIPELINE_HANDLERS[operator]
except KeyError:
raise NotImplementedError(
'%s is not a valid operator for the aggregation pipeline. '
'See http://docs.mongodb.org/manual/meta/aggregation-quick-reference/ '
'for a complete list of valid operators.' % operator)
if not handler:
raise NotImplementedError(
"Although '%s' is a valid operator for the aggregation pipeline, it is "
'currently not implemented in Mongomock.' % operator)
collection = handler(collection, database, options)
return command_cursor.CommandCursor(collection)
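# Illustrative end-to-end sketch (assumes mongomock's public client API; the
# collection name and documents are examples):
#
#   import mongomock
#
#   orders = mongomock.MongoClient().db.orders
#   orders.insert_many([
#       {'status': 'A', 'amount': 10},
#       {'status': 'A', 'amount': 5},
#       {'status': 'B', 'amount': 7},
#   ])
#   list(orders.aggregate([
#       {'$match': {'status': 'A'}},
#       {'$group': {'_id': '$status', 'total': {'$sum': '$amount'}}},
#   ]))
#   # expected: [{'_id': 'A', 'total': 15}]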
|
the-stack_0_19635 | HOBBIES = [
("travel", "🛣 Путешествую"),
("photo", "🌆 Фотографирую"),
("writing", "✏️ Пишу"),
("walking", "🚶♂️ Гуляю"),
("cycle", "🚴♀️ Велосипед"),
("pet-projects", "🏗 Пет-проджекты"),
("drinks", "🍸 Бухаю"),
("making", "👷♀️ Строю"),
("running", "🏃♂️ Бегаю"),
("books", "📚 Книги"),
("time-management", "⏰ Тайм-менеджмент"),
("collecting", "📦 Коллекционирование"),
("music", "🎙 Музыка"),
("video", "🎥 Видео"),
("cars", "🚘 Автомобили"),
("420", "🍁 420"),
("yoga", "🧘♀️ Йога"),
("gadgets", "⌚️ Гаджеты"),
("languages", "🈵 Языки"),
("anime", "😻 Аниме"),
("politics", "👩💼 Политика"),
("history", "🏛 История"),
("dancing", "💃 Танцы"),
("bikes", "🏍 Мотоцикл"),
("board-games", "🎲 Настолки"),
("modelling", "⛵️ Моделирование"),
("boards", "🏂 Доски"),
("gym", "🏋️️ Качалочка"),
("sport", "🎾 Другой спорт"),
("fishing", "🐠 Охота и рыбалка"),
("planes", "✈️ Самолеты"),
("yachts", "🚤 Яхты"),
("space", "🛰️ Космос"),
("bbq", "🍖️ BBQ"),
("stonks", "📉 stonks"),
("gardening", "☘️ Садоводство"),
("fashion", "👗️ Мода"),
("art", "🎨 Арт"),
("bdsm", "😶 BDSM"),
("audiophile", "🎧 Аудиофил"),
("cooking", "🍲 Готовлю"),
("games", "🎮 Геймер"),
]
PERSONAL = [
("optimism", "👍 Оптимист"),
("pessimism", "👎 Пессимист"),
("bureaucrat", "👨💼 Бюрократ"),
("experiments", "🧪 Экспериментатор"),
("work-hard", "👩💻 Трудоголик"),
("single", "❣️ Одинок"),
("family", "👨👩👦 Семьянин"),
("no_kids", "😎 Без детей"),
("nomad", "🏝 Номад"),
("extrovert", "👯♂️ Люблю людей"),
("introvert", "🧘♂️ Люблю уединение"),
("feminism", "👩🏫 Феминист*ка"),
("control", "🎛 Контрол-фрик"),
("mentor", "👨🏫 Ментор"),
("stoicism", "💪 Стоицизм"),
("business", "🚀 Свой бизнес"),
("abroad", "🚜 Пора валить"),
("smoker", "🚬 Курю"),
("beer", "🍻 Пиво"),
("wine", "🍷 Винишко"),
("non-drinker", "🚱 Не пью"),
("coffee", "☕️ Кофе"),
("tea", "🍵 Чай"),
("meditation", "👁 Медитирую"),
("tattoo", "👩🎤 Есть тату"),
("burnout", "🥵 Выгорал"),
("therapy", "👌 Хожу к терапевту"),
("vegan", "🍏 Веган"),
("healthy-food", "🥑 Здоровое питание"),
("cynic", "😕 Циник"),
("cats", "😽 Котики"),
("dogs", "🐶 Пёсики"),
("cities", "🌃 Большие города"),
("villages", "🏘 Уютные деревушки"),
("early-bird", "🌅 Жаворонок"),
("night-life", "🌜 Сова"),
("linux", "🐧 Linux"),
("mac", "🍎 Mac"),
("windows", "💻 Windows"),
("ios", "⌚️ iOS"),
("android", "📱 Android"),
("ps", "🎮 PlayStation"),
("xbox", "🕹 Xbox"),
("pc", "🖥 Пэка"),
("conservative", "📜 Консерватизм"),
("centrism", "🎯 Центризм"),
("libertarianism", "🗽 Либертарианство"),
("parties", "🎉 Тусовщик"),
]
TECH = [
]
CLUB = [
("can_meet", "🎉 Открыт к общению"),
("search_events", "🤪️ За движ"),
("can_beer", "🍻 Можем выпить по пиву"),
("can_office", "🏢 Могу показать офис"),
("can_coffee", "☕️ Готов выпить кофе"),
("can_city", "🗼 Могу показать город"),
("can_refer", "💼 Могу зареферить в компанию"),
("can_sleep", "🛌 Могу вписать на ночь"),
("can_travel", "🏔 Можем вместе попутешествовать"),
("can_advice", "👍 Готов помочь советом"),
("search_friend", "🤟 Хочу дружить"),
("can_project", "👷♂️ Могу поучаствовать в проекте"),
("search_employees", "🔍 Ищу сотрудников"),
("can_teach", "🎸 Могу проконсультировать за деньги"),
("search_idea", "️🙇♂️ Ищу идеи"),
("can_idea", "💡 У меня куча идей"),
("can_invest", "💸 Могу проинвестировать"),
("search_mentor", "🤓 Ищу ментора"),
("can_mentor", "🧐 Могу поменторить"),
("can_hobby", "🏓 Ищу приятеля по хобби"),
("search_job", "🔎 Ищу работу"),
("search_remote", "🛋 Хочу удалёнку"),
("search_relocate", "🚜 Хочу релокацию"),
]
|
the-stack_0_19636 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow import models
from airflow.models.pool import Pool
from airflow.api.common.experimental import pool as pool_api
from airflow.exceptions import AirflowBadRequest, PoolNotFound
from airflow.utils.db import create_session
from tests.test_utils.db import clear_db_pools
class TestPool(unittest.TestCase):
USER_POOL_COUNT = 2
TOTAL_POOL_COUNT = USER_POOL_COUNT + 1 # including default_pool
def setUp(self):
clear_db_pools()
self.pools = [Pool.get_default_pool()]
for i in range(self.USER_POOL_COUNT):
name = 'experimental_%s' % (i + 1)
pool = models.Pool(
pool=name,
slots=i,
description=name,
)
self.pools.append(pool)
with create_session() as session:
session.add_all(self.pools)
def test_get_pool(self):
pool = pool_api.get_pool(name=self.pools[0].pool)
self.assertEqual(pool.pool, self.pools[0].pool)
def test_get_pool_non_existing(self):
self.assertRaisesRegex(PoolNotFound,
"^Pool 'test' doesn't exist$",
pool_api.get_pool,
name='test')
def test_get_pool_bad_name(self):
for name in ('', ' '):
self.assertRaisesRegex(AirflowBadRequest,
"^Pool name shouldn't be empty$",
pool_api.get_pool,
name=name)
def test_get_pools(self):
pools = sorted(pool_api.get_pools(),
key=lambda p: p.pool)
self.assertEqual(pools[0].pool, self.pools[0].pool)
self.assertEqual(pools[1].pool, self.pools[1].pool)
def test_create_pool(self):
pool = pool_api.create_pool(name='foo',
slots=5,
description='')
self.assertEqual(pool.pool, 'foo')
self.assertEqual(pool.slots, 5)
self.assertEqual(pool.description, '')
with create_session() as session:
self.assertEqual(session.query(models.Pool).count(), self.TOTAL_POOL_COUNT + 1)
def test_create_pool_existing(self):
pool = pool_api.create_pool(name=self.pools[0].pool,
slots=5,
description='')
self.assertEqual(pool.pool, self.pools[0].pool)
self.assertEqual(pool.slots, 5)
self.assertEqual(pool.description, '')
with create_session() as session:
self.assertEqual(session.query(models.Pool).count(), self.TOTAL_POOL_COUNT)
def test_create_pool_bad_name(self):
for name in ('', ' '):
self.assertRaisesRegex(AirflowBadRequest,
"^Pool name shouldn't be empty$",
pool_api.create_pool,
name=name,
slots=5,
description='')
def test_create_pool_bad_slots(self):
self.assertRaisesRegex(AirflowBadRequest,
"^Bad value for `slots`: foo$",
pool_api.create_pool,
name='foo',
slots='foo',
description='')
def test_delete_pool(self):
pool = pool_api.delete_pool(name=self.pools[-1].pool)
self.assertEqual(pool.pool, self.pools[-1].pool)
with create_session() as session:
self.assertEqual(session.query(models.Pool).count(), self.TOTAL_POOL_COUNT - 1)
def test_delete_pool_non_existing(self):
self.assertRaisesRegex(pool_api.PoolNotFound,
"^Pool 'test' doesn't exist$",
pool_api.delete_pool,
name='test')
def test_delete_pool_bad_name(self):
for name in ('', ' '):
self.assertRaisesRegex(AirflowBadRequest,
"^Pool name shouldn't be empty$",
pool_api.delete_pool,
name=name)
def test_delete_default_pool_not_allowed(self):
with self.assertRaisesRegex(AirflowBadRequest,
"^default_pool cannot be deleted$"):
pool_api.delete_pool(Pool.DEFAULT_POOL_NAME)
if __name__ == '__main__':
unittest.main()
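# Illustrative sketch of the API exercised above (outside the test harness an
# initialized Airflow metadata database is assumed):
#
#   from airflow.api.common.experimental import pool as pool_api
#
#   pool_api.create_pool(name='etl', slots=4, description='ETL tasks')
#   pool_api.get_pool(name='etl').slots  # -> 4
#   pool_api.delete_pool(name='etl')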
|
the-stack_0_19637 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import io
import logging
import contextlib
import os
import datetime
import json
import numpy as np
from PIL import Image
from fvcore.common.timer import Timer
from detectron2.structures import BoxMode, PolygonMasks, Boxes
from fvcore.common.file_io import PathManager
from .. import MetadataCatalog, DatasetCatalog
"""
This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format".
"""
logger = logging.getLogger(__name__)
__all__ = ["load_coco_json", "load_sem_seg"]
def load_coco_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
"""
Load a json file with COCO's instances annotation format.
Currently supports instance detection, instance segmentation,
and person keypoints annotations.
Args:
json_file (str): full path to the json file in COCO instances annotation format.
image_root (str): the directory where the images in this json file exists.
dataset_name (str): the name of the dataset (e.g., coco_2017_train).
If provided, this function will also put "thing_classes" into
the metadata associated with this dataset.
extra_annotation_keys (list[str]): list of per-annotation keys that should also be
loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints",
"category_id", "segmentation"). The values for these keys will be returned as-is.
For example, the densepose annotations are loaded in this way.
Returns:
list[dict]: a list of dicts in Detectron2 standard format. (See
`Using Custom Datasets </tutorials/datasets.html>`_ )
Notes:
1. This function does not read the image files.
The results do not have the "image" field.
"""
from pycocotools.coco import COCO
timer = Timer()
json_file = PathManager.get_local_path(json_file)
with contextlib.redirect_stdout(io.StringIO()):
coco_api = COCO(json_file)
if timer.seconds() > 1:
logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
id_map = None
if dataset_name is not None:
meta = MetadataCatalog.get(dataset_name)
cat_ids = sorted(coco_api.getCatIds())
cats = coco_api.loadCats(cat_ids)
# The categories in a custom json file may not be sorted.
thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])]
meta.thing_classes = thing_classes
# In COCO, certain category ids are artificially removed,
# and by convention they are always ignored.
# We deal with COCO's id issue and translate
# the category ids to contiguous ids in [0, 80).
# It works by looking at the "categories" field in the json, therefore
# if users' own json also have incontiguous ids, we'll
# apply this mapping as well but print a warning.
if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):
if "coco" not in dataset_name:
logger.warning(
"""
Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.
"""
)
id_map = {v: i for i, v in enumerate(cat_ids)}
meta.thing_dataset_id_to_contiguous_id = id_map
# sort indices for reproducible results
img_ids = sorted(list(coco_api.imgs.keys()))
# imgs is a list of dicts, each looks something like:
# {'license': 4,
# 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
# 'file_name': 'COCO_val2014_000000001268.jpg',
# 'height': 427,
# 'width': 640,
# 'date_captured': '2013-11-17 05:57:24',
# 'id': 1268}
imgs = coco_api.loadImgs(img_ids)
# anns is a list[list[dict]], where each dict is an annotation
# record for an object. The inner list enumerates the objects in an image
# and the outer list enumerates over images. Example of anns[0]:
# [{'segmentation': [[192.81,
# 247.09,
# ...
# 219.03,
# 249.06]],
# 'area': 1035.749,
# 'iscrowd': 0,
# 'image_id': 1268,
# 'bbox': [192.81, 224.8, 74.73, 33.43],
# 'category_id': 16,
# 'id': 42986},
# ...]
anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
if "minival" not in json_file:
# The popular valminusminival & minival annotations for COCO2014 contain this bug.
# However the ratio of buggy annotations there is tiny and does not affect accuracy.
# Therefore we explicitly white-list them.
ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format(
json_file
)
imgs_anns = list(zip(imgs, anns))
logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file))
dataset_dicts = []
ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (extra_annotation_keys or [])
num_instances_without_valid_segmentation = 0
for (img_dict, anno_dict_list) in imgs_anns:
record = {}
record["file_name"] = os.path.join(image_root, img_dict["file_name"])
record["height"] = img_dict["height"]
record["width"] = img_dict["width"]
image_id = record["image_id"] = img_dict["id"]
objs = []
for anno in anno_dict_list:
# Check that the image_id in this annotation is the same as
# the image_id we're looking at.
# This fails only when the data parsing logic or the annotation file is buggy.
# The original COCO valminusminival2014 & minival2014 annotation files
# actually contains bugs that, together with certain ways of using COCO API,
# can trigger this assertion.
assert anno["image_id"] == image_id
assert anno.get("ignore", 0) == 0
obj = {key: anno[key] for key in ann_keys if key in anno}
segm = anno.get("segmentation", None)
if segm: # either list[list[float]] or dict(RLE)
if not isinstance(segm, dict):
# filter out invalid polygons (< 3 points)
segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
if len(segm) == 0:
num_instances_without_valid_segmentation += 1
continue # ignore this instance
obj["segmentation"] = segm
keypts = anno.get("keypoints", None)
if keypts: # list[int]
for idx, v in enumerate(keypts):
if idx % 3 != 2:
# COCO's segmentation coordinates are floating points in [0, H or W],
# but keypoint coordinates are integers in [0, H-1 or W-1]
# Therefore we assume the coordinates are "pixel indices" and
# add 0.5 to convert to floating point coordinates.
keypts[idx] = v + 0.5
obj["keypoints"] = keypts
obj["bbox_mode"] = BoxMode.XYWH_ABS
if id_map:
obj["category_id"] = id_map[obj["category_id"]]
objs.append(obj)
record["annotations"] = objs
dataset_dicts.append(record)
if num_instances_without_valid_segmentation > 0:
logger.warn(
"Filtered out {} instances without valid segmentation. "
"There might be issues in your dataset generation process.".format(
num_instances_without_valid_segmentation
)
)
return dataset_dicts
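# Illustrative usage sketch (the paths and dataset name below are placeholders
# following detectron2's standard COCO layout):
#
#   dicts = load_coco_json(
#       "datasets/coco/annotations/instances_val2017.json",
#       "datasets/coco/val2017",
#       dataset_name="coco_2017_val",
#   )
#   print(len(dicts), dicts[0]["file_name"])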
def load_sem_seg(gt_root, image_root, gt_ext="png", image_ext="jpg"):
"""
Load semantic segmentation datasets. All files under "gt_root" with "gt_ext" extension are
treated as ground truth annotations and all files under "image_root" with "image_ext" extension
as input images. Ground truth and input images are matched using file paths relative to
"gt_root" and "image_root" respectively without taking into account file extensions.
This works for COCO as well as some other datasets.
Args:
gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation
annotations are stored as images with integer values in pixels that represent
corresponding semantic labels.
image_root (str): the directory where the input images are.
gt_ext (str): file extension for ground truth annotations.
image_ext (str): file extension for input images.
Returns:
list[dict]:
a list of dicts in detectron2 standard format without instance-level
annotation.
Notes:
1. This function does not read the image and ground truth files.
The results do not have the "image" and "sem_seg" fields.
"""
# We match input images with ground truth based on their relative filepaths (without file
# extensions) starting from 'image_root' and 'gt_root' respectively.
def file2id(folder_path, file_path):
# extract relative path starting from `folder_path`
image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path))
# remove file extension
image_id = os.path.splitext(image_id)[0]
return image_id
input_files = sorted(
(os.path.join(image_root, f) for f in PathManager.ls(image_root) if f.endswith(image_ext)),
key=lambda file_path: file2id(image_root, file_path),
)
gt_files = sorted(
(os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)),
key=lambda file_path: file2id(gt_root, file_path),
)
assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root)
# Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images
if len(input_files) != len(gt_files):
        logger.warning(
"Directory {} and {} has {} and {} files, respectively.".format(
image_root, gt_root, len(input_files), len(gt_files)
)
)
input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files]
gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files]
intersect = list(set(input_basenames) & set(gt_basenames))
# sort, otherwise each worker may obtain a list[dict] in different order
intersect = sorted(intersect)
logger.warn("Will use their intersection of {} files.".format(len(intersect)))
input_files = [os.path.join(image_root, f + image_ext) for f in intersect]
gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect]
logger.info(
"Loaded {} images with semantic segmentation from {}".format(len(input_files), image_root)
)
dataset_dicts = []
for (img_path, gt_path) in zip(input_files, gt_files):
record = {}
record["file_name"] = img_path
record["sem_seg_file_name"] = gt_path
with PathManager.open(gt_path, "rb") as f:
img = Image.open(f)
w, h = img.size
record["height"] = h
record["width"] = w
dataset_dicts.append(record)
return dataset_dicts
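# Hypothetical usage sketch (not in the original source); the directory paths are
# placeholders. load_sem_seg pairs images with ground-truth masks purely by relative
# file path, so only the two roots and the file extensions are needed.
def _example_load_sem_seg(image_root="datasets/my/images", gt_root="datasets/my/masks"):
    dicts = load_sem_seg(gt_root, image_root, gt_ext="png", image_ext="jpg")
    # each record carries file names and image size, but not the pixel data itself
    return dicts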
def convert_to_coco_dict(dataset_name):
"""
Convert a dataset in detectron2's standard format into COCO json format
Generic dataset description can be found here:
https://detectron2.readthedocs.io/tutorials/datasets.html#register-a-dataset
COCO data format description can be found here:
http://cocodataset.org/#format-data
Args:
dataset_name:
name of the source dataset
            must be registered in DatasetCatalog and in detectron2's standard format
Returns:
coco_dict: serializable dict in COCO json format
"""
dataset_dicts = DatasetCatalog.get(dataset_name)
categories = [
{"id": id, "name": name}
for id, name in enumerate(MetadataCatalog.get(dataset_name).thing_classes)
]
logger.info("Converting dataset dicts into COCO format")
coco_images = []
coco_annotations = []
for image_id, image_dict in enumerate(dataset_dicts):
coco_image = {
"id": image_dict.get("image_id", image_id),
"width": image_dict["width"],
"height": image_dict["height"],
"file_name": image_dict["file_name"],
}
coco_images.append(coco_image)
anns_per_image = image_dict["annotations"]
for annotation in anns_per_image:
# create a new dict with only COCO fields
coco_annotation = {}
# COCO requirement: XYWH box format
bbox = annotation["bbox"]
bbox_mode = annotation["bbox_mode"]
bbox = BoxMode.convert(bbox, bbox_mode, BoxMode.XYWH_ABS)
# COCO requirement: instance area
if "segmentation" in annotation:
# Computing areas for instances by counting the pixels
segmentation = annotation["segmentation"]
# TODO: check segmentation type: RLE, BinaryMask or Polygon
polygons = PolygonMasks([segmentation])
area = polygons.area()[0].item()
else:
# Computing areas using bounding boxes
bbox_xy = BoxMode.convert(bbox, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
area = Boxes([bbox_xy]).area()[0].item()
if "keypoints" in annotation:
keypoints = annotation["keypoints"] # list[int]
for idx, v in enumerate(keypoints):
if idx % 3 != 2:
# COCO's segmentation coordinates are floating points in [0, H or W],
# but keypoint coordinates are integers in [0, H-1 or W-1]
                        # For COCO format consistency we subtract 0.5
# https://github.com/facebookresearch/detectron2/pull/175#issuecomment-551202163
keypoints[idx] = v - 0.5
if "num_keypoints" in annotation:
num_keypoints = annotation["num_keypoints"]
else:
num_keypoints = sum(kp > 0 for kp in keypoints[2::3])
# COCO requirement:
# linking annotations to images
# "id" field must start with 1
coco_annotation["id"] = len(coco_annotations) + 1
coco_annotation["image_id"] = coco_image["id"]
coco_annotation["bbox"] = [round(float(x), 3) for x in bbox]
coco_annotation["area"] = area
coco_annotation["category_id"] = annotation["category_id"]
coco_annotation["iscrowd"] = annotation.get("iscrowd", 0)
# Add optional fields
if "keypoints" in annotation:
coco_annotation["keypoints"] = keypoints
coco_annotation["num_keypoints"] = num_keypoints
if "segmentation" in annotation:
coco_annotation["segmentation"] = annotation["segmentation"]
coco_annotations.append(coco_annotation)
logger.info(
"Conversion finished, "
f"num images: {len(coco_images)}, num annotations: {len(coco_annotations)}"
)
info = {
"date_created": str(datetime.datetime.now()),
"description": "Automatically generated COCO json file for Detectron2.",
}
coco_dict = {
"info": info,
"images": coco_images,
"annotations": coco_annotations,
"categories": categories,
"licenses": None,
}
return coco_dict
def convert_to_coco_json(dataset_name, output_folder="", allow_cached=True):
"""
Converts dataset into COCO format and saves it to a json file.
    dataset_name must be registered in DatasetCatalog and in detectron2's standard format.
Args:
dataset_name:
reference from the config file to the catalogs
            must be registered in DatasetCatalog and in detectron2's standard format
output_folder: where json file will be saved and loaded from
allow_cached: if json file is already present then skip conversion
Returns:
cache_path: path to the COCO-format json file
"""
# TODO: The dataset or the conversion script *may* change,
# a checksum would be useful for validating the cached data
cache_path = os.path.join(output_folder, f"{dataset_name}_coco_format.json")
PathManager.mkdirs(output_folder)
if os.path.exists(cache_path) and allow_cached:
logger.info(f"Reading cached annotations in COCO format from:{cache_path} ...")
else:
logger.info(f"Converting dataset annotations in '{dataset_name}' to COCO format ...)")
coco_dict = convert_to_coco_dict(dataset_name)
with PathManager.open(cache_path, "w") as json_file:
logger.info(f"Caching annotations in COCO format: {cache_path}")
json.dump(coco_dict, json_file)
return cache_path
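# Hypothetical call sketch (not in the original file): convert a registered dataset once
# and cache the json under the training output directory. The names are placeholders.
def _example_cache_coco_json(dataset_name="my_coco_train", output_dir="./output"):
    return convert_to_coco_json(dataset_name, output_folder=output_dir, allow_cached=True)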
if __name__ == "__main__":
"""
Test the COCO json dataset loader.
Usage:
python -m detectron2.data.datasets.coco \
path/to/json path/to/image_root dataset_name
"dataset_name" can be "coco_2014_minival_100", or other
pre-registered ones
"""
from detectron2.utils.logger import setup_logger
from detectron2.utils.visualizer import Visualizer
import detectron2.data.datasets # noqa # add pre-defined metadata
import sys
logger = setup_logger(name=__name__)
assert sys.argv[3] in DatasetCatalog.list()
meta = MetadataCatalog.get(sys.argv[3])
dicts = load_coco_json(sys.argv[1], sys.argv[2], sys.argv[3])
logger.info("Done loading {} samples.".format(len(dicts)))
dirname = "coco-data-vis"
os.makedirs(dirname, exist_ok=True)
for d in dicts:
img = np.array(Image.open(d["file_name"]))
visualizer = Visualizer(img, metadata=meta)
vis = visualizer.draw_dataset_dict(d)
fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
vis.save(fpath)
|
the-stack_0_19638 | # coding: utf-8
from __future__ import absolute_import
import time
from cardpay import *
from config import *
from recurrings import do_cancel_subscription, create_recurring_request, fetch_recurring, create_recurring_plan_request
from recurrings.scheduled import do_create_plan
from utils.http_utils import do_get
logger = create_logger(__name__)
config = Configuration(base_url=CARDPAY_API_URL, terminal_code=GATEWAY_POSTPONED_TERMINAL_CODE, password=GATEWAY_POSTPONED_PASSWORD, debug=DEBUG_MODE)
recurrings = RecurringsApi(ApiClient(config))
subscription_id = None
def teardown_function():
global subscription_id
do_cancel_subscription(recurrings, subscription_id) if subscription_id is not None else None
def test_hold_subscription():
global subscription_id
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Phase 1: prepare a new plan
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
plan_id = do_create_plan(recurrings)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Phase 2: create scheduled subscription
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
recurring_request = create_recurring_request(plan_id=plan_id)
# perform create scheduled subscription
creation_response = recurrings.create_recurring(recurring_request)
logger.info(creation_response)
# get redirect url
redirect_url = creation_response.redirect_url
assert redirect_url is not None
# Emulate customer behaviour performing GET request to redirect url
do_get(redirect_url)
recurring_response = fetch_recurring(recurrings, recurring_request.merchant_order.id)
assert recurring_response is not None
subscription_id = recurring_response.recurring_data.subscription.id
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Phase 3: Change status of Scheduled subscription to INACTIVE
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
time.sleep(2)
# prepare subscription update request data
request = SubscriptionUpdateRequest(
request=ApiClient.uuid_request(),
operation=SubscriptionUpdateRequest.Operation.CHANGE_STATUS,
subscription_data=SubscriptionUpdateRequestSubscriptionData(status_to=SubscriptionUpdateRequestSubscriptionData.StatusTo.INACTIVE)
)
response = recurrings.update_subscription(subscription_id, request)
assert response is not None
# explore response result
data = response.subscription_data
assert data.is_executed
assert data.StatusTo.INACTIVE == data.status_to
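# Illustrative sketch only (not one of the original tests): re-activating the same
# subscription mirrors the deactivation step above, swapping StatusTo.INACTIVE for
# StatusTo.ACTIVE. Whether ACTIVE exists on this enum may depend on the SDK version.
def _example_reactivate_subscription(sub_id):
    request = SubscriptionUpdateRequest(
        request=ApiClient.uuid_request(),
        operation=SubscriptionUpdateRequest.Operation.CHANGE_STATUS,
        subscription_data=SubscriptionUpdateRequestSubscriptionData(
            status_to=SubscriptionUpdateRequestSubscriptionData.StatusTo.ACTIVE
        )
    )
    return recurrings.update_subscription(sub_id, request)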
|
the-stack_0_19639 | from typing import Dict, Any
import os.path as osp
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as tvm
# Texar Library
from texar.torch import ModuleBase
class SimpleFusionEncoder(ModuleBase):
r"""Visual feature extractor. Implementation is adapted
from
https://gitlab.int.petuum.com/shuxin.yao/image_report_generation/blob/master/implementation/Encoders/Encoders.py
Base encoder is set to be DenseNet121. Pretrained weights
are reused from https://github.com/berneylin/chexnet
NOTE: The output features are not a vector. Instead, we
treat the output from the feature layer of the densenet
as the features, and reshape it [batch size, outfeatures, -1]
"""
def __init__(self):
super().__init__()
self.cnn = tvm.densenet121(pretrained=True)
self._load_from_ckpt()
self.out_features = self.cnn.classifier.in_features
def _load_from_ckpt(self):
ckpt = './model.pth.tar'
if osp.exists(ckpt):
pretrained_weight = torch.load(ckpt)['state_dict']
new_state_dict = {}
prefix = 'module.dense_net_121.'
for k, v in pretrained_weight.items():
if 'classifier' not in k:
new_k = k[len(prefix):]
new_state_dict[new_k] = v
msg = self.cnn.load_state_dict(new_state_dict, strict=False)
assert set(msg.missing_keys) == {
"classifier.weight",
"classifier.bias"
}, set(msg.missing_keys)
else:
            warnings.warn("No pretrained model is loaded!")
def forward(self, images):
r"""
Extract visual features from the input images
Args:
images (torch.Tensor): dimension
[batch size, channels, height, width]
Returns:
res (torch.Tensor): dimension
[batch size, out_features, 49 = 7 * 7]
"""
batch_size = images.shape[0]
res = self.cnn.features(images)
res = res.view(batch_size, self.out_features, -1)
return res
class MLC(ModuleBase):
r"""Multilabel classifier
Args:
hparams (dict or HParams, optional): MLC hyperparameters.
Missing hyperparameters will be set to default values.
See :meth:`default_hparams` for the hyperparameter structure
and default values.
* fc_in_features (int): Dimension of input visual features
* num_tags (int): Number of tags in total
"""
def __init__(self, hparams=None):
super().__init__(hparams=hparams)
self.classifier = nn.Linear(
in_features=self.hparams.fc_in_features,
out_features=self.hparams.num_tags)
# As per the wingspan project
nn.init.kaiming_normal_(
self.classifier.weight, mode='fan_in')
self.classifier.bias.data.fill_(0)
def forward(self, visual_feature):
r"""Generate logits (scores) for all tags given
the input visual_feature
Args:
visual_feature (torch.Tensor): dimension
[batch size, num_visual_features, visual_dim]
Returns:
tag_scores (torch.Tensor): scores for all tags.
Dimension [batch size, num_tags]
"""
flat_feature = F.avg_pool1d(
visual_feature,
visual_feature.size(-1)
).squeeze(-1)
tag_scores = self.classifier(flat_feature)
return tag_scores
def get_tag_probs(self, visual_feature):
r"""Generate probability distributions for all tags given
the input visual_feature
Args:
visual_feature (torch.Tensor): dimension
[batch size, num_visual_features, visual_dim]
Returns:
tag_probs (torch.Tensor): probability distributions
for all tags. Dimension [batch size, num_tags]
"""
tag_scores = self.forward(visual_feature)
tag_probs = torch.sigmoid(tag_scores)
return tag_probs
@staticmethod
def default_hparams() -> Dict[str, Any]:
r"""Returns a dictionary of hyperparameters with default values.
Returns: (dict) default hyperparameters
"""
return {
'num_tags': 210,
'fc_in_features': 1024,
}
class MLCTrainer(ModuleBase):
r""" Trainer for the Multilabel classifier
Args:
hparams (dict or HParams, optional): MLCTrainer hyperparameters.
Missing hyperparameters will be set to default values.
See :meth:`default_hparams` for the hyperparameter structure
and default values.
* num_tags (int): Number of tags in total
* threshold (float): Threshold to determine if a tag is active
or not
* train_encoder (bool): indicate whether keep training
the encoder or not
"""
def __init__(self, hparams=None):
super().__init__(hparams=hparams)
self.extractor = SimpleFusionEncoder()
hparams_mlc = {
'num_tags': self.hparams.num_tags,
'fc_in_features': self.extractor.out_features,
}
self.mlc = MLC(hparams_mlc)
self.threshold = self.hparams.threshold
self.train_encoder = self.hparams.train_encoder
self.loss = nn.BCEWithLogitsLoss()
def forward(self, batch):
r"""Generate logits (scores) for all tags given
the input visual_feature
Args:
batch (tx.torch.data.Batch[str, Union[torch.Tensor, int]]):
* batch_size: batch size
* label: Dimension [batch size, num_tags]
* img_tensor: Dimension [batch size, channels, height, width]
* token_tensor: Dimension
[batch size, max_sentence_num + 1, max_word_num]
* stop_prob: Dimension [batch size, max_sentence_num + 1]
Returns:
loss (torch.float): classification loss
preds (torch.Tensor): indicators of whether a tag
is active. Dimension [batch size, num_tags]
probs (torch.Tensor): probability distributions
for all tags. Dimension [batch size, num_tags]
"""
if self.train_encoder:
visual_feature = self.extractor(batch.img_tensor)
else:
with torch.no_grad():
visual_feature = self.extractor(batch.img_tensor)
tag_scores = self.mlc(visual_feature)
loss = self.loss(tag_scores, batch.label)
probs = torch.sigmoid(tag_scores)
preds = (probs > self.threshold).to(torch.float)
return {"loss": loss, "preds": preds, "probs": probs}
@staticmethod
def default_hparams() -> Dict[str, Any]:
r"""Returns a dictionary of hyperparameters with default values.
Returns: (dict) default hyperparameters
"""
return {
'num_tags': 210,
'threshold': 0.5,
'train_encoder': False
}
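# A small usage sketch, not from the original module: it assumes a normalized 4-D image
# batch shaped [B, 3, H, W] and uses only the classes defined above.
def _example_tag_probs(images: torch.Tensor) -> torch.Tensor:
    """Return per-tag probabilities for a batch of images."""
    encoder = SimpleFusionEncoder()
    mlc = MLC({'num_tags': 210, 'fc_in_features': encoder.out_features})
    with torch.no_grad():
        visual_feature = encoder(images)          # [B, out_features, H' * W']
        return mlc.get_tag_probs(visual_feature)  # [B, num_tags]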
if __name__ == "__main__":
m = MLCTrainer()
|
the-stack_0_19642 | import numpy as np
from numpy import array
import math
from collections import namedtuple
from tsr import linear
from fox_toolbox.utils import volatility
"LINEAR TSR"
def minmax_strikes(vol, expiry, fwd, nb):
"""
NORMAL VOL
vol rates.Volatility
expiry
fwd
nb
'caplet strikes': (fwd, Kmax),
'floorlet strikes': (Kmin, fwd)
"""
n_inv = 5.730390
vol_atm = vol.value
std = n_inv*vol_atm*np.sqrt(expiry)
if vol.type == 'N':
Smin = fwd - std
Smax = fwd + std
kstep = (Smax - Smin)/nb
kmin = Smin + kstep
kmax = Smax - kstep
else:
        raise NotImplementedError('vol type other than N is not yet implemented')
minmax_strikes = namedtuple('minmax_strikes_nvol', 'kmin kmax kstep fwd volType')
return minmax_strikes(kmin, kmax, kstep, fwd, vol.type)
def build_strike_ladders(minmax_strikes, neff_capl, neff_floo):
if minmax_strikes.volType == 'N':
caplet_strikes = np.linspace(minmax_strikes.fwd, minmax_strikes.kmax, neff_capl)
floorlet_strikes = np.linspace(minmax_strikes.kmin, minmax_strikes.fwd, neff_floo)
else:
        raise NotImplementedError('vol type other than N is not yet implemented')
strike_ladders = namedtuple('strike_ladders', 'floorlet_ladder caplet_ladder')
return strike_ladders(floorlet_strikes, caplet_strikes)
def build_weights(minmax_strikes, neff_capl, neff_floo, tsr_coeff):
w0c = tsr_coeff.a * (minmax_strikes.fwd + minmax_strikes.kstep) + tsr_coeff.b
wic = [2 * tsr_coeff.a * minmax_strikes.kstep for _ in range(neff_capl - 1)]
wif = [-2 * tsr_coeff.a * minmax_strikes.kstep for _ in range(neff_floo - 1)]
w0f = tsr_coeff.a * (minmax_strikes.fwd - minmax_strikes.kstep) + tsr_coeff.b
tsr_weights = namedtuple('tsr_weights', 'capletWeights floorletWeights')
return tsr_weights(array([w0c] + wic), array(wif + [w0f]))
def get_DiscOverAnnuity(strikes, tsr_coeff):
return tsr_coeff.a * strikes + tsr_coeff.b
def get_neff(n):
return n
def tsr_model(swo, dsc_curve, estim_curve, n, mr, payment_date):
"""
swo - rates.Swaption with single or smile volatility
Model settings:
n - number of replication strikes
mr - mean reversion
"""
neff = get_neff(n)
fwd = swo.get_swap_rate(dsc_curve, estim_curve)
tsr_strikes = minmax_strikes(swo.vol, swo.expiry, fwd, neff)
neff_capl = math.ceil((tsr_strikes.kmax - tsr_strikes.fwd)/tsr_strikes.kstep) + 1
neff_floo = math.floor((tsr_strikes.fwd - tsr_strikes.kmin)/tsr_strikes.kstep) + 1
strikes_ladders = build_strike_ladders(tsr_strikes, neff_capl, neff_floo)
tsr_coeff = linear.get_coeff(payment_date, dsc_curve, swo, mr, estim_curve)
tsr_weights = build_weights(tsr_strikes, neff_capl, neff_floo, tsr_coeff)
myBachelierCaplet = array([volatility.BachelierPrice(F=swo.get_swap_rate(dsc_curve, estim_curve), K=strike, v=swo.vol.value*np.sqrt(swo.expiry))
* swo.get_annuity(dsc_curve) / dsc_curve.get_dsc(swo.start_date) for strike in strikes_ladders.caplet_ladder])
myBachelierFloorlet = array([volatility.BachelierPrice(F=swo.get_swap_rate(dsc_curve, estim_curve), K=strike, v=swo.vol.value*np.sqrt(swo.expiry), w=-1)
* swo.get_annuity(dsc_curve) / dsc_curve.get_dsc(swo.start_date) for strike in strikes_ladders.floorlet_ladder])
cms_caplet = tsr_weights.capletWeights.dot(myBachelierCaplet)
cms_floorlet = tsr_weights.floorletWeights.dot(myBachelierFloorlet)
cms_swaplet = cms_caplet - cms_floorlet + swo.strike * dsc_curve.get_fwd_dsc(swo.expiry, payment_date)
return cms_swaplet
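# Rough illustration only: the fox_toolbox volatility object is stubbed with a namedtuple
# exposing the two attributes minmax_strikes() actually reads (value and type); numbers
# are arbitrary and serve only to show the strike-ladder construction.
def _example_strike_ladders():
    FlatVol = namedtuple('FlatVol', 'value type')
    vol = FlatVol(value=0.0065, type='N')  # 65 bp normal vol
    strikes = minmax_strikes(vol, expiry=5.0, fwd=0.02, nb=20)
    ladders = build_strike_ladders(strikes, neff_capl=10, neff_floo=10)
    return strikes, ladders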
|
the-stack_0_19645 | """Prepare Handover."""
from typing import Dict, List
from .. import __handover_drs__, __handover_datasets__, __handover_base__
def add_handover(response: Dict) -> Dict:
"""Add handover to a dataset response."""
response["datasetHandover"] = make_handover(
__handover_datasets__,
[response["datasetId"]],
response["referenceName"],
response["start"],
response["end"],
response["referenceBases"],
response["alternateBases"],
response["variantType"],
)
return response
def make_handover(
paths: List[List[str]], datasetIds: List[str], chr: str = "", start: int = 0, end: int = 0, ref: str = "", alt: str = "", variant: str = ""
) -> List[Dict]:
"""Create one handover for each path (specified in config)."""
alt = alt if alt else variant
handovers = []
start = start + __handover_base__
end = end + __handover_base__
for label, desc, path in paths:
for dataset in set(datasetIds):
handovers.append(
{
"handoverType": {"id": "CUSTOM", "label": label},
"description": desc,
"url": __handover_drs__ + "/" + path.format(dataset=dataset, chr=chr, start=start, end=end, ref=ref, alt=alt),
}
)
return handovers
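# Hypothetical illustration (not part of the module): handover paths normally come from
# configuration; a single fake template is passed directly to show how the URL is
# assembled for one dataset and one variant.
def _example_single_handover():
    fake_paths = [["Variants", "Matching variants", "variants/{dataset}/{chr}:{start}-{end}"]]
    return make_handover(fake_paths, ["DATASET1"], chr="1", start=100, end=101, ref="A", alt="T")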
|
the-stack_0_19646 | import asyncio
from functools import partial
import pytest
from async_lru import alru_cache
alru_cache_attrs = [
'hits',
'misses',
'tasks',
'closed',
'cache_info',
'cache_clear',
'invalidate',
'close',
'open',
]
alru_cache_calable_attrs = alru_cache_attrs.copy()
for attr in ['hits', 'misses', 'tasks', 'closed']:
alru_cache_calable_attrs.remove(attr)
def test_alru_cache_not_callable(loop):
with pytest.raises(NotImplementedError):
alru_cache('foo')
def test_alru_cache_not_coroutine(loop):
with pytest.raises(RuntimeError):
@alru_cache
def not_coro(val):
return val
def test_alru_cache_deco(loop, check_lru):
asyncio.set_event_loop(loop)
@alru_cache
async def coro():
pass
assert asyncio.iscoroutinefunction(coro)
for attr in alru_cache_attrs:
assert hasattr(coro, attr)
for attr in alru_cache_calable_attrs:
assert callable(getattr(coro, attr))
assert isinstance(coro._cache, dict)
assert isinstance(coro.tasks, set)
check_lru(coro, hits=0, misses=0, cache=0, tasks=0)
assert asyncio.iscoroutine(coro())
def test_alru_cache_deco_called(check_lru, loop):
asyncio.set_event_loop(loop)
@alru_cache()
async def coro():
pass
assert asyncio.iscoroutinefunction(coro)
for attr in alru_cache_attrs:
assert hasattr(coro, attr)
for attr in alru_cache_calable_attrs:
assert callable(getattr(coro, attr))
assert isinstance(coro._cache, dict)
assert isinstance(coro.tasks, set)
check_lru(coro, hits=0, misses=0, cache=0, tasks=0)
assert asyncio.iscoroutine(coro())
def test_alru_cache_fn_called(check_lru, loop):
asyncio.set_event_loop(loop)
async def coro():
pass
coro_wrapped = alru_cache(coro)
assert asyncio.iscoroutinefunction(coro_wrapped)
for attr in alru_cache_attrs:
assert hasattr(coro_wrapped, attr)
for attr in alru_cache_calable_attrs:
assert callable(getattr(coro_wrapped, attr))
assert isinstance(coro_wrapped._cache, dict)
assert isinstance(coro_wrapped.tasks, set)
check_lru(coro_wrapped, hits=0, misses=0, cache=0, tasks=0)
assert asyncio.iscoroutine(coro_wrapped())
def test_alru_cache_origin(loop):
asyncio.set_event_loop(loop)
async def coro():
pass
coro_wrapped = alru_cache(coro)
assert coro_wrapped._origin is coro
coro_wrapped = alru_cache(partial(coro))
assert coro_wrapped._origin is coro
@pytest.mark.asyncio
async def test_alru_cache_await_same_result_async(check_lru, loop):
calls = 0
val = object()
@alru_cache(loop=loop)
async def coro():
nonlocal calls
calls += 1
return val
coros = [coro() for _ in range(100)]
ret = await asyncio.gather(*coros, loop=loop)
expected = [val] * 100
assert ret == expected
check_lru(coro, hits=99, misses=1, cache=1, tasks=0)
assert calls == 1
assert await coro() is val
check_lru(coro, hits=100, misses=1, cache=1, tasks=0)
@pytest.mark.asyncio
async def test_alru_cache_await_same_result_coroutine(check_lru, loop):
calls = 0
val = object()
@alru_cache(loop=loop)
@asyncio.coroutine
def coro():
nonlocal calls
calls += 1
return val
coros = [coro() for _ in range(100)]
ret = await asyncio.gather(*coros, loop=loop)
expected = [val] * 100
assert ret == expected
check_lru(coro, hits=99, misses=1, cache=1, tasks=0)
assert calls == 1
assert await coro() is val
check_lru(coro, hits=100, misses=1, cache=1, tasks=0)
@pytest.mark.asyncio
async def test_alru_cache_dict_not_shared(check_lru, loop):
async def coro(val):
return val
coro1 = alru_cache(loop=loop)(coro)
coro2 = alru_cache(loop=loop)(coro)
ret1 = await coro1(1)
check_lru(coro1, hits=0, misses=1, cache=1, tasks=0)
ret2 = await coro2(1)
check_lru(coro2, hits=0, misses=1, cache=1, tasks=0)
assert ret1 == ret2
assert coro1._cache[1].result() == coro2._cache[1].result()
assert coro1._cache != coro2._cache
assert coro1._cache.keys() == coro2._cache.keys()
assert coro1._cache is not coro2._cache
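# Informal sketch (not an actual test case): typical application-side usage, relying only
# on attributes already exercised above. The key and the sleep are placeholders for real
# I/O work.
async def _example_cached_fetch():
    @alru_cache()
    async def fetch(key):
        await asyncio.sleep(0)  # stand-in for a real network or database call
        return {'key': key}
    await fetch('a')
    await fetch('a')            # second call is served from the cache
    print(fetch.cache_info())   # hits=1, misses=1, ...
    fetch.invalidate('a')       # drop a single cached entry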
|
the-stack_0_19647 | import os
import pickle
import concurrent.futures
import sys
from argparse import ArgumentParser
import asyncio
from modules.util import Range
from tool.processing import process_task
from task import Task
import json
from output_event import OutputEvent
if sys.version_info[0] < 3:
raise Exception(
"You must use Python 3 or higher. Recommended version is Python 3.7")
async def main_human(task: Task):
async for event in process_task(task):
if event.EventType.IsError:
print("ERROR: " + event.EventType.Text + " Task time: " +
"{:.2f}s".format(event.Time) + " CANCELING TASK!")
else:
print(event.EventType.Text + " Time: " +
"{:.2f}s".format(event.Time))
async def main_api(task: Task):
async for event in process_task(task, h_progress=False):
print(json.dumps(event._asdict()),
file=sys.stderr if event.EventType.IsError else sys.stdout)
if event.EventType.IsError:
sys.exit(-1)
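# Illustrative only: a programmatic entry point mirroring what the argparse block below
# does. All paths are placeholders, and any Task field not set here is assumed to have a
# usable default.
def _example_run_single(source_image="input/source.png",
                        driving_video="input/driving.mp4",
                        result_video="output/result.mp4",
                        use_gpu=False):
    task = Task()
    task.source_images = [source_image]
    task.driving_video = driving_video
    task.result_videos = [result_video]
    task.gpu = use_gpu
    asyncio.get_event_loop().run_until_complete(main_human(task))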
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument(
"--config", default='config/vox-256.yaml', help="path to config")
parser.add_argument("--checkpoint", default='model/vox-cpk.pth.tar',
help="path to checkpoint to restore")
parser.add_argument(
"--source_images", help="paths to source images", nargs="+", required=True)
parser.add_argument(
"--driving_video", help="path to driving video", required=True)
parser.add_argument(
"--result_videos", help="path to output", nargs="+", required=True)
parser.add_argument("--adapt_scale", dest="adapt_scale", action="store_true",
help="adapt movement scale based on convex hull of keypoints")
parser.add_argument("--find_best_frame", dest="find_best_frame", action="store_true",
help="Generate from the frame that is the most alligned with source.")
parser.add_argument("--gpu", dest="gpu",
action="store_true", help="add CUDA support.")
parser.add_argument("--crop", dest="crop", action="store_true",
help="crop face in image and video.")
parser.add_argument("--image_padding", dest="image_padding", type=float, choices=[Range(0.0, 1.0)], default=0.2,
help="how much smaller face should be in the result video (range 0.0-1.0) (only if using ---crop)")
parser.add_argument("--audio", dest="audio", action="store_true",
help="save original audio in result.")
parser.add_argument("--clean_build", dest="clean_build", action="store_true",
help="do not use old temp data for video.")
parser.add_argument("--api", dest="api", action="store_true",
help="return json outputs instead of human readable ones.")
parser.set_defaults(relative=False)
if '--api' in sys.argv:
parser.error = lambda errmsg: (print(json.dumps(OutputEvent(
OutputEvent.Types.ERROR_ARGUMENT_PARSING, 0)._asdict()), file=sys.stderr), sys.exit(-1))
opt = parser.parse_args()
task = Task()
task.adapt_scale = opt.adapt_scale
task.audio = opt.audio
task.checkpoint = opt.checkpoint
task.clean_build = opt.clean_build
task.config = opt.config
task.crop = opt.crop
task.driving_video = opt.driving_video
    task.find_best_frame = opt.find_best_frame
task.gpu = opt.gpu
task.image_padding = opt.image_padding
task.result_videos = opt.result_videos
task.source_images = opt.source_images
loop = asyncio.get_event_loop()
try:
if not os.path.exists("temp"):
os.makedirs("temp")
if not os.path.exists("output"):
os.makedirs("output")
if opt.api:
loop.run_until_complete(main_api(task))
else:
loop.run_until_complete(main_human(task))
finally:
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
|
the-stack_0_19649 | # -*- coding: utf-8 -*-
#
# Dropwizard documentation build configuration file, created by
# sphinx-quickstart on Mon Feb 13 11:29:49 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['ytemplates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Dropwizard'
copyright = u'2011-2013, Coda Hale, Yammer Inc.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.6'
# The full version, including alpha/beta/rc tags.
release = '0.6.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'trac'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'yammerdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'tagline': u'Production-ready, out of the box.',
'gradient_start': u'#545d63',
'gradient_end': u'#182127',
'gradient_text': u'#ffffff',
'gradient_bg': u'#363F45',
'landing_logo': u'dropwizard-hat.png',
'landing_logo_width': u'150px',
'github_page': u'https://github.com/codahale/dropwizard',
'mailing_list': u'https://groups.google.com/forum/#!forum/dropwizard-user',
'maven_site': u'http://dropwizard.codahale.com/maven/'
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["./_themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = u'Dropwizard'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = u'dropwizard-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True
html_add_permalinks = None
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Dropwizarddoc'
todo_include_todos = True
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Dropwizard.tex', u'Dropwizard Documentation',
u'Coda Hale', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'dropwizard', u'Dropwizard Documentation',
[u'Coda Hale'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Dropwizard', u'Dropwizard Documentation',
u'Coda Hale', 'Dropwizard', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'Dropwizard'
epub_author = u'Coda Hale'
epub_publisher = u'Coda Hale'
epub_copyright = u'2012, Coda Hale'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
|
the-stack_0_19652 | import torch
import torch.nn as nn
def conv_block(in_channels, out_channels):
return nn.Sequential(
nn.LeakyReLU(0.2,inplace=True),
nn.Conv2d(in_channels, out_channels, 4, stride=2,padding=1),
nn.BatchNorm2d(out_channels)
)
def deconv_block(in_channels, out_channels,use_dropout=False):
layers = [
nn.ReLU(inplace=True),
nn.ConvTranspose2d(in_channels, out_channels, 4, stride=2,padding=1),
nn.BatchNorm2d(out_channels)
]
if use_dropout:
layers.append(nn.Dropout(0.5))
return nn.Sequential(*layers)
class UNet(nn.Module):
def __init__(self):
super().__init__()
self.conv_down1 = nn.Conv2d(3,64,4,stride=2,padding=1)
self.conv_down2 = conv_block(64,128)
self.conv_down3 = conv_block(128,256)
self.conv_down4 = conv_block(256,512)
self.conv_down5 = conv_block(512,512)
self.conv_down6 = conv_block(512,512)
self.conv_down7 = conv_block(512,512)
self.conv_down8 = conv_block(512,512)
self.conv_up1 = deconv_block(512,512,use_dropout=True)
self.conv_up2 = deconv_block(1024,512,use_dropout=True)
self.conv_up3 = deconv_block(1024,512,use_dropout=True)
self.conv_up4 = deconv_block(1024,512)
self.conv_up5 = deconv_block(1024,256)
self.conv_up6 = deconv_block(512,128)
self.conv_up7 = deconv_block(256,64)
self.conv_up8 = deconv_block(128,64)
        self.conv_up9 = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.ConvTranspose2d(64, 64, 3, stride=1,padding=1),
            nn.BatchNorm2d(64)
        )
self.conv_up10 = nn.Sequential(
nn.ReLU(inplace=True),
nn.ConvTranspose2d(64, 32, 3, stride=1,padding=1),
nn.BatchNorm2d(32)
)
self.conv_up11 = nn.Sequential(
nn.ReLU(inplace=True),
nn.ConvTranspose2d(32, 7, 3, stride=1,padding=1)
)
# TODO: rewrite nicely
def forward(self, x):
down1 = self.conv_down1(x)
down2 = self.conv_down2(down1)
down3 = self.conv_down3(down2)
down4 = self.conv_down4(down3)
down5 = self.conv_down5(down4)
down6 = self.conv_down6(down5)
down7 = self.conv_down7(down6)
down8 = self.conv_down8(down7)
up1 = self.conv_up1(down8)
up2 = self.conv_up2(torch.cat([up1, down7], dim=1))
up3 = self.conv_up3(torch.cat([up2, down6], dim=1))
up4 = self.conv_up4(torch.cat([up3, down5], dim=1))
up5 = self.conv_up5(torch.cat([up4, down4], dim=1))
up6 = self.conv_up6(torch.cat([up5, down3], dim=1))
up7 = self.conv_up7(torch.cat([up6, down2], dim=1))
up8 = self.conv_up8(torch.cat([up7, down1], dim=1))
up9 = self.conv_up9(up8)
up10 = self.conv_up10(up9)
up11 = self.conv_up11(up10)
return up11
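# Quick shape sanity check (not in the original file): eight stride-2 encoder blocks take
# a 256x256 input down to a 1x1 bottleneck, and the decoder returns a 7-channel map at
# the input resolution. eval() avoids BatchNorm's single-sample restriction at the
# bottleneck.
if __name__ == "__main__":
    net = UNet().eval()
    with torch.no_grad():
        out = net(torch.randn(1, 3, 256, 256))
    print(out.shape)  # expected: torch.Size([1, 7, 256, 256])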
|
the-stack_0_19653 | """String formatting for table entries."""
__all__ = ['default_formatter', 'Formatter', 'NumberFormatter',
'CurrencyFormatter', 'DateFormatter', 'PercentFormatter',
'DistributionFormatter']
import numpy as np
from datetime import datetime, timezone
class Formatter:
"""String formatter that truncates long values."""
min_width = 4
max_width = 60
etc = ' ...'
def __init__(self, min_width=None, max_width=None, etc=None):
if min_width is not None:
self.min_width = min_width
if max_width is not None:
self.max_width = max_width
if etc is not None:
self.etc = etc
def format_column(self, label, column):
"""Return a formatting function that pads & truncates values."""
if len(column) == 0:
val_width = 0
else:
val_width = max(len(self.format_value(v)) for v in column)
val_width = min(val_width, self.max_width)
width = max(val_width, len(str(label)), self.min_width, len(self.etc))
def pad(value, label=False):
if label:
raw = value
else:
raw = self.format_value(value)
if len(raw) > width:
prefix = raw[:width-len(self.etc)] + self.etc
else:
prefix = raw
return prefix.ljust(width)
return pad
@staticmethod
def format_value(value):
"""Pretty-print an arbitrary value."""
if isinstance(value, (bool, np.bool_)):
return str(value)
elif isinstance(value, (int, np.integer)):
return '{:,d}'.format(value)
elif isinstance(value, (float, np.floating)):
return '{:g}'.format(value)
else:
return str(value)
def convert_column(self, values):
"""Convert each value using the the convert_value method."""
return list(map(self.convert_value, values))
@staticmethod
def convert_value(value):
"""Identity conversion (override to convert values)."""
return value
@property
def converts_values(self):
"""Whether this Formatter also converts values."""
return self.convert_value is not Formatter.convert_value or \
self.convert_column is not Formatter.convert_column
default_formatter = Formatter()
class FunctionFormatter(Formatter):
"""Format values using a function."""
def __init__(self, fn):
self.format_value = lambda v: str(fn(v))
class NumberFormatter(Formatter):
"""Format numbers that may have delimiters."""
def __init__(self, decimals=2, decimal_point='.', separator=',', int_to_float=False, *args, **vargs):
super().__init__(*args, **vargs)
self.decimals = decimals
self.decimal_point = decimal_point
self.separator = separator
self.int_to_float = int_to_float
def convert_value(self, value):
"""Convert string 93,000.00 to float 93000.0."""
if isinstance(value, str):
value = value.replace(self.separator, '')
if self.decimal_point not in value:
return int(value)
else:
return float(value.replace(self.decimal_point, '.'))
elif self.int_to_float:
return float(value)
else:
return value
def format_value(self, value):
if isinstance(value, (int, np.integer)):
return ('{:' + self.separator + 'd}').format(value)
else:
return ('{:' + self.separator + '.' + str(self.decimals) + 'f}').format(value)
class CurrencyFormatter(NumberFormatter):
"""Format currency and convert to float."""
def __init__(self, symbol="$", *args, **vargs):
super().__init__(*args, **vargs)
assert isinstance(symbol, str)
self.symbol = symbol
def convert_value(self, value):
"""Convert value to float. If value is a string, ensure that the first
character is the same as symbol ie. the value is in the currency this
formatter is representing.
"""
if isinstance(value, str):
assert value.startswith(self.symbol), "Currency does not start with " + self.symbol
value = value.lstrip(self.symbol)
return super().convert_value(value)
def format_value(self, value):
"""Format currency."""
return self.symbol + super().format_value(value)
class DateFormatter(Formatter):
"""Format date & time and convert to UNIX timestamp."""
def __init__(self, format="%Y-%m-%d %H:%M:%S.%f", *args, **vargs):
super().__init__(*args, **vargs)
assert isinstance(format, str)
self.format = format
def convert_value(self, value):
"""Convert 2015-08-03 to a Unix timestamp int."""
return datetime.strptime(value, self.format).timestamp()
def format_value(self, value):
"""Format timestamp as a string."""
return datetime.fromtimestamp(value).strftime(self.format)
class PercentFormatter(Formatter):
"""Format a number as a percentage."""
def __init__(self, decimals=2, *args, **vargs):
super().__init__(*args, **vargs)
assert isinstance(decimals, int)
self.decimals = decimals
def format_value(self, value):
"""Format number as percentage."""
return ('{:.' + str(self.decimals) + '%}').format(value)
class DistributionFormatter(PercentFormatter):
"""Normalize a column and format as percentages."""
def convert_column(self, values):
"""Normalize values."""
assert all(values >= 0), 'Cannot normalize a column with negatives'
total = sum(values)
if total > 0:
return values / total
else:
return values
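# Brief illustration, not part of the original module: NumberFormatter both parses
# delimited strings and renders numbers, while PercentFormatter only affects display.
def _example_formatters():
    nf = NumberFormatter(decimals=2)
    assert nf.convert_value("93,000.00") == 93000.0
    assert nf.format_value(1234567) == '1,234,567'
    pf = PercentFormatter(decimals=1)
    assert pf.format_value(0.256) == '25.6%'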
|
the-stack_0_19662 | #!/usr/bin/python
import os
import subprocess
import time
import datetime
from random import shuffle
import random
file = open("/home/pi/gitprojects/rasberryPiAlarm/cronOutput.txt","a")
y =datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
file.write("the time is "+y+"\n")
dirNpath = "/home/pi/gitprojects/alarm/";
dirpath = dirNpath+"/music"
alarmdir = os.listdir(dirpath)
shuffle(alarmdir)
for files in alarmdir:
if (files.endswith(".mp3") or files.endswith(".mp4")):
command1 = 'omxplayer '+dirpath+'/'+files
file.write("\t command executed : "+command1+"\n")
# os.system('omxplayer '+dirpath+'/'+files)
proc = subprocess.Popen("omxplayer " +dirpath+'/'+files, shell=True)
ranNumber = random.randint(22, 28)*10
time.sleep(ranNumber)
os.system("sudo ps -ef | grep omx | grep -v grep | awk '{print $2}' | xargs kill")
file.close()
exit()
# os.system('omxplayer /home/pi/Downloads/music/omChant.mp4')
# proc = subprocess.Popen("omChant", shell=True)
# exit();
# print(os.getcwd())
#
# if(files.endswith(".mp3")):
# print(files)
# files = files.replace(" ", "\ ")
# proc = subprocess.Popen("omxplayer "+files, shell=True)
# pid = proc.pid
# time.sleep(10)
# id = os.system("pgrep omxplayer")
# os.system("killall omxplayer.bin ")
# time.sleep(3)
# exit()
# pid = proc.pid
# time.sleep(50)
# proc.kill()
# print(pid)
# id = os.system("pgrep omxplayer")
# print('ID of omx = '+str(id))
# print(id)
# time.sleep(50)
# print(os.getpid("omx"))
#os.system("kill "+str(id))
#os.system("omxplayer Kuch\ kar\ gujarne\ ka.mp3 &")
#raw_input()
# proc = subprocess.Popen("omxplayer Kuch\ kar\ gujarne\ ka.mp3", shell=True)
# time.sleep(5)
# print(proc.pid)
# proc.kill()
# proc.terminate()
# id = os.system("pgrep omxplayer")
# os.system("kill "+str(id))
|
the-stack_0_19664 | import json
import os
import time
from sys import maxsize as infinity
import chess
import numpy as np
import tensorflow as tf
from flask import Flask, jsonify, request, send_from_directory
from flask_cors import CORS
from keras.models import model_from_json
app = Flask(__name__)
CORS(app)
@app.route('/', methods=['GET'])
def process():
if request.method == 'GET':
input_data = dict(request.args)
if input_data == {}:
return app.send_static_file('chess.html')
else:
print('------------------------')
print('Input Data: ', input_data)
print('------------------------')
move = input_data['move_sent_to_py'][0]
position = input_data['position'][0]
computer_color = input_data['ComputerColor'][0]
engine = Engine(move, position, computer_color)
jsonified = jsonify(engine.build_output_data())
return jsonified
class Engine():
def __init__(self, move_recieved, position_recieved, computer_color):
''' Chess engine.
        Receives
move recieved in uci form (i.e. a1b1)
position recieved in FEN notation
computer colour as 'w' or 'b'
Returns a dict
['move from'] = int between 0 and 63
['move to'] = int between 0 and 63
['bits'] = position in binary form (explained in build_binary_move)
['position'] = position in FEN notation
'''
print('Last user move made: ', move_recieved)
print('Last position recorded: ', position_recieved)
self.pieces = {
'P': 1,
'N': 2,
'B': 3,
'R': 4,
'Q': 5,
'K': 6,
'p': 7,
'n': 8,
'b': 9,
'r': 10,
'q': 11,
'k': 12
}
self.side = computer_color
self.first_move = False
self.move_recieved = move_recieved
if position_recieved == 'None':
self.board = chess.Board()
self.first_move = True
else:
self.board = chess.Board(position_recieved)
self.last_turn = self.board.fen().split()[1]
self.turns = int(self.board.fen().split()[5])
print(self.last_turn, ' played their move')
if self.move_recieved != 'None':
self.check_for_promotion(*squares_to_numbers(move_recieved,
mirror=False))
self.move_recieved = chess.Move.from_uci(move_recieved)
self.board.push(self.move_recieved)
def check_for_promotion(self, mv_frm, mv_to):
'Checks for client-side promotion'
white_queen = chess.Piece(piece_type=chess.QUEEN, color=chess.WHITE)
black_queen = chess.Piece(piece_type=chess.QUEEN, color=chess.BLACK)
if self.board.piece_at(mv_frm).piece_type == chess.PAWN:
if chess.square_rank(mv_to) == 7:
if self.board.piece_at(mv_frm).color == chess.WHITE:
promoted_binary = bin(self.pieces['Q'])[2:].zfill(4)
self.board.set_piece_at(square=mv_frm,
piece=white_queen)
elif chess.square_rank(mv_to) == 0:
if self.board.piece_at(mv_frm).color == chess.BLACK:
promoted_binary = bin(self.pieces['q'])[2:].zfill(4)
self.board.set_piece_at(square=mv_frm,
piece=black_queen)
def build_binary_move(self):
'''Javascript chess gui uses binary encoding to represent moves:
0000 0000 0000 0000 0000 0111 1111 -> From 0x7F
0000 0000 0000 0011 1111 1000 0000 -> To >> 7, 0x7F
0000 0000 0011 1100 0000 0000 0000 -> Captured >> 14, 0xF
0000 0000 0100 0000 0000 0000 0000 -> EP 0x40000
0000 0000 1000 0000 0000 0000 0000 -> Pawn Start 0x80000
0000 1111 0000 0000 0000 0000 0000 -> Promoted Piece >> 20, 0xF
0001 0000 0000 0000 0000 0000 0000 -> Castle 0x1000000
This function takes moves made and converts them to binary form.
'''
captured_piece_binary = '0000'
en_passant_binary = '0'
pawn_start_binary = '0'
promoted_binary = '0000'
castling_binary = '0'
white_queen = chess.Piece(piece_type=chess.QUEEN, color=chess.WHITE)
black_queen = chess.Piece(piece_type=chess.QUEEN, color=chess.BLACK)
if self.board.is_capture(self.uci_move):
if self.board.is_en_passant(self.uci_move):
                # the pawn captured en passant is not on the destination square;
                # it is always an opposing pawn of the side to move
                captured_piece = chess.Piece(chess.PAWN, not self.board.turn)
else:
captured_piece = self.board.piece_at(self.move_to_square)
captured_piece_binary = bin(self.pieces[str(captured_piece)])[2:].zfill(4)
if self.board.is_en_passant(self.uci_move):
en_passant_binary = '1'
if self.board.piece_at(self.move_from_square).piece_type == chess.PAWN:
if chess.square_distance(self.move_from_square,
self.move_to_square) == 2:
pawn_start_binary = '1'
if len(str(self.uci_move)) > 4:
promoted_piece = str(self.uci_move)[4]
if self.side == 'w':
promoted_piece = promoted_piece.upper()
promoted_binary = bin(self.pieces[promoted_piece])[2:].zfill(4)
if self.board.is_castling(self.uci_move):
castling_binary = '1'
result = castling_binary + promoted_binary + pawn_start_binary + \
en_passant_binary + captured_piece_binary
return result
def minimax(self, node, depth, player, alpha, beta):
if player == 'w':
player = 1
elif player == 'b':
player = -1
if depth == 0 or node.children == []:
return [player*node.value]
if node.children[0] is not None:
predicted_child = node.children[0][0]
favourite_child = None
best_advantage = -1*player*infinity
for child, current_value in node.children:
node.board.push(child)
result = self.minimax(Node(node.board), depth-1,
-1*player, alpha, beta)
opposition_value = result[0]
advantage_score = player*current_value + opposition_value
if player == 1:
if advantage_score > best_advantage:
best_advantage = advantage_score
favourite_child = child
alpha = max(alpha, best_advantage)
if beta <= alpha:
node.board.pop()
break
elif player == -1:
if advantage_score < best_advantage:
best_advantage = advantage_score
favourite_child = child
beta = min(beta, best_advantage)
if beta <= alpha:
node.board.pop()
break
node.board.pop()
return [best_advantage, favourite_child, predicted_child]
def build_output_data(self):
        '''Takes the result from minimax and returns a dict.
        minimax uses depth 1 for the first 15 turns. After 15 turns,
        the search depth increases to 3 to allow for checkmating.'''
output_data = {}
if self.turns > 15:
sdepth = 3
else:
sdepth = 1
if (self.last_turn != self.side or
self.first_move is True or self.move_recieved == 'None'):
result = self.minimax(Node(board=self.board),
depth=sdepth, player=self.side,
alpha=-1*infinity, beta=infinity)
print('result:', result)
self.uci_move = result[1]
pick_from = str(self.uci_move)[0:2].upper()
pick_to = str(self.uci_move)[2:4].upper()
self.move_from_square = int(getattr(chess, pick_from))
self.move_to_square = int(getattr(chess, pick_to))
output_data['move_from'] = self.move_from_square
output_data['move_to'] = self.move_to_square
output_data['bits'] = self.build_binary_move()
self.board.push(self.uci_move)
output_data['position'] = self.board.fen()
print('-----------------------------')
print('Output Data: ', output_data)
print('Best score: ', result[0])
print('NN prediction: ', result[2])
print('-----------------------------')
return output_data
class Node():
'Node class used for minimax'
def __init__(self, board):
self.material_values = {
chess.KING: 50000,
chess.QUEEN: 5000,
chess.ROOK: 900,
chess.KNIGHT: 500,
chess.BISHOP: 500,
chess.PAWN: 10
}
self.board = board
self.turns = int(self.board.fen().split()[5])
self.children = []
if self.turns > 15:
self.create_children(15)
else:
self.create_children(100)
if self.children == []:
self.value = 0
else:
self.value = self.children[0][1]
def create_children(self, n):
self.best_moves = self.get_best_moves(*self.predict_moves())
self.children.extend(self.best_moves[:n])
def predict_moves(self):
        '''Uses the saved deep learning models to return two lists of
moved_from probabilities and moved_to probabilities'''
t1 = time.time()
nn_input = self.board.position_list_one_hot()
nn_input = np.array(nn_input).reshape(1, 8, 8, 12)
with graph.as_default():
predictions = list(move_from_model.predict(nn_input))
probabilities = list(predictions[0])
move_from_squares = sorted(range(len(probabilities)),
key=lambda k: probabilities[k])
move_from_squares = (list(reversed(move_from_squares)))
with graph.as_default():
predictions = list(move_to_model.predict(nn_input))
probabilities = list(predictions[0])
move_to_squares = sorted(range(len(probabilities)),
key=lambda k: probabilities[k])
        # move_to_squares is left in ascending order here; get_best_moves reverses it
        # before ranking candidate moves
return move_from_squares, move_to_squares
def get_material_scores(self, moves):
'''Generates material scores found by projecting the outcome
if a move is made. If a capture is made, the material value goes
up by the piece captured. If checkmate, the material value goes up
by the value of the king. If stalemate, the material value goes down
by 100000.'''
material_scores = []
for move in moves:
material_score = 0
if self.board.is_capture(move):
if self.board.is_en_passant(move):
captured_piece = chess.PAWN
else:
moved_to = getattr(chess, str(move)[2:4].upper())
captured_piece = self.board.piece_at(moved_to).piece_type
material_score += self.material_values[captured_piece]
self.board.push(move)
if self.board.is_checkmate():
material_score += self.material_values[chess.KING]
elif self.board.is_stalemate():
material_score -= 100000
else:
material_score += 0
self.board.pop()
material_scores.append(material_score)
return material_scores
def get_best_moves(self, from_sqs_list, to_sqs_list):
'''Matches the probabilities found in predict_moves and minimizes
the distance between the probabilities and a legal move. Returns
a list of legal moves, ordered by score (found by combining prediction
score and material score).'''
legal_moves = [str(legal) for legal in list(self.board.legal_moves)]
legal_moves_numbered = [squares_to_numbers(move)
for move in list(self.board.legal_moves)]
to_sqs_list = list(reversed(to_sqs_list))
total_uncertainties = []
for fro, to in legal_moves_numbered:
uncertainty_from = from_sqs_list.index(fro)
uncertainty_to = to_sqs_list.index(to)
total_uncertainties.append(uncertainty_from + uncertainty_to)
moves_ordered = [chess.Move.from_uci(move) for _, move in
sorted(zip(total_uncertainties, legal_moves),
key=lambda x: x[0])]
prediction_scores = [(400-uncertainty) for uncertainty in
sorted(total_uncertainties)]
material_scores = self.get_material_scores(moves_ordered)
total_scores = [p_score + m_score for p_score, m_score in
zip(prediction_scores, material_scores)]
return ([[move, score] for score, move in
sorted(zip(total_scores, moves_ordered),
key=lambda x: x[0], reverse=True)])
def squares_to_numbers(move, mirror=True):
'''converts a move in uci form (i.e. a1b1) to its squares
returns two ints
in the above case 0, 1
can be mirrored, which will return 56, 57'''
first_square = str(move)[0:2].upper()
second_square = str(move)[2:4].upper()
first_square_num = getattr(chess, first_square)
second_square_num = getattr(chess, second_square)
if mirror is True:
first_square_num = chess.square_mirror(first_square_num)
second_square_num = chess.square_mirror(second_square_num)
return (first_square_num, second_square_num)
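# Small worked example (not in the original file): python-chess numbers squares 0..63
# starting from a1, and square_mirror flips ranks, so a1/b1 map to a8/b8 when mirrored.
def _example_squares_to_numbers():
    assert squares_to_numbers("a1b1", mirror=False) == (0, 1)
    assert squares_to_numbers("a1b1") == (56, 57)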
def position_list_one_hot(self):
'''method added to the python-chess library for faster
conversion of board to one hot encoding. Resulted in 100%
increase in conversion speed by bypassing conversion to fen() first.
'''
builder = []
builder_append = builder.append
for square in chess.SQUARES_180:
mask = chess.BB_SQUARES[square]
if not self.occupied & mask:
builder.extend([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
elif bool(self.occupied_co[chess.WHITE] & mask):
if self.pawns & mask:
builder.extend([0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0])
elif self.knights & mask:
builder.extend([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0])
elif self.bishops & mask:
builder.extend([0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0])
elif self.rooks & mask:
builder.extend([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0])
elif self.queens & mask:
builder.extend([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0])
elif self.kings & mask:
builder.extend([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1])
elif self.pawns & mask:
builder.extend([1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
elif self.knights & mask:
builder.extend([0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
elif self.bishops & mask:
builder.extend([0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
elif self.rooks & mask:
builder.extend([0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0])
elif self.queens & mask:
builder.extend([0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0])
elif self.kings & mask:
builder.extend([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0])
return builder
chess.BaseBoard.position_list_one_hot = position_list_one_hot
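# A small sketch of the patched method in use (assumption: chess.Board
# inherits from chess.BaseBoard, so it picks up position_list_one_hot):
#
#     >>> board = chess.Board()
#     >>> len(board.position_list_one_hot())   # 64 squares x 12 piece planes
#     768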
global graph
graph = tf.get_default_graph()
model_folder = r'static/chessai/model'
moved_from_file = os.path.join(model_folder, 'moved_from_model.json')
moved_from_weights = os.path.join(model_folder, 'moved_from_weights.h5')
moved_to_file = os.path.join(model_folder, 'moved_to_model.json')
moved_to_weights = os.path.join(model_folder, 'moved_to_weights.h5')
with open(moved_from_file, 'r') as moved_from_json:
move_from_model = model_from_json(moved_from_json.read())
with open(moved_to_file, 'r') as moved_to_json:
move_to_model = model_from_json(moved_to_json.read())
move_from_model.load_weights(moved_from_weights)
move_to_model.load_weights(moved_to_weights)
if __name__ == '__main__':
gpu_mode = False
if gpu_mode is True:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
else:
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
app.run(debug=True)
|
the-stack_0_19665 | #!/usr/bin/python3
import os
import subprocess
def notify(msg,level):
try:
command = f'notify-send -u {level} -t 2000 "NaughtyLust" "{msg}"'
subprocess.Popen(command,shell=True)
except:
pass
def run_command(command):
ret = 0
try:
out = subprocess.Popen(command, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = out.communicate()
if stdout:
            ret = 0  # stdout produced: treat as success, no further checking needed
elif stderr:
ret = 1
except:
return -1
return ret
for path in os.getenv('NAUTILUS_SCRIPT_SELECTED_FILE_PATHS', '').splitlines():
# open the first path
    # If some files are selected along with at least one directory
# the directory will be selected
command = f'code "{path}"'
res = run_command(command)
if res == 0:
notify('Opening in VScode','normal')
break
else:
notify('Something went WRONG! could not open in VScode','critical')
break |
the-stack_0_19666 | import argparse,sys
parse = argparse.ArgumentParser(prog="report",description="a report program",usage='%(prog)s.py [options] -i input_file')
parse.add_argument("-i",help="input file",type=str,required=True)
parse.add_argument("-l",help="left number",type=int,default=15)
parse.add_argument("-r",help="right number",type=int,default=15)
parse.add_argument("-d",help="depth",type=int,default=5)
parse.add_argument("-id1",help="identity threshold for the two ends of the sequence",type=float,default=0.8)
parse.add_argument("-id2",help="identity threshold for the middle of the sequence",type=float,default=0.5)
args = parse.parse_args()
deep = args.d
two_end_of_seq = args.id1
middle_seq = args.id2
label = str()
f1 = open ("{filename}".format(filename=args.i),"r")
head = args.i.split(".")[0]
info = []
seq_tmp = str()
line_count = 0
for each_line in f1:
    each_line = each_line.strip()
    if each_line.startswith(">"):
        if seq_tmp == "":
            continue
        else:
            info.append(seq_tmp)
            seq_tmp = ""
    else:
        seq_tmp = seq_tmp + each_line
if seq_tmp != "":
    # keep the final record, which the loop above never flushes
    info.append(seq_tmp)
DATA = []
total_deepth = int()
if len(info) < deep:
    sys.exit("The depth is less than {deep}!!!".format(deep=deep))
else:
total_deepth = len(info)
for a in range(0,len(info[0])-1):
linshi = {}
A_count = 0
C_count = 0
G_count = 0
T_count = 0
other_count = 0
for i in info:
if i[a] == "A" or i[a] == "a":
A_count += 1
elif i[a] == "C" or i[a] == "c":
C_count += 1
elif i[a] == "G" or i[a] == "g":
G_count += 1
elif i[a] == "T" or i[a] == "t":
T_count += 1
elif i[a] == "-":
other_count += 1
linshi["A"] = A_count
linshi["C"] = C_count
linshi["G"] = G_count
linshi["T"] = T_count
linshi["-"] = other_count
DATA.append(linshi)
#print DATA
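# Each DATA entry is a per-column base count; a well-covered column might
# look like {'A': 12, 'C': 0, 'G': 1, 'T': 0, '-': 2} (values illustrative only).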
start = str()
middle = str()
end = str()
total = str()
left = int(args.l)
right = int(args.r)
for i in range(0,left):
if int(DATA[i]["-"]) > round(two_end_of_seq*total_deepth):
first = "-"
# first = max(DATA[i],key=DATA[i].get)
# last = str()
start = start + first
else:
# first == "-":
if "-" in DATA[i].keys():
del DATA[i]["-"]
second = max(DATA[i],key=DATA[i].get)
# if int(DATA[i][second]) >= deep:
last = second
# else:
# last = first
start = start + last
for i in range (left,len(DATA)-right-1):
if int(DATA[i]["-"]) > round(middle_seq*total_deepth):
first = "-"
middle = middle + first
else:
if "-" in DATA[i].keys():
del DATA[i]["-"]
a = max(DATA[i],key=DATA[i].get)
middle = middle + a
#print seq
for i in range (len(DATA)-right-1,len(DATA)):
first = max(DATA[i],key=DATA[i].get)
last = str()
if int(DATA[i]["-"]) > round(two_end_of_seq*total_deepth):
first = "-"
end = end + first
else:
if "-" in DATA[i].keys():
del DATA[i]["-"]
second = max(DATA[i],key=DATA[i].get)
last = second
# else:
# last = first
end = end + last
total = start + middle + end
total = total.replace("-","")
rest = ">"+head +"_"+"result" + ":" + "consensus_size={depth}".format(depth=len(info))+":"+"length={length}".format(length=len(DATA))
#print rest
#print total
f2 = open ("{label}_1.fasta".format(label=args.i),"a")
f2.write(rest + "\n" + total + "\n")
|
the-stack_0_19669 | # -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Logging functionalities."""
from typing import TYPE_CHECKING, List, Dict, Any
from libcloudforensics.providers.gcp.internal import common
if TYPE_CHECKING:
import googleapiclient
class GoogleCloudLog:
"""Class representing a Google Cloud Logs interface.
Attributes:
project_id: Google Cloud project ID.
gcl_api_client: Client to interact with GCP logging API.
Example use:
# pylint: disable=line-too-long
gcp = GoogleCloudLog(project_id='your_project_name')
gcp.ListLogs()
gcp.ExecuteQuery(filter='resource.type="gce_instance" labels."compute.googleapis.com/resource_name"="instance-1"')
See https://cloud.google.com/logging/docs/view/advanced-queries for filter details.
"""
LOGGING_API_VERSION = 'v2'
def __init__(self, project_id: str) -> None:
"""Initialize the GoogleCloudProject object.
Args:
project_id (str): The name of the project.
"""
self.project_id = project_id
self.gcl_api_client = None
def GclApi(self) -> 'googleapiclient.discovery.Resource':
"""Get a Google Compute Logging service object.
Returns:
googleapiclient.discovery.Resource: A Google Compute Logging service
object.
"""
if self.gcl_api_client:
return self.gcl_api_client
self.gcl_api_client = common.CreateService(
'logging', self.LOGGING_API_VERSION)
return self.gcl_api_client
def ListLogs(self) -> List[str]:
"""List logs in project.
Returns:
List[str]: The project logs available.
Raises:
RuntimeError: If API call failed.
"""
logs = []
gcl_instance_client = self.GclApi().logs()
responses = common.ExecuteRequest(
gcl_instance_client, 'list', {'parent': 'projects/' + self.project_id})
for response in responses:
for logtypes in response.get('logNames', []):
logs.append(logtypes)
return logs
def ExecuteQuery(self, qfilter: str) -> List[Dict[str, Any]]:
"""Query logs in GCP project.
Args:
qfilter (str): The query filter to use.
Returns:
List[Dict]: Log entries returned by the query, e.g. [{'projectIds':
[...], 'resourceNames': [...]}, {...}]
Raises:
RuntimeError: If API call failed.
"""
body = {
'resourceNames': 'projects/' + self.project_id,
'filter': qfilter,
'orderBy': 'timestamp desc',
}
entries = []
gcl_instance_client = self.GclApi().entries()
responses = common.ExecuteRequest(
gcl_instance_client, 'list', {'body': body}, throttle=True)
for response in responses:
for entry in response.get('entries', []):
entries.append(entry)
return entries
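# A minimal usage sketch (illustrative only; the project ID and filter are
# placeholders, and the printed keys follow the standard LogEntry fields):
#
#     gcl = GoogleCloudLog(project_id='your_project_name')
#     print(gcl.ListLogs())
#     for entry in gcl.ExecuteQuery('resource.type="gce_instance"'):
#         print(entry.get('timestamp'), entry.get('logName'))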
|
the-stack_0_19670 | """CS 61A presents Ants Vs. SomeBees."""
import random
from ucb import main, interact, trace
from collections import OrderedDict
################
# Core Classes #
################
class Place:
"""A Place holds insects and has an exit to another Place."""
is_hive = False
def __init__(self, name, exit=None):
"""Create a Place with the given NAME and EXIT.
name -- A string; the name of this Place.
exit -- The Place reached by exiting this Place (may be None).
"""
self.name = name
self.exit = exit
self.bees = [] # A list of Bees
self.ant = None # An Ant
self.entrance = None # A Place
# Phase 1: Add an entrance to the exit
# BEGIN Problem 2
if self.exit is not None:
self.exit.entrance = self
# END Problem 2
def add_insect(self, insect):
"""
Asks the insect to add itself to the current place. This method exists so
it can be enhanced in subclasses.
"""
insect.add_to(self)
def remove_insect(self, insect):
"""
Asks the insect to remove itself from the current place. This method exists so
it can be enhanced in subclasses.
"""
insect.remove_from(self)
def __str__(self):
return self.name
class Insect:
"""An Insect, the base class of Ant and Bee, has health and a Place."""
damage = 0
is_waterproof = False
# ADD CLASS ATTRIBUTES HERE
def __init__(self, health, place=None):
"""Create an Insect with a health amount and a starting PLACE."""
self.health = health
self.place = place # set by Place.add_insect and Place.remove_insect
def reduce_health(self, amount):
"""Reduce health by AMOUNT, and remove the insect from its place if it
has no health remaining.
>>> test_insect = Insect(5)
>>> test_insect.reduce_health(2)
>>> test_insect.health
3
"""
self.health -= amount
if self.health <= 0:
self.death_callback()
self.place.remove_insect(self)
def action(self, gamestate):
"""The action performed each turn.
gamestate -- The GameState, used to access game state information.
"""
def death_callback(self):
# overriden by the gui
pass
def add_to(self, place):
"""Add this Insect to the given Place
By default just sets the place attribute, but this should be overriden in the subclasses
to manipulate the relevant attributes of Place
"""
self.place = place
def remove_from(self, place):
self.place = None
def __repr__(self):
cname = type(self).__name__
return '{0}({1}, {2})'.format(cname, self.health, self.place)
class Ant(Insect):
"""An Ant occupies a place and does work for the colony."""
implemented = False # Only implemented Ant classes should be instantiated
food_cost = 0
is_container = False
doubled = False
# ADD CLASS ATTRIBUTES HERE
def __init__(self, health=1):
"""Create an Insect with a HEALTH quantity."""
super().__init__(health)
@classmethod
def construct(cls, gamestate):
"""Create an Ant for a given GameState, or return None if not possible."""
if cls.food_cost > gamestate.food:
print('Not enough food remains to place ' + cls.__name__)
return
return cls()
def can_contain(self, other):
return False
def store_ant(self, other):
assert False, "{0} cannot contain an ant".format(self)
def remove_ant(self, other):
assert False, "{0} cannot contain an ant".format(self)
def add_to(self, place):
if place.ant is None:
place.ant = self
else:
# BEGIN Problem 8
if isinstance(place.ant,ContainerAnt) and ContainerAnt.can_contain(place.ant, self):
ContainerAnt.store_ant(place.ant, self)
elif isinstance(self, ContainerAnt) and ContainerAnt.can_contain(self, place.ant):
ContainerAnt.store_ant(self, place.ant)
place.ant = self
else:
assert place.ant is None, 'Two ants in {0}'.format(place)
# END Problem 8
Insect.add_to(self, place)
def remove_from(self, place):
if place.ant is self:
place.ant = None
elif place.ant is None:
assert False, '{0} is not in {1}'.format(self, place)
else:
place.ant.remove_ant(self)
Insect.remove_from(self, place)
def double(self):
"""Double this ants's damage, if it has not already been doubled."""
# BEGIN Problem 12
self.damage *= 2
self.doubled = True
# END Problem 12
class HarvesterAnt(Ant):
"""HarvesterAnt produces 1 additional food per turn for the colony."""
name = 'Harvester'
implemented = True
food_cost = 2
def action(self, gamestate):
"""Produce 1 additional food for the colony.
gamestate -- The GameState, used to access game state information.
"""
# BEGIN Problem 1
gamestate.food += 1
# END Problem 1
class ThrowerAnt(Ant):
"""ThrowerAnt throws a leaf each turn at the nearest Bee in its range."""
name = 'Thrower'
implemented = True
damage = 1
food_cost = 3
min_range = 0
max_range = float('inf')
def nearest_bee(self):
"""Return the nearest Bee in a Place that is not the HIVE, connected to
the ThrowerAnt's Place by following entrances.
This method returns None if there is no such Bee (or none in range).
"""
# BEGIN Problem 3 and 4
current_place = self.place
dist = 0
while current_place.bees == [] or self.min_range > dist:
current_place = current_place.entrance
dist += 1
if current_place.is_hive is True or dist > self.max_range:
return None
return random_bee(current_place.bees)
# END Problem 3 and 4
def throw_at(self, target):
"""Throw a leaf at the TARGET Bee, reducing its health."""
if target is not None:
target.reduce_health(self.damage)
def action(self, gamestate):
"""Throw a leaf at the nearest Bee in range."""
self.throw_at(self.nearest_bee())
def random_bee(bees):
"""Return a random bee from a list of bees, or return None if bees is empty."""
assert isinstance(bees, list), "random_bee's argument should be a list but was a %s" % type(bees).__name__
if bees:
return random.choice(bees)
##############
# Extensions #
##############
class ShortThrower(ThrowerAnt):
"""A ThrowerAnt that only throws leaves at Bees at most 3 places away."""
name = 'Short'
food_cost = 2
# OVERRIDE CLASS ATTRIBUTES HERE
# BEGIN Problem 4
implemented = True # Change to True to view in the GUI
max_range = 3
# END Problem 4
class LongThrower(ThrowerAnt):
"""A ThrowerAnt that only throws leaves at Bees at least 5 places away."""
name = 'Long'
food_cost = 2
# OVERRIDE CLASS ATTRIBUTES HERE
# BEGIN Problem 4
implemented = True # Change to True to view in the GUI
min_range = 5
# END Problem 4
class FireAnt(Ant):
"""FireAnt cooks any Bee in its Place when it expires."""
name = 'Fire'
damage = 3
food_cost = 5
# OVERRIDE CLASS ATTRIBUTES HERE
# BEGIN Problem 5
implemented = True # Change to True to view in the GUI
# END Problem 5
def __init__(self, health=3):
"""Create an Ant with a HEALTH quantity."""
super().__init__(health)
def reduce_health(self, amount):
"""Reduce health by AMOUNT, and remove the FireAnt from its place if it
has no health remaining.
Make sure to reduce the health of each bee in the current place, and apply
the additional damage if the fire ant dies.
"""
# BEGIN Problem 5
place = self.place
Ant.reduce_health(self,amount)
if self.health <= 0:
amount += self.damage
if len(place.bees) > 0:
for bee in place.bees[:]:
Insect.reduce_health(bee,amount)
# END Problem 5
# BEGIN Problem 6
# The WallAnt class
class WallAnt(Ant):
name = 'Wall'
food_cost = 4
implemented = True
def __init__(self, health=4):
"""Create an Ant with a HEALTH quantity."""
super().__init__(health)
# END Problem 6
# BEGIN Problem 7
# The HungryAnt Class
class HungryAnt(Ant):
name = 'Hungry'
food_cost = 4
implemented = True
time_to_chew = 3
def __init__(self, health=1):
super().__init__(health)
self.chew_timer = 0
def eat(self,bee):
Insect.reduce_health(bee,bee.health)
self.chew_timer = self.time_to_chew
def action(self, gamestate):
if self.chew_timer > 0:
self.chew_timer -= 1
elif len(self.place.bees) > 0:
self.eat(random_bee(self.place.bees))
# END Problem 7
class ContainerAnt(Ant):
"""
ContainerAnt can share a space with other ants by containing them.
"""
is_container = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ant_contained = None
def can_contain(self, other):
# BEGIN Problem 8
return self.ant_contained == None and not isinstance(other,ContainerAnt)
# END Problem 8
def store_ant(self, ant):
# BEGIN Problem 8
self.ant_contained = ant
# END Problem 8
def remove_ant(self, ant):
if self.ant_contained is not ant:
assert False, "{} does not contain {}".format(self, ant)
self.ant_contained = None
def remove_from(self, place):
# Special handling for container ants (this is optional)
if place.ant is self:
# Container was removed. Contained ant should remain in the game
place.ant = place.ant.ant_contained
Insect.remove_from(self, place)
else:
# default to normal behavior
Ant.remove_from(self, place)
def action(self, gamestate):
# BEGIN Problem 8
if self.ant_contained is not None:
self.ant_contained.action(gamestate)
# END Problem 8
class BodyguardAnt(ContainerAnt):
"""BodyguardAnt provides protection to other Ants."""
name = 'Bodyguard'
food_cost = 4
# OVERRIDE CLASS ATTRIBUTES HERE
# BEGIN Problem 8
implemented = True # Change to True to view in the GUI
# END Problem 8
def __init__(self, health=2):
super().__init__(health)
# BEGIN Problem 9
# The TankAnt class
class TankAnt(ContainerAnt):
name = 'Tank'
food_cost = 6
implemented = True
damage = 1
def __init__(self,health=2):
super().__init__(health)
def action(self,gamestate):
ContainerAnt.action(self,gamestate)
for bee in self.place.bees[:]:
Insect.reduce_health(bee,self.damage)
# END Problem 9
class Water(Place):
"""Water is a place that can only hold waterproof insects."""
def add_insect(self, insect):
"""Add an Insect to this place. If the insect is not waterproof, reduce
its health to 0."""
# BEGIN Problem 10
Place.add_insect(self,insect)
if insect.is_waterproof is False:
Insect.reduce_health(insect, insect.health)
# END Problem 10
# BEGIN Problem 11
# The ScubaThrower class
class ScubaThrower(ThrowerAnt):
name = 'Scuba'
food_cost = 6
is_waterproof = True
implemented = True
def __init__(self,health=1):
super().__init__(health)
# END Problem 11
# BEGIN Problem 12
class QueenAnt(ScubaThrower): # You should change this line
# END Problem 12
"""The Queen of the colony. The game is over if a bee enters her place."""
name = 'Queen'
food_cost = 7
# OVERRIDE CLASS ATTRIBUTES HERE
# BEGIN Problem 12
implemented = True # Change to True to view in the GUI
# END Problem 12
@classmethod
def construct(cls, gamestate):
"""
Returns a new instance of the Ant class if it is possible to construct, or
returns None otherwise. Remember to call the construct() method of the superclass!
"""
# BEGIN Problem 12
if not gamestate.only_queen:
gamestate.only_queen = True
return super().construct(gamestate)
return None
# END Problem 12
def action(self, gamestate):
"""A queen ant throws a leaf, but also doubles the damage of ants
in her tunnel.
"""
# BEGIN Problem 12
super().action(gamestate)
place = self.place.exit
while place:
if place.ant is not None:
if not place.ant.doubled:
place.ant.double()
if place.ant.is_container and place.ant.ant_contained is not None and not place.ant.ant_contained.doubled:
place.ant.ant_contained.double()
place = place.exit
# END Problem 12
def reduce_health(self, amount):
"""Reduce health by AMOUNT, and if the QueenAnt has no health
remaining, signal the end of the game.
"""
# BEGIN Problem 12
if amount >= self.health:
ants_lose()
else:
Insect.reduce_health(self,amount)
# END Problem 12
def remove_from(self, place):
pass
class AntRemover(Ant):
"""Allows the player to remove ants from the board in the GUI."""
name = 'Remover'
implemented = False
def __init__(self):
super().__init__(0)
class Bee(Insect):
"""A Bee moves from place to place, following exits and stinging ants."""
name = 'Bee'
damage = 1
is_waterproof = True
is_slowed = False
slow_turn = 0
is_scared = False
scare_turn = 0
direction = True
# OVERRIDE CLASS ATTRIBUTES HERE
def sting(self, ant):
"""Attack an ANT, reducing its health by 1."""
ant.reduce_health(self.damage)
def move_to(self, place):
"""Move from the Bee's current Place to a new PLACE."""
self.place.remove_insect(self)
place.add_insect(self)
def blocked(self):
"""Return True if this Bee cannot advance to the next Place."""
# Special handling for NinjaAnt
# BEGIN Problem Optional 1
return self.place.ant is not None
# END Problem Optional 1
def action(self, gamestate):
"""A Bee's action stings the Ant that blocks its exit if it is blocked,
or moves to the exit of its current place otherwise.
gamestate -- The GameState, used to access game state information.
"""
if self.scare_turn > 0:
self.direction = False
else:
self.direction = True
if self.direction:
destination = self.place.exit
else:
destination = self.place.entrance
# Extra credit: Special handling for bee direction
if self.blocked():
self.sting(self.place.ant)
elif self.health > 0 and destination is not None:
if self.is_slowed:
if gamestate.time % 2 == 0 and self.slow_turn > 0:
if not destination.is_hive:
self.move_to(destination)
if self.scare_turn > 0:
self.scare_turn -= 1
self.slow_turn -= 1
if self.slow_turn == 0:
self.is_slowed = False
else:
if not destination.is_hive:
self.move_to(destination)
if self.scare_turn > 0:
self.scare_turn -= 1
def add_to(self, place):
place.bees.append(self)
Insect.add_to(self, place)
def remove_from(self, place):
place.bees.remove(self)
Insect.remove_from(self, place)
def slow(self, length):
"""Slow the bee for a further LENGTH turns."""
# BEGIN Problem EC
self.is_slowed = True
self.slow_turn += length
# END Problem EC
def scare(self, length):
"""
If this Bee has not been scared before, cause it to attempt to
go backwards LENGTH times.
"""
# BEGIN Problem EC
if self.is_scared:
self.scare_turn = 0
else:
self.scare_turn = length
self.is_scared = True
# END Problem EC
############
# Optional #
############
class NinjaAnt(Ant):
"""NinjaAnt does not block the path and damages all bees in its place.
This class is optional.
"""
name = 'Ninja'
damage = 1
food_cost = 5
# OVERRIDE CLASS ATTRIBUTES HERE
# BEGIN Problem Optional 1
implemented = False # Change to True to view in the GUI
# END Problem Optional 1
def action(self, gamestate):
# BEGIN Problem Optional 1
"*** YOUR CODE HERE ***"
# END Problem Optional 1
############
# Statuses #
############
class SlowThrower(ThrowerAnt):
"""ThrowerAnt that causes Slow on Bees."""
name = 'Slow'
food_cost = 4
# BEGIN Problem EC
implemented = True # Change to True to view in the GUI
# END Problem EC
def throw_at(self, target):
if target:
target.slow(3)
class ScaryThrower(ThrowerAnt):
"""ThrowerAnt that intimidates Bees, making them back away instead of advancing."""
name = 'Scary'
food_cost = 6
# BEGIN Problem EC
implemented = True # Change to True to view in the GUI
# END Problem EC
def throw_at(self, target):
# BEGIN Problem EC
if target:
target.scare(2)
# END Problem EC
class LaserAnt(ThrowerAnt):
# This class is optional. Only one test is provided for this class.
name = 'Laser'
food_cost = 10
# OVERRIDE CLASS ATTRIBUTES HERE
# BEGIN Problem Optional 2
implemented = False # Change to True to view in the GUI
# END Problem Optional 2
def __init__(self, health=1):
super().__init__(health)
self.insects_shot = 0
def insects_in_front(self):
# BEGIN Problem Optional 2
return {}
# END Problem Optional 2
def calculate_damage(self, distance):
# BEGIN Problem Optional 2
return 0
# END Problem Optional 2
def action(self, gamestate):
insects_and_distances = self.insects_in_front()
for insect, distance in insects_and_distances.items():
damage = self.calculate_damage(distance)
insect.reduce_health(damage)
if damage:
self.insects_shot += 1
##################
# Bees Extension #
##################
class Wasp(Bee):
"""Class of Bee that has higher damage."""
name = 'Wasp'
damage = 2
class Hornet(Bee):
"""Class of bee that is capable of taking two actions per turn, although
its overall damage output is lower. Immune to statuses.
"""
name = 'Hornet'
damage = 0.25
def action(self, gamestate):
for i in range(2):
if self.health > 0:
super().action(gamestate)
def __setattr__(self, name, value):
if name != 'action':
object.__setattr__(self, name, value)
class NinjaBee(Bee):
"""A Bee that cannot be blocked. Is capable of moving past all defenses to
assassinate the Queen.
"""
name = 'NinjaBee'
def blocked(self):
return False
class Boss(Wasp, Hornet):
"""The leader of the bees. Combines the high damage of the Wasp along with
status immunity of Hornets. Damage to the boss is capped up to 8
damage by a single attack.
"""
name = 'Boss'
damage_cap = 8
action = Wasp.action
def reduce_health(self, amount):
super().reduce_health(self.damage_modifier(amount))
def damage_modifier(self, amount):
return amount * self.damage_cap / (self.damage_cap + amount)
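        # Worked example (illustrative): with damage_cap == 8, an incoming
        # attack of 8 is reduced to 8 * 8 / (8 + 8) == 4, and no single
        # attack can ever deal 8 or more damage.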
class Hive(Place):
"""The Place from which the Bees launch their assault.
assault_plan -- An AssaultPlan; when & where bees enter the colony.
"""
is_hive = True
def __init__(self, assault_plan):
self.name = 'Hive'
self.assault_plan = assault_plan
self.bees = []
for bee in assault_plan.all_bees:
self.add_insect(bee)
# The following attributes are always None for a Hive
self.entrance = None
self.ant = None
self.exit = None
def strategy(self, gamestate):
exits = [p for p in gamestate.places.values() if p.entrance is self]
for bee in self.assault_plan.get(gamestate.time, []):
bee.move_to(random.choice(exits))
gamestate.active_bees.append(bee)
class GameState:
"""An ant collective that manages global game state and simulates time.
Attributes:
time -- elapsed time
food -- the colony's available food total
places -- A list of all places in the colony (including a Hive)
bee_entrances -- A list of places that bees can enter
"""
def __init__(self, strategy, beehive, ant_types, create_places, dimensions, food=2):
"""Create an GameState for simulating a game.
Arguments:
strategy -- a function to deploy ants to places
beehive -- a Hive full of bees
ant_types -- a list of ant classes
create_places -- a function that creates the set of places
dimensions -- a pair containing the dimensions of the game layout
"""
self.time = 0
self.food = food
self.strategy = strategy
self.beehive = beehive
self.ant_types = OrderedDict((a.name, a) for a in ant_types)
self.dimensions = dimensions
self.active_bees = []
self.configure(beehive, create_places)
self.only_queen = False
def configure(self, beehive, create_places):
"""Configure the places in the colony."""
self.base = AntHomeBase('Ant Home Base')
self.places = OrderedDict()
self.bee_entrances = []
def register_place(place, is_bee_entrance):
self.places[place.name] = place
if is_bee_entrance:
place.entrance = beehive
self.bee_entrances.append(place)
register_place(self.beehive, False)
create_places(self.base, register_place, self.dimensions[0], self.dimensions[1])
def simulate(self):
"""Simulate an attack on the ant colony (i.e., play the game)."""
num_bees = len(self.bees)
try:
while True:
self.beehive.strategy(self) # Bees invade
self.strategy(self) # Ants deploy
for ant in self.ants: # Ants take actions
if ant.health > 0:
ant.action(self)
for bee in self.active_bees[:]: # Bees take actions
if bee.health > 0:
bee.action(self)
if bee.health <= 0:
num_bees -= 1
self.active_bees.remove(bee)
if num_bees == 0:
raise AntsWinException()
self.time += 1
except AntsWinException:
print('All bees are vanquished. You win!')
return True
except AntsLoseException:
print('The ant queen has perished. Please try again.')
return False
def deploy_ant(self, place_name, ant_type_name):
"""Place an ant if enough food is available.
This method is called by the current strategy to deploy ants.
"""
ant_type = self.ant_types[ant_type_name]
ant = ant_type.construct(self)
if ant:
self.places[place_name].add_insect(ant)
self.food -= ant.food_cost
return ant
def remove_ant(self, place_name):
"""Remove an Ant from the game."""
place = self.places[place_name]
if place.ant is not None:
place.remove_insect(place.ant)
@property
def ants(self):
return [p.ant for p in self.places.values() if p.ant is not None]
@property
def bees(self):
return [b for p in self.places.values() for b in p.bees]
@property
def insects(self):
return self.ants + self.bees
def __str__(self):
status = ' (Food: {0}, Time: {1})'.format(self.food, self.time)
return str([str(i) for i in self.ants + self.bees]) + status
class AntHomeBase(Place):
"""AntHomeBase at the end of the tunnel, where the queen resides."""
def add_insect(self, insect):
"""Add an Insect to this Place.
Can't actually add Ants to a AntHomeBase. However, if a Bee attempts to
enter the AntHomeBase, a AntsLoseException is raised, signaling the end
of a game.
"""
assert isinstance(insect, Bee), 'Cannot add {0} to AntHomeBase'
raise AntsLoseException()
def ants_win():
"""Signal that Ants win."""
raise AntsWinException()
def ants_lose():
"""Signal that Ants lose."""
raise AntsLoseException()
def ant_types():
"""Return a list of all implemented Ant classes."""
all_ant_types = []
new_types = [Ant]
while new_types:
new_types = [t for c in new_types for t in c.__subclasses__()]
all_ant_types.extend(new_types)
return [t for t in all_ant_types if t.implemented]
class GameOverException(Exception):
"""Base game over Exception."""
pass
class AntsWinException(GameOverException):
"""Exception to signal that the ants win."""
pass
class AntsLoseException(GameOverException):
"""Exception to signal that the ants lose."""
pass
def interactive_strategy(gamestate):
"""A strategy that starts an interactive session and lets the user make
changes to the gamestate.
For example, one might deploy a ThrowerAnt to the first tunnel by invoking
gamestate.deploy_ant('tunnel_0_0', 'Thrower')
"""
print('gamestate: ' + str(gamestate))
msg = '<Control>-D (<Control>-Z <Enter> on Windows) completes a turn.\n'
interact(msg)
###########
# Layouts #
###########
def wet_layout(queen, register_place, tunnels=3, length=9, moat_frequency=3):
"""Register a mix of wet and and dry places."""
for tunnel in range(tunnels):
exit = queen
for step in range(length):
if moat_frequency != 0 and (step + 1) % moat_frequency == 0:
exit = Water('water_{0}_{1}'.format(tunnel, step), exit)
else:
exit = Place('tunnel_{0}_{1}'.format(tunnel, step), exit)
register_place(exit, step == length - 1)
def dry_layout(queen, register_place, tunnels=3, length=9):
"""Register dry tunnels."""
wet_layout(queen, register_place, tunnels, length, 0)
#################
# Assault Plans #
#################
class AssaultPlan(dict):
"""The Bees' plan of attack for the colony. Attacks come in timed waves.
An AssaultPlan is a dictionary from times (int) to waves (list of Bees).
    >>> AssaultPlan().add_wave(Bee, 3, 4, 2)
{4: [Bee(3, None), Bee(3, None)]}
"""
def add_wave(self, bee_type, bee_health, time, count):
"""Add a wave at time with count Bees that have the specified health."""
bees = [bee_type(bee_health) for _ in range(count)]
self.setdefault(time, []).extend(bees)
return self
@property
def all_bees(self):
"""Place all Bees in the beehive and return the list of Bees."""
return [bee for wave in self.values() for bee in wave]
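    # A small illustrative sketch (not part of the original scaffold): because
    # add_wave returns self, waves can be chained, e.g.
    #
    #     plan = AssaultPlan().add_wave(Bee, 3, 4, 2).add_wave(Wasp, 3, 8, 1)
    #     plan.all_bees   # two Bees and one Wasp, all starting in the Hive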
|
the-stack_0_19671 | from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
from prompt_toolkit.application import Application
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.keys import Keys
from prompt_toolkit.styles import Style, merge_styles
from prompt_toolkit.formatted_text import FormattedText
from questionary import utils
from questionary.constants import DEFAULT_QUESTION_PREFIX, DEFAULT_STYLE
from questionary.prompts import common
from questionary.prompts.common import Choice, InquirerControl, Separator
from questionary.question import Question
def checkbox(
message: str,
choices: Sequence[Union[str, Choice, Dict[str, Any]]],
default: Optional[str] = None,
validate: Callable[[List[str]], bool] = lambda a: True,
qmark: str = DEFAULT_QUESTION_PREFIX,
style: Optional[Style] = None,
use_pointer: bool = True,
initial_choice: Optional[Union[str, Choice, Dict[str, Any]]] = None,
**kwargs: Any,
) -> Question:
"""Ask the user to select from a list of items.
This is a multiselect, the user can choose one, none or many of the
items.
Args:
message: Question text
choices: Items shown in the selection, this can contain `Choice` or
or `Separator` objects or simple items as strings. Passing
`Choice` objects, allows you to configure the item more
(e.g. preselecting it or disabling it).
default: Default return value (single value). If you want to preselect
multiple items, use `Choice("foo", checked=True)` instead.
validate: Require the entered value to pass a validation. The
value can not be submitted until the validator accepts
it (e.g. to check minimum password length).
This should be a function accepting the input and
returning a boolean. An optional second return value
is the error message to display.
qmark: Question prefix displayed in front of the question.
By default this is a `?`
style: A custom color and style for the question parts. You can
configure colors as well as font types for different elements.
use_pointer: Flag to enable the pointer in front of the currently
highlighted element.
initial_choice: A value corresponding to a selectable item in the choices,
to initially set the pointer position to.
Returns:
Question: Question instance, ready to be prompted (using `.ask()`).
"""
merged_style = merge_styles(
[
DEFAULT_STYLE,
# Disable the default inverted colours bottom-toolbar behaviour (for
# the error message). However it can be re-enabled with a custom
# style.
Style([("bottom-toolbar", "noreverse")]),
style,
]
)
if not callable(validate):
raise ValueError("validate must be callable")
ic = InquirerControl(
choices, default, use_pointer=use_pointer, initial_choice=initial_choice
)
def get_prompt_tokens() -> List[Tuple[str, str]]:
tokens = []
tokens.append(("class:qmark", qmark))
tokens.append(("class:question", " {} ".format(message)))
if ic.is_answered:
nbr_selected = len(ic.selected_options)
if nbr_selected == 0:
tokens.append(("class:answer", " done"))
elif nbr_selected == 1:
if isinstance(ic.get_selected_values()[0].title, list):
ts = ic.get_selected_values()[0].title
tokens.append(
(
"class:answer",
"".join([token[1] for token in ts]), # type:ignore
)
)
else:
tokens.append(
(
"class:answer",
" [{}]".format(ic.get_selected_values()[0].title),
)
)
else:
tokens.append(
("class:answer", " done ({} selections)".format(nbr_selected))
)
else:
tokens.append(
(
"class:instruction",
" (Use arrow keys to move, "
"<space> to select, "
"<a> to toggle, "
"<i> to invert)",
)
)
return tokens
def get_selected_values() -> List[Any]:
return [c.value for c in ic.get_selected_values()]
def perform_validation(selected_values: List[str]) -> bool:
verdict = validate(selected_values)
if isinstance(verdict, bool):
valid = verdict
error_message = FormattedText(
[("class:validation-toolbar", "Invalid selection")]
)
else:
valid, error_message = verdict
if isinstance(error_message, str):
error_message = FormattedText(
[("class:validation-toolbar", error_message)]
)
ic.error_message = (
error_message if not valid and ic.submission_attempted else None
)
return valid
layout = common.create_inquirer_layout(ic, get_prompt_tokens, **kwargs)
bindings = KeyBindings()
@bindings.add(Keys.ControlQ, eager=True)
@bindings.add(Keys.ControlC, eager=True)
def _(event):
event.app.exit(exception=KeyboardInterrupt, style="class:aborting")
@bindings.add(" ", eager=True)
def toggle(_event):
pointed_choice = ic.get_pointed_at().value
if pointed_choice in ic.selected_options:
ic.selected_options.remove(pointed_choice)
else:
ic.selected_options.append(pointed_choice)
perform_validation(get_selected_values())
@bindings.add("i", eager=True)
def invert(_event):
inverted_selection = [
c.value
for c in ic.choices
if not isinstance(c, Separator)
and c.value not in ic.selected_options
and not c.disabled
]
ic.selected_options = inverted_selection
perform_validation(get_selected_values())
@bindings.add("a", eager=True)
def all(_event):
all_selected = True # all choices have been selected
for c in ic.choices:
if (
not isinstance(c, Separator)
and c.value not in ic.selected_options
and not c.disabled
):
# add missing ones
ic.selected_options.append(c.value)
all_selected = False
if all_selected:
ic.selected_options = []
perform_validation(get_selected_values())
@bindings.add(Keys.Down, eager=True)
@bindings.add("j", eager=True)
def move_cursor_down(_event):
ic.select_next()
while not ic.is_selection_valid():
ic.select_next()
@bindings.add(Keys.Up, eager=True)
@bindings.add("k", eager=True)
def move_cursor_up(_event):
ic.select_previous()
while not ic.is_selection_valid():
ic.select_previous()
@bindings.add(Keys.ControlM, eager=True)
def set_answer(event):
selected_values = get_selected_values()
ic.submission_attempted = True
if perform_validation(selected_values):
ic.is_answered = True
event.app.exit(result=selected_values)
@bindings.add(Keys.Any)
def other(_event):
"""Disallow inserting other text. """
pass
return Question(
Application(
layout=layout,
key_bindings=bindings,
style=merged_style,
**utils.used_kwargs(kwargs, Application.__init__),
)
)
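# A minimal usage sketch (illustrative only; the question text and choices are
# placeholders, and Choice comes from questionary.prompts.common as imported
# above):
#
#     answer = checkbox(
#         "Select toppings",
#         choices=["cheese", "ham", Choice("pineapple", checked=True)],
#     ).ask()
#     # -> e.g. ["cheese", "pineapple"]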
|
the-stack_0_19672 | import argparse
import os
import time
import yaml
import shutil
import warnings
import logging
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as NativeDDP
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.utils
from torchvision import datasets, models, transforms
import autogluon.core as ag
from autotimm.utils.model import save_checkpoint, reduce_tensor, adjust_learning_rate, load_checkpoint
from autotimm.utils.metrics import AverageMeter, accuracy
from autotimm.models.model_zoo import get_model_list
from autotimm.models.network import get_input_size, init_network
from autotimm.data.dataloaders import get_pytorch_train_loader, get_pytorch_val_loader
model_names = get_model_list()
def parse_args():
parser = argparse.ArgumentParser(description='Model-based Asynchronous HPO')
parser.add_argument('--data_name', default="", type=str, help='dataset name')
parser.add_argument('--data_path', default="", type=str, help='path to dataset')
parser.add_argument('--model', metavar='MODEL', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
    parser.add_argument('-j', '--workers', type=int, default=4, metavar='N',
                        help='how many training processes to use (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--image-size', default=None, type=int, help="resolution of image")
parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--lr-schedule', default="step", type=str, metavar="SCHEDULE",
choices=["step", "linear", "cosine"],
help="Type of LR schedule: {}, {}, {}".format("step", "linear", "cosine"),)
parser.add_argument('--warmup', default=0, type=int, metavar="E", help="number of warmup epochs")
parser.add_argument('--label-smoothing', default=0.0, type=float, metavar="S", help="label smoothing")
parser.add_argument('--mixup', default=0.0, type=float, metavar="ALPHA", help="mixup alpha")
parser.add_argument('--optimizer', default="sgd", type=str, choices=("sgd", "rmsprop"))
parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('--augmentation', type=str, default=None, choices=[None, "autoaugment", "original-mstd0.5", "rand-m9-n3-mstd0.5", "augmix-m5-w4-d2"],
help="augmentation method",)
parser.add_argument('--log_interval', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--amp', action='store_true', default=False,
help='use NVIDIA Apex AMP or Native AMP for mixed precision training')
parser.add_argument('--apex-amp', action='store_true', default=False,
help='Use NVIDIA Apex AMP mixed precision')
parser.add_argument('--native-amp', action='store_true', default=False,
help='Use Native Torch AMP mixed precision')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--output-dir', default="/home/yiran.wu/work_dirs/pytorch_model_benchmark", type=str,
help='output directory for model and log')
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
args = parser.parse_args()
return args
torch.backends.cudnn.benchmark = True
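# Example launch commands (illustrative; the script name and data paths are
# placeholders). Single process:
#   python train_hpo.py --data_name cifar10 --data_path /data/cifar10 --model resnet18
# Multi-GPU, one process per GPU (torchrun sets WORLD_SIZE/LOCAL_RANK, which
# train_loop() reads to enable DistributedDataParallel):
#   torchrun --nproc_per_node=4 train_hpo.py --data_name imagenet --data_path /data/imagenet --model resnet50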
def train_loop():
opt = parse_args()
task_name = opt.data_name + '-' + opt.model
opt.output_dir = os.path.join(opt.output_dir, task_name)
if not os.path.exists(opt.output_dir):
os.makedirs(opt.output_dir)
_logger = logging.getLogger('')
filehandler = logging.FileHandler(os.path.join(opt.output_dir, 'summary.log'))
streamhandler = logging.StreamHandler()
_logger.setLevel(logging.INFO)
_logger.addHandler(filehandler)
_logger.addHandler(streamhandler)
ngpus_per_node = torch.cuda.device_count()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
input_size = get_input_size(opt.model)
# dataloaders, num_class = load_data(opt.data_path, input_size, batch_size, ngpus_per_node)
train_loader, num_classes = get_pytorch_train_loader(opt.data_path, "train", input_size, opt.batch_size, augmentation = opt.augmentation)
valid_loader, _ = get_pytorch_val_loader(opt.data_path, "val", input_size, opt.batch_size)
test_batch_size = int(opt.batch_size/ max(ngpus_per_node, 1))
test_loader, _ = get_pytorch_val_loader(opt.data_path, "test", input_size, test_batch_size)
# model ddp
opt.distributed = False
if 'WORLD_SIZE' in os.environ:
opt.distributed = int(os.environ['WORLD_SIZE']) > 1
opt.local_rank = int(os.environ["LOCAL_RANK"])
else:
opt.local_rank = 0
opt.device = 'cuda:0'
opt.world_size = 1
opt.rank = 0 # global rank
if opt.distributed:
opt.device = 'cuda:%d' % opt.local_rank
torch.cuda.set_device(opt.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
opt.world_size = torch.distributed.get_world_size()
opt.rank = torch.distributed.get_rank()
_logger.info('Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d.'
% (opt.rank, opt.world_size))
else:
_logger.info('Training with a single process on 1 GPUs.')
assert opt.rank >= 0
# model
model = init_network(opt.model, num_classes, pretrained=opt.pretrained)
# move model to GPU, enable channels last layout if set
model.cuda()
# setup distributed training
if opt.distributed:
if opt.local_rank == 0:
_logger.info("Using native Torch DistributedDataParallel.")
model = NativeDDP(model, device_ids=[opt.local_rank]) # can use device str in Torch >= 1.1
# NOTE: EMA model does not need to be wrapped by DDP
criterion = nn.CrossEntropyLoss().cuda()
# Observe that all parameters are being optimized
optimizer = optim.SGD(model.parameters(), lr=opt.lr, momentum=opt.momentum)
best_acc1 = 0
# Training
def train(epoch, loader, num_classes):
batch_time_m = AverageMeter('Time', ':6.3f')
data_time_m = AverageMeter('Data', ':6.3f')
losses_m = AverageMeter('Loss', ':.4e')
top1_m = AverageMeter('Acc@1', ':6.2f')
top5_m = AverageMeter('Acc@5', ':6.2f')
# loader = dataloaders['train']
lr = adjust_learning_rate(optimizer, epoch, opt)
model.train()
end = time.time()
last_idx = len(loader) - 1
num_updates = epoch * len(loader)
for batch_idx, (inputs, targets) in enumerate(loader):
last_batch = batch_idx == last_idx
data_time_m.update(time.time() - end)
inputs, targets = inputs.to(device), targets.to(device)
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, targets)
acc1, acc5 = accuracy(outputs, targets, topk=(1, min(num_classes, 5)))
top1_m.update(acc1.item(), outputs.size(0))
top5_m.update(acc5.item(), outputs.size(0))
loss.backward()
optimizer.step()
num_updates += 1
batch_time_m.update(time.time() - end)
if last_batch or batch_idx % opt.log_interval == 0:
if opt.distributed:
reduced_loss = reduce_tensor(loss.data)
acc1 = reduce_tensor(acc1)
acc5 = reduce_tensor(acc5)
losses_m.update(reduced_loss.item(), inputs.size(0))
else:
losses_m.update(loss.item(), inputs.size(0))
if opt.local_rank == 0:
_logger.info(
'Train: {} [{:>4d}/{} ({:>3.0f}%)] '
'Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) '
'Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) '
'Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'
'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s '
'({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) '
'LR: {lr:.3e} '
'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format(
epoch,
batch_idx, len(loader),
100. * batch_idx / last_idx,
loss=losses_m,
top1=top1_m,
top5=top5_m,
batch_time=batch_time_m,
rate=inputs.size(0) * opt.world_size / batch_time_m.val,
rate_avg=inputs.size(0) * opt.world_size / batch_time_m.avg,
lr=lr,
data_time=data_time_m))
def val(loader):
batch_time_m = AverageMeter('Time', ':6.3f')
data_time_m = AverageMeter('Data', ':6.3f')
losses_m = AverageMeter('Loss', ':.4e')
top1_m = AverageMeter('Acc@1', ':6.2f')
top5_m = AverageMeter('Acc@5', ':6.2f')
model.eval()
end = time.time()
last_idx = len(loader) - 1
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(loader):
last_batch = batch_idx == last_idx
inputs, targets = inputs.to(device), targets.to(device)
outputs = model(inputs)
loss = criterion(outputs, targets)
acc1, acc5 = accuracy(outputs, targets, topk=(1, min(num_classes, 5)))
torch.cuda.synchronize()
if opt.distributed:
reduced_loss = reduce_tensor(loss.data)
losses_m.update(reduced_loss.item(), inputs.size(0))
acc1 = reduce_tensor(acc1)
acc5 = reduce_tensor(acc5)
else:
losses_m.update(loss.item(), inputs.size(0))
top1_m.update(acc1.item(), outputs.size(0))
top5_m.update(acc5.item(), outputs.size(0))
batch_time_m.update(time.time() - end)
end = time.time()
if opt.local_rank == 0 and (last_batch or batch_idx % opt.log_interval == 0):
log_name = 'Val-log'
_logger.info(
'{0}: [{1:>4d}/{2}] '
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
'Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) '
'Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(
log_name, batch_idx, last_idx, batch_time=batch_time_m,
loss=losses_m, top1=top1_m, top5=top5_m))
val_acc1 = top1_m.avg
return val_acc1
def test(loader):
batch_time_m = AverageMeter('Time', ':6.3f')
data_time_m = AverageMeter('Data', ':6.3f')
losses_m = AverageMeter('Loss', ':.4e')
top1_m = AverageMeter('Acc@1', ':6.2f')
top5_m = AverageMeter('Acc@5', ':6.2f')
model.eval()
end = time.time()
last_idx = len(loader) - 1
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(loader):
last_batch = batch_idx == last_idx
inputs, targets = inputs.to(device), targets.to(device)
outputs = model(inputs)
loss = criterion(outputs, targets)
acc1, acc5 = accuracy(outputs, targets, topk=(1, min(num_classes, 5)))
torch.cuda.synchronize()
if opt.distributed:
reduced_loss = reduce_tensor(loss.data)
losses_m.update(reduced_loss.item(), inputs.size(0))
acc1 = reduce_tensor(acc1)
acc5 = reduce_tensor(acc5)
else:
losses_m.update(loss.item(), inputs.size(0))
top1_m.update(acc1.item(), outputs.size(0))
top5_m.update(acc5.item(), outputs.size(0))
batch_time_m.update(time.time() - end)
end = time.time()
if opt.local_rank == 0 and (last_batch or batch_idx % opt.log_interval == 0):
log_name = 'Test-log'
_logger.info(
'{0}: [{1:>4d}/{2}] '
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) '
'Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) '
'Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format(
log_name, batch_idx, last_idx, batch_time=batch_time_m,
loss=losses_m, top1=top1_m, top5=top5_m))
_logger.info("The top 1 test accuracy of best model is {:>7.4f}".format(top1_m.avg))
for epoch in range(0, opt.epochs):
train(epoch, train_loader, num_classes)
val_acc1 = val(valid_loader)
is_best = val_acc1 > best_acc1
best_acc1 = max(val_acc1, best_acc1)
if not opt.multiprocessing_distributed or (opt.multiprocessing_distributed
and opt.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.module.state_dict() if hasattr(model, 'module') else model.state_dict(),
'best_acc1': best_acc1,
'optimizer': optimizer.state_dict(),
}, is_best, checkpoint_dir = opt.output_dir)
print("Training and validation finished!\n")
print("Testing start now!\n")
# print(list(model.modules()))
model = init_network(opt.model, num_classes, pretrained=opt.pretrained)
# move model to GPU, enable channels last layout if set
model.cuda()
best_ckpt_path = os.path.join(opt.output_dir, 'model_best.pth.tar')
load_checkpoint(model, best_ckpt_path)
test(test_loader)
# @ag.args(
# learning_rate=ag.space.Real(lower=1e-6, upper=1, log=True),
# momentum=ag.space.Real(lower=0.88, upper=0.9),
# batch_size=ag.space.Int(lower=128, upper=256),
# epochs=10,
# )
# def train_finetune(args, reporter):
# return train_loop(args, reporter)
if __name__ == '__main__':
# myscheduler = ag.scheduler.FIFOScheduler(train_finetune,
# resource={'num_cpus': 32, 'num_gpus': 4},
# checkpoint='checkpoint',
# num_trials=2,
# time_attr='epoch',
# reward_attr="accuracy")
# # Run experiment
# myscheduler.run()
# myscheduler.join_jobs()
# myscheduler.get_training_curves(plot=True, use_legend=False)
# print('The Best Configuration and Accuracy are: {}, {}'.format(myscheduler.get_best_config(),
# myscheduler.get_best_reward()))
train_loop()
|
the-stack_0_19674 | # Authors: Pierre Ablin <[email protected]>
# Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
import warnings
from itertools import product
import numpy as np
from numpy.testing import assert_allclose
from picard import picard, permute, amari_distance
from picard.densities import Tanh, Exp, Cube, check_density
def test_dimension_reduction():
N, T = 5, 1000
n_components = 3
rng = np.random.RandomState(42)
S = rng.laplace(size=(N, T))
A = rng.randn(N, N)
X = np.dot(A, S)
K, W, Y = picard(X, n_components=n_components, ortho=False,
random_state=rng, max_iter=2)
assert K.shape == (n_components, N)
assert W.shape == (n_components, n_components)
assert Y.shape, (n_components, T)
with warnings.catch_warnings(record=True) as w:
K, W, Y = picard(X, n_components=n_components, ortho=False,
whiten=False, max_iter=1)
assert len(w) == 2
def test_dots():
N, T = 5, 100
rng = np.random.RandomState(42)
S = rng.laplace(size=(N, T))
A = rng.randn(N, N)
X = np.dot(A, S)
n_components = [N, 3]
tf = [False, True]
w_inits = [None, 'id']
for n_component, ortho, whiten, w_init in product(n_components, tf, tf,
w_inits):
if w_init == 'id':
if whiten:
w_init = np.eye(n_component)
else:
w_init = np.eye(N)
with warnings.catch_warnings(record=True):
K, W, Y, X_mean = picard(X, ortho=ortho, whiten=whiten,
return_X_mean=True, w_init=w_init,
n_components=n_component,
random_state=rng, max_iter=2,
verbose=False)
if not whiten:
K = np.eye(N)
if ortho and whiten:
assert_allclose(Y.dot(Y.T) / T, np.eye(n_component), atol=1e-8)
Y_prime = np.dot(W, K).dot(X - X_mean[:, None])
assert_allclose(Y, Y_prime, atol=1e-7)
def test_pre_fastica():
N, T = 3, 1000
rng = np.random.RandomState(42)
names = ['tanh', 'cube']
for j, fun in enumerate([Tanh(params=dict(alpha=0.5)), 'cube']):
if j == 0:
S = rng.laplace(size=(N, T))
else:
S = rng.uniform(low=-1, high=1, size=(N, T))
A = rng.randn(N, N)
X = np.dot(A, S)
K, W, Y = picard(X, fun=fun, ortho=False, random_state=0,
fastica_it=10)
if fun == 'tanh':
fun = Tanh()
elif fun == 'exp':
fun = Exp()
elif fun == 'cube':
fun = Cube()
# Get the final gradient norm
psiY = fun.score_and_der(Y)[0]
G = np.inner(psiY, Y) / float(T) - np.eye(N)
err_msg = 'fun %s, gradient norm greater than tol' % names[j]
assert_allclose(G, np.zeros((N, N)), atol=1e-7,
err_msg=err_msg)
assert Y.shape == X.shape
assert W.shape == A.shape
assert K.shape == A.shape
WA = W.dot(K).dot(A)
WA = permute(WA) # Permute and scale
err_msg = 'fun %s, wrong unmixing matrix' % names[j]
assert_allclose(WA, np.eye(N), rtol=0, atol=1e-1,
err_msg=err_msg)
def test_picard():
N, T = 3, 1000
rng = np.random.RandomState(42)
names = ['tanh', 'cube']
for j, fun in enumerate([Tanh(params=dict(alpha=0.5)), 'cube']):
if j == 0:
S = rng.laplace(size=(N, T))
else:
S = rng.uniform(low=-1, high=1, size=(N, T))
A = rng.randn(N, N)
X = np.dot(A, S)
K, W, Y = picard(X, fun=fun, ortho=False, random_state=0)
if fun == 'tanh':
fun = Tanh()
elif fun == 'exp':
fun = Exp()
elif fun == 'cube':
fun = Cube()
# Get the final gradient norm
psiY = fun.score_and_der(Y)[0]
G = np.inner(psiY, Y) / float(T) - np.eye(N)
err_msg = 'fun %s, gradient norm greater than tol' % names[j]
assert_allclose(G, np.zeros((N, N)), atol=1e-7,
err_msg=err_msg)
assert Y.shape == X.shape
assert W.shape == A.shape
assert K.shape == A.shape
WA = W.dot(K).dot(A)
WA = permute(WA) # Permute and scale
err_msg = 'fun %s, wrong unmixing matrix' % names[j]
assert_allclose(WA, np.eye(N), rtol=0, atol=1e-1,
err_msg=err_msg)
def test_extended():
N, T = 4, 2000
n = N // 2
rng = np.random.RandomState(42)
S = np.concatenate((rng.laplace(size=(n, T)),
rng.uniform(low=-1, high=1, size=(n, T))),
axis=0)
print(S.shape)
A = rng.randn(N, N)
X = np.dot(A, S)
K, W, Y = picard(X, ortho=False, random_state=0,
extended=True)
assert Y.shape == X.shape
assert W.shape == A.shape
assert K.shape == A.shape
WA = W.dot(K).dot(A)
WA = permute(WA) # Permute and scale
err_msg = 'wrong unmixing matrix'
assert_allclose(WA, np.eye(N), rtol=0, atol=1e-1,
err_msg=err_msg)
def test_shift():
N, T = 5, 1000
rng = np.random.RandomState(42)
S = rng.laplace(size=(N, T))
A = rng.randn(N, N)
offset = rng.randn(N)
X = np.dot(A, S) + offset[:, None]
_, W, Y, X_mean = picard(X, ortho=False, whiten=False,
return_X_mean=True, random_state=rng)
assert_allclose(offset, X_mean, rtol=0, atol=0.2)
WA = W.dot(A)
WA = permute(WA)
assert_allclose(WA, np.eye(N), rtol=0, atol=0.2)
_, W, Y, X_mean = picard(X, ortho=False, whiten=False,
centering=False, return_X_mean=True,
random_state=rng)
assert_allclose(X_mean, 0)
def test_picardo():
N, T = 3, 2000
rng = np.random.RandomState(4)
S = rng.laplace(size=(N, T))
A = rng.randn(N, N)
X = np.dot(A, S)
names = ['tanh', 'exp', 'cube']
for fastica_it in [None, 2]:
for fun in names:
print(fun)
K, W, Y = picard(X, fun=fun, ortho=True, random_state=rng,
fastica_it=fastica_it, verbose=True,
extended=True)
if fun == 'tanh':
fun = Tanh()
elif fun == 'exp':
fun = Exp()
elif fun == 'cube':
fun = Cube()
# Get the final gradient norm
psiY = fun.score_and_der(Y)[0]
G = np.inner(psiY, Y) / float(T) - np.eye(N)
G = (G - G.T) / 2. # take skew-symmetric part
err_msg = 'fun %s, gradient norm greater than tol' % fun
assert_allclose(G, np.zeros((N, N)), atol=1e-7,
err_msg=err_msg)
assert Y.shape == X.shape
assert W.shape == A.shape
assert K.shape == A.shape
WA = W.dot(K).dot(A)
WA = permute(WA) # Permute and scale
err_msg = 'fun %s, wrong unmixing matrix' % fun
assert_allclose(WA, np.eye(N), rtol=0, atol=0.1,
err_msg=err_msg)
def test_bad_custom_density():
class CustomDensity(object):
def log_lik(self, Y):
return Y ** 4 / 4
def score_and_der(self, Y):
return Y ** 3, 3 * Y ** 2 + 2.
fun = CustomDensity()
X = np.random.randn(2, 10)
try:
picard(X, fun=fun, random_state=0)
except AssertionError:
pass
else:
        raise AssertionError('Bad function undetected')
def test_fun():
for fun in [Tanh(), Exp(), Cube()]:
check_density(fun)
def test_no_regression():
n_tests = 10
baseline = {}
baseline['lap', True] = 17.
baseline['lap', False] = 23.
baseline['gauss', True] = 58.
baseline['gauss', False] = 60.
N, T = 10, 1000
for mode in ['lap', 'gauss']:
for ortho in [True, False]:
n_iters = []
for i in range(n_tests):
rng = np.random.RandomState(i)
if mode == 'lap':
S = rng.laplace(size=(N, T))
else:
S = rng.randn(N, T)
A = rng.randn(N, N)
X = np.dot(A, S)
_, _, _, n_iter = picard(X, return_n_iter=True,
ortho=ortho, random_state=rng)
n_iters.append(n_iter)
n_mean = np.mean(n_iters)
nb_mean = baseline[mode, ortho]
err_msg = 'mode=%s, ortho=%s. %d iterations, expecting <%d.'
assert n_mean < nb_mean, err_msg % (mode, ortho, n_mean, nb_mean)
def test_amari_distance():
p = 3
rng = np.random.RandomState(0)
A = rng.randn(p, p)
W = np.linalg.pinv(A)
scale = rng.randn(p)
perm = np.argsort(rng.randn(p))
W = W[perm]
W *= scale[:, None]
assert amari_distance(W, A) < 1e-6
|
the-stack_0_19675 | import os
import random
os.environ['THEANO_FLAGS'] = 'floatX=float32,device=cpu'
os.environ['CUDA_VISIBLE_DEVICES'] = ''
import argparse
import sys
from multiprocessing import cpu_count
from rllab.misc.instrument import run_experiment_lite
from rllab.misc.instrument import VariantGenerator
from rllab import config
from curriculum.experiments.starts.maze.maze_ant.maze_ant_online_tscl_algo import run_task
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--ec2', '-e', action='store_true', default=False, help="add flag to run in ec2")
parser.add_argument('--clone', '-c', action='store_true', default=False,
help="add flag to copy file and checkout current")
parser.add_argument('--local_docker', '-d', action='store_true', default=False,
help="add flag to run in local dock")
parser.add_argument('--type', '-t', type=str, default='', help='set instance type')
parser.add_argument('--price', '-p', type=str, default='', help='set betting price')
parser.add_argument('--subnet', '-sn', type=str, default='', help='set subnet like us-west-1a')
parser.add_argument('--name', '-n', type=str, default='', help='set exp prefix name and new file name')
parser.add_argument('--debug', action='store_true', default=False, help="run code without multiprocessing")
args = parser.parse_args()
# subnets = [
# 'us-east-2b', 'us-east-2c', 'us-east-2a',
# ]
subnets = [
'ap-northeast-2c', 'ap-northeast-2a', 'ap-southeast-1b'
]
# subnets = [
# 'ap-northeast-2a', 'ap-northeast-2c', 'us-east-2b', 'ap-south-1a', 'us-east-2c', 'us-east-2a', 'ap-south-1b',
# 'us-east-1b', 'us-east-1a', 'us-east-1d', 'us-east-1e', 'eu-west-1c', 'eu-west-1a', 'eu-west-1b'
# ]
ec2_instance = 'c4.4xlarge'
    # configure instance type
info = config.INSTANCE_TYPE_INFO[ec2_instance]
config.AWS_INSTANCE_TYPE = ec2_instance
# config.AWS_SPOT_PRICE = str(info["price"])
config.AWS_SPOT_PRICE = '1.1'
n_parallel = int(info["vCPU"] / 2) # make the default 4 if not using ec2
    args.ec2 = False  # override the command-line flag: do not launch on EC2
if args.ec2:
mode = 'ec2'
elif args.local_docker:
mode = 'local_docker'
n_parallel = cpu_count() if not args.debug else 1
else:
mode = 'local'
n_parallel = cpu_count() if not args.debug else 1
# n_parallel = multiprocessing.cpu_count()
vg = VariantGenerator()
vg.add('maze_id', [0]) # default is 0
vg.add('start_size', [15]) # this is the ultimate start we care about: getting the pendulum upright
vg.add('start_goal', [[0, 4, 0.55, 1, 0, 0, 0, 0, 1, 0, -1, 0, -1, 0, 1,]])
vg.add('start_range',
lambda maze_id: [4] if maze_id == 0 else [7]) # this will be used also as bound of the state_space
# vg.add('start_center', lambda maze_id: [(2, 2)] if maze_id == 0 else [(0, 0)])
vg.add('start_center', lambda maze_id, start_size: [(2, 2)] if maze_id == 0 and start_size == 2
else [(2, 2, 0, 0)] if maze_id == 0 and start_size == 4
else [(0, 0)] if start_size == 2
else [(0, 0, 0, 0)])
vg.add('ultimate_goal', lambda maze_id: [(0, 4)] if maze_id == 0 else [(2, 4), (0, 0)] if maze_id == 12 else [(4, 4)])
vg.add('goal_size', [2]) # this is the ultimate goal we care about: getting the pendulum upright
vg.add('goal_range',
lambda maze_id: [4] if maze_id == 0 else [7])
vg.add('goal_center', lambda maze_id: [(2, 2)] if maze_id == 0 else [(0, 0)])
vg.add('terminal_eps', [1.0])
# brownian params
vg.add('brownian_variance', [1])
vg.add('initial_brownian_horizon', [200])
vg.add('brownian_horizon', [50])
vg.add('baseline', ["MLP"])
# goal-algo params
vg.add('min_reward', [0.1])
vg.add('max_reward', [0.9])
vg.add('distance_metric', ['L2'])
vg.add('extend_dist_rew', [False]) # !!!!
vg.add('inner_weight', [0]) #TODO: try different inner weights
vg.add('goal_weight', lambda inner_weight: [1000] if inner_weight > 0 else [1])
vg.add('regularize_starts', [0])
vg.add('persistence', [1])
vg.add('n_traj', [3]) # only for labeling and plotting (for now, later it will have to be equal to persistence!)
vg.add('filter_bad_starts', [False])
vg.add('sampling_res', [2])
vg.add('with_replacement', [True])
# replay buffer
vg.add('replay_buffer', [True])
vg.add('coll_eps', [0.05]) # should try this
vg.add('num_new_starts', [200])
vg.add('num_old_starts', [100])
vg.add('feasibility_path_length', [100])
# sampling params
vg.add('horizon', lambda maze_id: [2000] if maze_id == 0 else [500]) #TODO: change
vg.add('outer_iters', lambda maze_id: [2000] if maze_id == 0 else [1000])
vg.add('inner_iters', [5]) # again we will have to divide/adjust the
vg.add('pg_batch_size', [60000]) #TODO: change
# policy initialization
vg.add('output_gain', [0.1])
vg.add('policy_init_std', [1])
vg.add('learn_std', [False]) #2
vg.add('adaptive_std', [False])
vg.add('discount', [0.995]) #1
vg.add('seed_with', ['only_goods'])
#vg.add('seed_with', ['all_previous'])
# vg.add('seed', [2,3,4])
vg.add('seed', [43, 13, 23, 33, 53, 63, 73])
# vg.add('seed', range(100, 600, 100))
# sweeping: horizon, seed, feasibility_path_length, pg_batch_size
# possible important: learn_std
# Launching
subnets = [
"us-west-2a","us-west-2b", 'us-west-2c',
]
# mode = 'ec2'
mode = "local"
exp_prefix = 'ant-startgen-online2'
print("\n" + "**********" * 10 + "\nexp_prefix: {}\nvariants: {}".format(exp_prefix, vg.size))
for vv in vg.variants():
# import pdb; pdb.set_trace()
print('Running on type {}, with price {}, parallel {} on the subnets: '.format(config.AWS_INSTANCE_TYPE,
config.AWS_SPOT_PRICE,
n_parallel),
*subnets)
if mode in ['ec2', 'local_docker']:
subnet = random.choice(subnets)
config.AWS_REGION_NAME = subnet[:-1]
config.AWS_KEY_NAME = config.ALL_REGION_AWS_KEY_NAMES[
config.AWS_REGION_NAME]
config.AWS_IMAGE_ID = config.ALL_REGION_AWS_IMAGE_IDS[
config.AWS_REGION_NAME]
config.AWS_SECURITY_GROUP_IDS = \
config.ALL_REGION_AWS_SECURITY_GROUP_IDS[
config.AWS_REGION_NAME]
config.AWS_NETWORK_INTERFACES = [
dict(
SubnetId=config.ALL_SUBNET_INFO[subnet]["SubnetID"],
Groups=config.AWS_SECURITY_GROUP_IDS,
DeviceIndex=0,
AssociatePublicIpAddress=True,
)
]
run_experiment_lite(
# use_cloudpickle=False,
stub_method_call=run_task,
variant=vv,
mode=mode,
# Number of parallel workers for sampling
n_parallel=n_parallel,
# Only keep the snapshot parameters for the last iteration
snapshot_mode="last",
seed=vv['seed'],
# plot=True,
exp_prefix=exp_prefix,
# exp_name=exp_name,
# for sync the pkl file also during the training
sync_s3_pkl=True,
# sync_s3_png=True,
sync_s3_html=True,
# # use this ONLY with ec2 or local_docker!!!
pre_commands=[
'export MPLBACKEND=Agg',
'pip install --upgrade pip',
'pip install --upgrade -I tensorflow',
'pip install git+https://github.com/tflearn/tflearn.git',
'pip install dominate',
'pip install multiprocessing_on_dill',
'pip install scikit-image',
'conda install numpy -n rllab3 -y',
],
# terminate_machine=False,
)
# sys.exit()
if mode == 'local_docker':
sys.exit()
else:
# run_task(vv)
run_experiment_lite(
# use_cloudpickle=False,
stub_method_call=run_task,
variant=vv,
mode='local',
n_parallel=5,
# Only keep the snapshot parameters for the last iteration
snapshot_mode="last",
seed=vv['seed'],
exp_prefix=exp_prefix,
# exp_name=exp_name,
)
sys.exit()
|
the-stack_0_19677 | # -*- coding: utf-8 -*-
"""
bromelia._internal_utils
~~~~~~~~~~~~~~~~~~~~~~~~
Defines utility functions that are consumed internally by the library.
:copyright: (c) 2020-present Henrique Marques Ribeiro.
:license: MIT, see LICENSE for more details.
"""
import ipaddress
import logging
import os
import struct
import yaml
from collections import namedtuple
from .exceptions import InvalidConfigKey
from .exceptions import InvalidConfigValue
from .definitions import diameter_application_ids
from .definitions import diameter_avps
from .definitions import diameter_command_codes
def convert_to_1_byte(content):
return struct.pack(">B", content)
def convert_to_2_bytes(content):
return struct.pack(">H", content)
def convert_to_3_bytes(content):
return content.to_bytes(3, byteorder="big")
def convert_to_4_bytes(content):
return struct.pack(">L", content)
def convert_to_8_bytes(content):
return struct.pack(">q", content)
def convert_to_integer_from_bytes(integer):
return int.from_bytes(integer, byteorder="big")
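# Illustrative examples (not part of the library API) of the helpers above:
#   convert_to_1_byte(1)                          -> b'\x01'
#   convert_to_2_bytes(257)                       -> b'\x01\x01'
#   convert_to_3_bytes(1)                         -> b'\x00\x00\x01'
#   convert_to_integer_from_bytes(b'\x01\x01')    -> 257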
def header_representation(header):
cmd_code = header.command_code
application_id = header.application_id
cmd_code_str, cmd_code_int = command_code_look_up(cmd_code)
flag_representation = ""
if header.is_request():
flag_representation += " REQ|"
cmd_code_str = cmd_code_str[:3]
else:
flag_representation += " "
cmd_code_str = cmd_code_str[4:]
if header.is_proxiable():
flag_representation += "PXY|"
if header.is_error():
flag_representation += "ERR|"
application_id = header.application_id
app_id_str, app_id_int = application_id_look_up(application_id)
return {
"cmd_code_str": cmd_code_str,
"cmd_code_int": cmd_code_int,
"flag_representation": flag_representation[:-1],
"app_id_str": app_id_str,
"app_id_int": app_id_int
}
def application_id_look_up(application_id):
if not application_id:
return "", "Unknown"
for application in diameter_application_ids:
if application["id"] == convert_to_integer_from_bytes(application_id):
return application["long_name"], application["id"]
return "", "Unknown"
def command_code_look_up(command_code):
if not command_code:
return "", "Unknown"
for code in diameter_command_codes:
if code["id"] == convert_to_integer_from_bytes(command_code):
return code["short_name"], code["id"]
return "", "Unknown"
def avp_look_up(avp):
if not avp.get_vendor_id():
if avp.get_code() == 0:
return "Unknown"
for diameter_avp in diameter_avps:
if diameter_avp["id"] == avp.get_code():
return diameter_avp["name"]
return "Unknown"
def _convert_config_to_connection_obj(config):
LocalNode = namedtuple("LocalNode", [
"host_name",
"realm",
"ip_address",
"port"
]
)
PeerNode = namedtuple("PeerNode", [
"host_name",
"realm",
"ip_address",
"port"
]
)
Connection = namedtuple("Connection", [
"name",
"mode",
"transport_type",
"local_node",
"peer_node",
"application_ids",
"watchdog_timeout"
]
)
config_mask = [
"MODE",
"TRANSPORT_TYPE",
"APPLICATIONS",
"LOCAL_NODE_HOSTNAME",
"LOCAL_NODE_REALM",
"LOCAL_NODE_IP_ADDRESS",
"LOCAL_NODE_PORT",
"PEER_NODE_HOSTNAME",
"PEER_NODE_REALM",
"PEER_NODE_IP_ADDRESS",
"PEER_NODE_PORT",
"WATCHDOG_TIMEOUT"
]
for key in config.keys():
if key not in config_mask:
raise InvalidConfigKey(f"Invalid config key '{key}' found")
for key, value in config.items():
if key == "MODE":
if value not in ["CLIENT", "SERVER"]:
raise InvalidConfigValue("Invalid config value '{value}' "\
f"found for config key '{key}'. It MUST be "\
"either 'CLIENT' or 'SERVER'")
mode = value
elif key == "TRANSPORT_TYPE":
if value not in ["TCP", "SCTP"]:
raise InvalidConfigValue("Invalid config value '{value}' "\
f"found for config key '{key}'. It MUST be "\
"either 'TCP' or 'SCTP'")
transport_type = value
elif key == "APPLICATIONS":
if value:
for app in value:
app_keys = app.keys()
                    if not all(k in app_keys for k in ("vendor_id", "app_id")):
raise InvalidConfigValue("Invalid config value "\
f"found for config key '{key}'. It "\
"MUST be a dictionary with "\
"'vendor_id' and 'app_id' keys")
for key in app_keys:
if not isinstance(app[key], bytes):
raise InvalidConfigValue("Invalid config value "\
f"'{value}' found for config key "\
f"'{key}'. It MUST be a "\
"dictionary with byte value in "\
"each key")
application_ids = value
elif key == "LOCAL_NODE_HOSTNAME":
local_node_host_name = value
elif key == "LOCAL_NODE_REALM":
local_node_realm = value
elif key == "LOCAL_NODE_IP_ADDRESS":
try:
ipaddress.IPv4Address(value)
local_node_ip_address = value
except ipaddress.AddressValueError:
raise InvalidConfigValue(f"Invalid config value '{value}' "\
f"found for config key '{key}'. It MUST "\
"correspond to a valid IPv4 address format")
elif key == "LOCAL_NODE_PORT":
local_node_port = value
elif key == "PEER_NODE_HOSTNAME":
peer_node_host_name = value
elif key == "PEER_NODE_REALM":
peer_node_realm = value
elif key == "PEER_NODE_IP_ADDRESS":
try:
ipaddress.IPv4Address(value)
peer_node_ip_address = value
except ipaddress.AddressValueError:
raise InvalidConfigValue(f"Invalid config value '{value}' "\
f"found for config key '{key}'. It MUST "\
"correspond to a valid IPv4 address format")
elif key == "PEER_NODE_PORT":
peer_node_port = value
elif key == "WATCHDOG_TIMEOUT":
if not isinstance(value, int):
raise InvalidConfigValue(f"Invalid config value '{value}' "\
f"found for config key '{key}'. It MUST be "\
"'int'")
watchdog_timeout = value
local_node = LocalNode(host_name=local_node_host_name,
realm=local_node_realm,
ip_address=local_node_ip_address,
port=local_node_port
)
peer_node = PeerNode(host_name=peer_node_host_name,
realm=peer_node_realm,
ip_address=peer_node_ip_address,
port=peer_node_port
)
connection = Connection(name="bromelia",
mode=mode,
transport_type=transport_type,
application_ids=application_ids,
local_node=local_node,
peer_node=peer_node,
watchdog_timeout=watchdog_timeout)
return connection
def _convert_file_to_config(filepath=None, variables_dictionary=globals()):
if not filepath:
filepath = os.path.join(os.getcwd(), "config.yaml")
    try:
        if os.path.exists(filepath):
            with open(filepath, "r") as config_file:
                from_config_file = yaml.load(config_file, Loader=yaml.FullLoader)
    except Exception as e:
        logging.exception(f"_convert_file_to_config - exception: {e}")
        raise
    if from_config_file["api_version"] != "v1":
        raise InvalidConfigValue("Unsupported config file api_version: expected 'v1'")
configs = list()
for spec in from_config_file["spec"]:
for application in spec["applications"]:
vendor_id = application["vendor_id"]
app_id = application["app_id"]
application["vendor_id"] = variables_dictionary[vendor_id]
application["app_id"] = variables_dictionary[app_id]
configs.append({
"MODE": spec["mode"].upper(),
"TRANSPORT_TYPE": spec["transport_type"].upper(),
"APPLICATIONS": spec["applications"],
"LOCAL_NODE_HOSTNAME": spec["local"]["hostname"],
"LOCAL_NODE_REALM": spec["local"]["realm"],
"LOCAL_NODE_IP_ADDRESS": spec["local"]["ip_address"],
"LOCAL_NODE_PORT": spec["local"]["port"],
"PEER_NODE_HOSTNAME": spec["peer"]["hostname"],
"PEER_NODE_REALM": spec["peer"]["realm"],
"PEER_NODE_IP_ADDRESS": spec["peer"]["ip_address"],
"PEER_NODE_PORT": spec["peer"]["port"],
"WATCHDOG_TIMEOUT": 60
})
return configs
def get_app_ids(apps):
text = ""
for index, app in enumerate(apps):
app_name = application_id_look_up(app['app_id'])[0]
text += f"{app_name};"
if index == (len(apps) - 1):
return text[:-1]
|
the-stack_0_19678 | # -*- coding: utf-8 -*-
import sys
import os
import random
import cv2
from django.http import HttpResponse
from config import config
import time as t
import numpy as np
root = config.Config.root
caffe_root = config.Config.caffe_root
sys.path.insert(0, caffe_root + 'python')
import caffe  # imported after extending sys.path so the pycaffe bindings can be found
# enable GPU acceleration
caffe.set_device(0)
caffe.set_mode_gpu()
def bbreg(boundingbox, reg):
reg = reg.T
# calibrate bouding boxes
if reg.shape[1] == 1:
# print("reshape of reg")
pass # reshape of reg
w = boundingbox[:, 2] - boundingbox[:, 0] + 1
h = boundingbox[:, 3] - boundingbox[:, 1] + 1
bb0 = boundingbox[:, 0] + reg[:, 0] * w
bb1 = boundingbox[:, 1] + reg[:, 1] * h
bb2 = boundingbox[:, 2] + reg[:, 2] * w
bb3 = boundingbox[:, 3] + reg[:, 3] * h
boundingbox[:, 0:4] = np.array([bb0, bb1, bb2, bb3]).T
# print("bb", boundingbox)
return boundingbox
def pad(boxesA, w, h):
    boxes = boxesA.copy()  # work on a copy so the caller's array is not modified (value semantics)
# print('#################')
# print('boxes', boxes)
# print('w,h', w, h)
tmph = boxes[:, 3] - boxes[:, 1] + 1
tmpw = boxes[:, 2] - boxes[:, 0] + 1
numbox = boxes.shape[0]
# print('tmph', tmph)
# print('tmpw', tmpw)
dx = np.ones(numbox)
dy = np.ones(numbox)
edx = tmpw
edy = tmph
x = boxes[:, 0:1][:, 0]
y = boxes[:, 1:2][:, 0]
ex = boxes[:, 2:3][:, 0]
ey = boxes[:, 3:4][:, 0]
tmp = np.where(ex > w)[0]
if tmp.shape[0] != 0:
edx[tmp] = -ex[tmp] + w - 1 + tmpw[tmp]
ex[tmp] = w - 1
tmp = np.where(ey > h)[0]
if tmp.shape[0] != 0:
edy[tmp] = -ey[tmp] + h - 1 + tmph[tmp]
ey[tmp] = h - 1
tmp = np.where(x < 1)[0]
if tmp.shape[0] != 0:
dx[tmp] = 2 - x[tmp]
x[tmp] = np.ones_like(x[tmp])
tmp = np.where(y < 1)[0]
if tmp.shape[0] != 0:
dy[tmp] = 2 - y[tmp]
y[tmp] = np.ones_like(y[tmp])
# for python index from 0, while matlab from 1
dy = np.maximum(0, dy - 1)
dx = np.maximum(0, dx - 1)
y = np.maximum(0, y - 1)
x = np.maximum(0, x - 1)
edy = np.maximum(0, edy - 1)
edx = np.maximum(0, edx - 1)
ey = np.maximum(0, ey - 1)
ex = np.maximum(0, ex - 1)
# print("dy" ,dy )
# print("dx" ,dx )
# print("y " ,y )
# print("x " ,x )
# print("edy" ,edy)
# print("edx" ,edx)
# print("ey" ,ey )
# print("ex" ,ex )
# print('boxes', boxes)
return [dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph]
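# Note (added for clarity): `pad` returns destination ranges (dy..edy, dx..edx)
# into a zero-padded patch of size (tmph, tmpw) together with the clipped
# source ranges (y..ey, x..ex) of the image, so boxes that reach past the
# image border can be copied without indexing errors (see detect_face below).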
def rerec(bboxA):
# convert bboxA to square
w = bboxA[:, 2] - bboxA[:, 0]
h = bboxA[:, 3] - bboxA[:, 1]
l = np.maximum(w, h).T
# print('bboxA', bboxA)
# print('w', w)
# print('h', h)
# print('l', l)
bboxA[:, 0] = bboxA[:, 0] + w * 0.5 - l * 0.5
bboxA[:, 1] = bboxA[:, 1] + h * 0.5 - l * 0.5
bboxA[:, 2:4] = bboxA[:, 0:2] + np.repeat([l], 2, axis=0).T
return bboxA
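# Note (added for clarity): `rerec` grows each box to a square of side
# max(w, h) around its original centre, so the later square crops
# (24x24 / 48x48) do not distort the aspect ratio.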
def nms(boxes, threshold, type):
"""nms
:boxes: [:,0:5]
:threshold: 0.5 like
:type: 'Min' or others
:returns: TODO
"""
if boxes.shape[0] == 0:
return np.array([])
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
s = boxes[:, 4]
area = np.multiply(x2 - x1 + 1, y2 - y1 + 1)
I = np.array(s.argsort()) # read s using I
    pick = []
while len(I) > 0:
xx1 = np.maximum(x1[I[-1]], x1[I[0:-1]])
yy1 = np.maximum(y1[I[-1]], y1[I[0:-1]])
xx2 = np.minimum(x2[I[-1]], x2[I[0:-1]])
yy2 = np.minimum(y2[I[-1]], y2[I[0:-1]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
if type == 'Min':
o = inter / np.minimum(area[I[-1]], area[I[0:-1]])
else:
o = inter / (area[I[-1]] + area[I[0:-1]] - inter)
pick.append(I[-1])
I = I[np.where(o <= threshold)[0]]
return pick
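# Illustrative usage (mirrors the calls in detect_face below):
#   boxes is an (N, 5+) array with columns [x1, y1, x2, y2, score, ...];
#   pick = nms(boxes, 0.7, 'Union') returns the indices of the boxes to keep,
#   which are then applied as boxes = boxes[pick, :].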
def generateBoundingBox(map, reg, scale, t):
stride = 2
cellsize = 12
map = map.T
dx1 = reg[0, :, :].T
dy1 = reg[1, :, :].T
dx2 = reg[2, :, :].T
dy2 = reg[3, :, :].T
(x, y) = np.where(map >= t)
yy = y
xx = x
'''
if y.shape[0] == 1: # only one point exceed threshold
y = y.T
x = x.T
score = map[x,y].T
dx1 = dx1.T
dy1 = dy1.T
dx2 = dx2.T
dy2 = dy2.T
# a little stange, when there is only one bb created by PNet
#print("1: x,y", x,y)
a = (x*map.shape[1]) + (y+1)
x = a/map.shape[0]
y = a%map.shape[0] - 1
#print("2: x,y", x,y)
else:
score = map[x,y]
'''
# print("dx1.shape", dx1.shape)
# print('map.shape', map.shape)
score = map[x, y]
reg = np.array([dx1[x, y], dy1[x, y], dx2[x, y], dy2[x, y]])
if reg.shape[0] == 0:
pass
boundingbox = np.array([yy, xx]).T
bb1 = np.fix((stride * (boundingbox) + 1) / scale).T # matlab index from 1, so with "boundingbox-1"
bb2 = np.fix((stride * (boundingbox) + cellsize - 1 + 1) / scale).T # while python don't have to
score = np.array([score])
boundingbox_out = np.concatenate((bb1, bb2, score, reg), axis=0)
# print('(x,y)',x,y)
# print('score', score)
# print('reg', reg)
return boundingbox_out.T
def drawBoxes(im, boxes):
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
for i in range(x1.shape[0]):
cv2.rectangle(im, (int(x1[i]), int(y1[i])), (int(x2[i]), int(y2[i])), (0, 255, 0), 1)
return im
from time import time
_tstart_stack = []
def tic():
_tstart_stack.append(time())
def toc(fmt="Elapsed: %s s"):
print(fmt % (time() - _tstart_stack.pop()))
def detect_face(img, minsize, PNet, RNet, ONet, threshold, fastresize, factor):
img2 = img.copy()
factor_count = 0
    total_boxes = np.zeros((0, 9), np.float64)  # np.float alias was removed in recent NumPy
points = []
h = img.shape[0]
w = img.shape[1]
minl = min(h, w)
img = img.astype(float)
m = 12.0 / minsize
minl = minl * m
# total_boxes = np.load('total_boxes.npy')
# total_boxes = np.load('total_boxes_242.npy')
# total_boxes = np.load('total_boxes_101.npy')
# create scale pyramid
scales = []
while minl >= 12:
scales.append(m * pow(factor, factor_count))
minl *= factor
factor_count += 1
# first stage
for scale in scales:
hs = int(np.ceil(h * scale))
ws = int(np.ceil(w * scale))
if fastresize:
im_data = (img - 127.5) * 0.0078125 # [0,255] -> [-1,1]
im_data = cv2.resize(im_data, (ws, hs)) # default is bilinear
else:
im_data = cv2.resize(img, (ws, hs)) # default is bilinear
im_data = (im_data - 127.5) * 0.0078125 # [0,255] -> [-1,1]
# im_data = imResample(img, hs, ws); print("scale:", scale)
im_data = np.swapaxes(im_data, 0, 2)
        im_data = np.array([im_data], dtype=np.float64)
PNet.blobs['data'].reshape(1, 3, ws, hs)
PNet.blobs['data'].data[...] = im_data
out = PNet.forward()
boxes = generateBoundingBox(out['prob1'][0, 1, :, :], out['conv4-2'][0], scale, threshold[0])
if boxes.shape[0] != 0:
# print(boxes[4:9])
# print('im_data', im_data[0:5, 0:5, 0], '\n')
# print('prob1', out['prob1'][0,0,0:3,0:3])
pick = nms(boxes, 0.5, 'Union')
if len(pick) > 0:
boxes = boxes[pick, :]
if boxes.shape[0] != 0:
total_boxes = np.concatenate((total_boxes, boxes), axis=0)
# np.save('total_boxes_101.npy', total_boxes)
#####
# 1 #
#####
# print("[1]:", total_boxes.shape[0])
# print(total_boxes)
# return total_boxes, []
numbox = total_boxes.shape[0]
if numbox > 0:
# nms
pick = nms(total_boxes, 0.7, 'Union')
total_boxes = total_boxes[pick, :]
# print("[2]:", total_boxes.shape[0])
# revise and convert to square
regh = total_boxes[:, 3] - total_boxes[:, 1]
regw = total_boxes[:, 2] - total_boxes[:, 0]
t1 = total_boxes[:, 0] + total_boxes[:, 5] * regw
t2 = total_boxes[:, 1] + total_boxes[:, 6] * regh
t3 = total_boxes[:, 2] + total_boxes[:, 7] * regw
t4 = total_boxes[:, 3] + total_boxes[:, 8] * regh
t5 = total_boxes[:, 4]
total_boxes = np.array([t1, t2, t3, t4, t5]).T
# print("[3]:",total_boxes.shape[0])
# print(regh)
# print(regw)
# print('t1',t1)
# print(total_boxes)
total_boxes = rerec(total_boxes) # convert box to square
# print("[4]:", total_boxes.shape[0])
total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4])
# print("[4.5]:", total_boxes.shape[0])
# print(total_boxes)
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = pad(total_boxes, w, h)
# print(total_boxes.shape)
# print(total_boxes)
numbox = total_boxes.shape[0]
if numbox > 0:
# second stage
# print('tmph', tmph)
# print('tmpw', tmpw)
# print("y,ey,x,ex", y, ey, x, ex, )
# print("edy", edy)
# tempimg = np.load('tempimg.npy')
# construct input for RNet
tempimg = np.zeros((numbox, 24, 24, 3)) # (24, 24, 3, numbox)
for k in range(numbox):
tmp = np.zeros((int(tmph[k]) + 1, int(tmpw[k]) + 1, 3))
# print("dx[k], edx[k]:", dx[k], edx[k])
# print("dy[k], edy[k]:", dy[k], edy[k])
# print("img.shape", img[y[k]:ey[k]+1, x[k]:ex[k]+1].shape)
# print("tmp.shape", tmp[dy[k]:edy[k]+1, dx[k]:edx[k]+1].shape)
tmp[int(dy[k]):int(edy[k]) + 1, int(dx[k]):int(edx[k]) + 1] = img[int(y[k]):int(ey[k]) + 1,
int(x[k]):int(ex[k]) + 1]
# print("y,ey,x,ex", y[k], ey[k], x[k], ex[k])
# print("tmp", tmp.shape)
tempimg[k, :, :, :] = cv2.resize(tmp, (24, 24))
# tempimg[k,:,:,:] = imResample(tmp, 24, 24)
# print('tempimg', tempimg[k,:,:,:].shape)
# print(tempimg[k,0:5,0:5,0] )
# print(tempimg[k,0:5,0:5,1] )
# print(tempimg[k,0:5,0:5,2] )
# print(k)
# print(tempimg.shape)
# print(tempimg[0,0,0,:])
tempimg = (tempimg - 127.5) * 0.0078125 # done in imResample function wrapped by python
# np.save('tempimg.npy', tempimg)
# RNet
tempimg = np.swapaxes(tempimg, 1, 3)
# print(tempimg[0,:,0,0])
RNet.blobs['data'].reshape(numbox, 3, 24, 24)
RNet.blobs['data'].data[...] = tempimg
out = RNet.forward()
# print(out['conv5-2'].shape)
# print(out['prob1'].shape)
score = out['prob1'][:, 1]
# print('score', score)
pass_t = np.where(score > threshold[1])[0]
# print('pass_t', pass_t)
score = np.array([score[pass_t]]).T
total_boxes = np.concatenate((total_boxes[pass_t, 0:4], score), axis=1)
# print("[5]:", total_boxes.shape[0])
# print(total_boxes)
# print("1.5:",total_boxes.shape)
mv = out['conv5-2'][pass_t, :].T
# print("mv", mv)
if total_boxes.shape[0] > 0:
pick = nms(total_boxes, 0.7, 'Union')
# print('pick', pick)
if len(pick) > 0:
total_boxes = total_boxes[pick, :]
# print("[6]:", total_boxes.shape[0])
total_boxes = bbreg(total_boxes, mv[:, pick])
# print("[7]:", total_boxes.shape[0])
total_boxes = rerec(total_boxes)
# print("[8]:", total_boxes.shape[0])
#####
# 2 #
#####
# print("2:", total_boxes.shape)
numbox = total_boxes.shape[0]
if numbox > 0:
# third stage
total_boxes = np.fix(total_boxes)
[dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph] = pad(total_boxes, w, h)
# print('tmpw', tmpw)
# print('tmph', tmph)
# print('y ', y)
# print('ey', ey)
# print('x ', x)
# print('ex', ex)
tempimg = np.zeros((numbox, 48, 48, 3))
for k in range(numbox):
tmp = np.zeros((int(tmph[k]), int(tmpw[k]), 3))
tmp[int(dy[k]):int(edy[k]) + 1, int(dx[k]):int(edx[k]) + 1] = img[int(y[k]):int(ey[k]) + 1,
int(x[k]):int(ex[k]) + 1]
tempimg[k, :, :, :] = cv2.resize(tmp, (48, 48))
tempimg = (tempimg - 127.5) * 0.0078125 # [0,255] -> [-1,1]
# ONet
tempimg = np.swapaxes(tempimg, 1, 3)
ONet.blobs['data'].reshape(numbox, 3, 48, 48)
ONet.blobs['data'].data[...] = tempimg
out = ONet.forward()
score = out['prob1'][:, 1]
points = out['conv6-3']
pass_t = np.where(score > threshold[2])[0]
points = points[pass_t, :]
score = np.array([score[pass_t]]).T
total_boxes = np.concatenate((total_boxes[pass_t, 0:4], score), axis=1)
# print("[9]:", total_boxes.shape[0])
mv = out['conv6-2'][pass_t, :].T
w = total_boxes[:, 3] - total_boxes[:, 1] + 1
h = total_boxes[:, 2] - total_boxes[:, 0] + 1
points[:, 0:5] = np.tile(w, (5, 1)).T * points[:, 0:5] + np.tile(total_boxes[:, 0], (5, 1)).T - 1
points[:, 5:10] = np.tile(h, (5, 1)).T * points[:, 5:10] + np.tile(total_boxes[:, 1], (5, 1)).T - 1
if total_boxes.shape[0] > 0:
total_boxes = bbreg(total_boxes, mv[:, :])
# print("[10]:", total_boxes.shape[0])
pick = nms(total_boxes, 0.7, 'Min')
# print(pick)
if len(pick) > 0:
total_boxes = total_boxes[pick, :]
# print("[11]:", total_boxes.shape[0])
points = points[pick, :]
#####
# 3 #
#####
# print("3:", total_boxes.shape)
return total_boxes, points
def initFaceDetector():
minsize = 20
caffe_model_path = "/home/duino/iactive/mtcnn/model"
threshold = [0.6, 0.7, 0.7]
factor = 0.709
caffe.set_mode_cpu()
PNet = caffe.Net(caffe_model_path + "/det1.prototxt", caffe_model_path + "/det1.caffemodel", caffe.TEST)
RNet = caffe.Net(caffe_model_path + "/det2.prototxt", caffe_model_path + "/det2.caffemodel", caffe.TEST)
ONet = caffe.Net(caffe_model_path + "/det3.prototxt", caffe_model_path + "/det3.caffemodel", caffe.TEST)
return (minsize, PNet, RNet, ONet, threshold, factor)
def haveFace(img, facedetector):
minsize = facedetector[0]
PNet = facedetector[1]
RNet = facedetector[2]
ONet = facedetector[3]
threshold = facedetector[4]
factor = facedetector[5]
if max(img.shape[0], img.shape[1]) < minsize:
return False, []
img_matlab = img.copy()
tmp = img_matlab[:, :, 2].copy()
img_matlab[:, :, 2] = img_matlab[:, :, 0]
img_matlab[:, :, 0] = tmp
# tic()
boundingboxes, points = detect_face(img_matlab, minsize, PNet, RNet, ONet, threshold, False, factor)
# toc()
containFace = (True, False)[boundingboxes.shape[0] == 0]
return containFace, boundingboxes
def locate(imgpath):
starttime=t.time()
minsize = 20
caffe_model_path = root + 'model/mtcnn'
threshold = [0.6, 0.7, 0.7]
factor = 0.709
caffe.set_mode_cpu()
PNet = caffe.Net(caffe_model_path + "/det1.prototxt", caffe_model_path + "/det1.caffemodel", caffe.TEST)
RNet = caffe.Net(caffe_model_path + "/det2.prototxt", caffe_model_path + "/det2.caffemodel", caffe.TEST)
ONet = caffe.Net(caffe_model_path + "/det3.prototxt", caffe_model_path + "/det3.caffemodel", caffe.TEST)
img = cv2.imread(imgpath)
img_matlab = img.copy()
tmp = img_matlab[:, :, 2].copy()
img_matlab[:, :, 2] = img_matlab[:, :, 0]
img_matlab[:, :, 0] = tmp
boundingboxes, points = detect_face(img_matlab, minsize, PNet, RNet, ONet, threshold, False, factor)
x1 = boundingboxes[:, 0]
y1 = boundingboxes[:, 1]
x2 = boundingboxes[:, 2]
y2 = boundingboxes[:, 3]
height = y2 - y1
width = x2 - x1
    result = '['
    for i in range(x1.shape[0]):
        result += '{"x":' + str(int(x1[i])) + ',"y":' + str(int(y1[i])) + ',"width":' + str(int(width[i])) + ',"height":' + str(int(height[i])) + '},'
    result = result[:-1]
    result += ']'
endtime=t.time()
return '{"status":true, "data":'+ str(result) +' ,"msg":"成功","runtime":'+ str(endtime-starttime)+'}'
# 保存上传文件
def handle_uploaded_file(file, filename):
if not os.path.exists('upload/'):
os.mkdir('upload/')
with open('upload/' + filename, 'wb+') as destination:
for chunk in file.chunks():
destination.write(chunk)
# api返回函数
def getLocate(request):
if request.method == 'POST':
if len(request.FILES) != 1:
return HttpResponse('{"status":false,"data":"","msg":"图片参数错误!"}')
name = str(random.randint(10000, 99999)) + str(t.time()) # 随机保存图片的名字
handle_uploaded_file(request.FILES['pic'], str(name))
        returnData = locate(root + "RestServer/upload/" + name)
        os.remove(root + "RestServer/upload/" + str(name))
        return HttpResponse(returnData)
|
the-stack_0_19680 | import fluids
import pygame
import numpy as np
import scipy
import random
import sys
interesting_seeds = [
27,
54,
55,
57,
64,
]
np.random.seed(interesting_seeds[0])
random.seed(interesting_seeds[0])
simulator = fluids.FluidSim(visualization_level=4, # How much debug visualization you want to enable. Set to 0 for no vis
fps=0, # If set to non 0, caps the FPS. Target is 30
obs_space=fluids.OBS_GRID,# OBS_BIRDSEYE, OBS_GRID, or OBS_NONE
background_control=fluids.BACKGROUND_CSP) # BACKGROUND_CSP or BACKGROUND_NULL
state = fluids.State(
layout=fluids.STATE_CITY,
use_traffic_lights=False,
background_cars=0, # How many background cars
background_peds=0,
controlled_cars=1, # How many cars to control. Set to 0 for background cars only
)
simulator.set_state(state)
car_keys = simulator.get_control_keys()
step_counter = 0
max_steps_counter = 1500
while True:
actions = {}
# Uncomment any of these lines.
# VelocityAction is vel for car to move along trajectory
# SteeringAction is steer, acc control
# KeyboardAction is use keyboard input
# SteeringVelAction is steer, vel control
# actions = simulator.get_supervisor_actions(fluids.SteeringAction, keys=car_keys)
# actions = simulator.get_supervisor_actions(fluids.VelocityAction, keys=car_keys)
# actions = simulator.get_supervisor_actions(fluids.SteeringAccAction, keys=car_keys)
actions = simulator.get_supervisor_actions(fluids.SteeringVelAction, keys=car_keys)
# actions = {k:fluids.VelocityAction(1) for k in car_keys}
# actions = {k:fluids.SteeringAction(0, 1) for k in car_keys}
# actions = {k:fluids.KeyboardAction() for k in car_keys}
# actions = {k:fluids.SteeringVelAction(0, 1) for k in car_keys}
rew = simulator.step(actions)
obs = simulator.get_observations(car_keys)
simulator.render()
step_counter += 1
if step_counter > max_steps_counter or simulator.reached_goal:
break
# collect preformance metrics for agent car
results = simulator.wrap_up()
print(results)
|
the-stack_0_19681 | # Copyright 2021, Kay Hayen, mailto:[email protected]
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Recursion into other modules.
"""
import glob
import os
from nuitka import ModuleRegistry, Options
from nuitka.Errors import NuitkaForbiddenImportEncounter
from nuitka.importing import ImportCache, Importing, StandardLibrary
from nuitka.ModuleRegistry import addUsedModule, getRootTopModule
from nuitka.pgo.PGO import decideInclusionFromPGO
from nuitka.plugins.Plugins import Plugins
from nuitka.PythonVersions import python_version
from nuitka.Tracing import recursion_logger
from nuitka.utils.FileOperations import listDir
from nuitka.utils.ModuleNames import ModuleName
from .Importing import getModuleNameAndKindFromFilename, locateModule
def _recurseTo(module_name, module_filename, module_kind):
from nuitka.tree import Building
module, is_added = Building.buildModule(
module_filename=module_filename,
module_name=module_name,
source_code=None,
is_top=False,
is_main=False,
is_extension=module_kind == "extension",
is_fake=False,
hide_syntax_error=True,
)
ImportCache.addImportedModule(module)
return module, is_added
def recurseTo(signal_change, module_name, module_filename, module_kind, reason):
try:
module = ImportCache.getImportedModuleByNameAndPath(
module_name, module_filename
)
except KeyError:
module = None
if module is None:
Plugins.onModuleRecursion(
module_filename=module_filename,
module_name=module_name,
module_kind=module_kind,
)
module, added_flag = _recurseTo(
module_name=module_name,
module_filename=module_filename,
module_kind=module_kind,
)
if added_flag and signal_change is not None:
signal_change("new_code", module.getSourceReference(), reason)
return module
def decideRecursion(module_filename, module_name, module_kind, extra_recursion=False):
# Many branches, which make decisions immediately, by returning
# pylint: disable=too-many-branches,too-many-return-statements
if module_name == "__main__":
return False, "Main program is not followed to a second time."
# In -m mode, when including the package, do not duplicate main program.
if (
Options.hasPythonFlagPackageMode()
and not Options.shallMakeModule()
and module_name.getBasename() == "__main__"
):
if module_name.getPackageName() == getRootTopModule().getRuntimePackageValue():
return False, "Main program is already included in package mode."
plugin_decision = Plugins.onModuleEncounter(
module_filename=module_filename,
module_name=module_name,
module_kind=module_kind,
)
if plugin_decision is not None:
return plugin_decision
if module_kind == "extension":
if Options.isStandaloneMode():
return True, "Extension module needed for standalone mode."
else:
return False, "Extension module cannot be inspected."
# PGO decisions are not overruling plugins, but all command line options, they are
# supposed to be applied already.
is_stdlib = StandardLibrary.isStandardLibraryPath(module_filename)
if not is_stdlib or Options.shallFollowStandardLibrary():
# TODO: Bad placement of this function or should PGO also know about
# bytecode modules loaded or not.
from nuitka.tree.Building import decideCompilationMode
if (
decideCompilationMode(is_top=False, module_name=module_name, for_pgo=True)
== "compiled"
):
pgo_decision = decideInclusionFromPGO(
module_name=module_name,
module_kind=module_kind,
)
if pgo_decision is not None:
return pgo_decision, "PGO based decision"
no_case, reason = module_name.matchesToShellPatterns(
patterns=Options.getShallFollowInNoCase()
)
if no_case:
return (False, "Module %s instructed by user to not follow to." % reason)
any_case, reason = module_name.matchesToShellPatterns(
patterns=Options.getShallFollowModules()
)
if any_case:
return (True, "Module %s instructed by user to follow to." % reason)
if extra_recursion:
return (True, "Lives in plug-in directory.")
if is_stdlib and Options.shallFollowStandardLibrary():
return (True, "Instructed by user to follow to standard library.")
if Options.shallFollowAllImports():
if is_stdlib:
if StandardLibrary.isStandardLibraryNoAutoInclusionModule(module_name):
return (
True,
"Instructed by user to follow all modules, including non-automatic standard library modules.",
)
else:
return (
True,
"Instructed by user to follow to all non-standard library modules.",
)
if Options.shallFollowNoImports():
return (None, "Instructed by user to not follow at all.")
# Means, we were not given instructions how to handle things.
return (
None,
"Default behavior in non-standalone mode, not following without request.",
)
def considerFilename(module_filename):
module_filename = os.path.normpath(module_filename)
if os.path.isdir(module_filename):
module_filename = os.path.abspath(module_filename)
module_name = os.path.basename(module_filename)
return module_filename, module_name
elif module_filename.endswith(".py"):
module_name = os.path.basename(module_filename)[:-3]
return module_filename, module_name
elif module_filename.endswith(".pyw"):
module_name = os.path.basename(module_filename)[:-4]
return module_filename, module_name
else:
return None
def isSameModulePath(path1, path2):
if os.path.basename(path1) == "__init__.py":
path1 = os.path.dirname(path1)
if os.path.basename(path2) == "__init__.py":
path2 = os.path.dirname(path2)
return os.path.abspath(path1) == os.path.abspath(path2)
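# Example: isSameModulePath("pkg/__init__.py", "pkg") is True, because a
# package "__init__.py" is normalized to its containing directory before the
# absolute paths are compared.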
def checkPluginSinglePath(plugin_filename, module_package):
# Many branches, for the decision is very complex, pylint: disable=too-many-branches
# The importing wants these to be unique.
plugin_filename = os.path.abspath(plugin_filename)
if Options.isShowInclusion():
recursion_logger.info(
"Checking detail plug-in path '%s' '%s':"
% (plugin_filename, module_package)
)
module_name, module_kind = Importing.getModuleNameAndKindFromFilename(
plugin_filename
)
module_name = ModuleName.makeModuleNameInPackage(module_name, module_package)
if module_kind == "extension" and not Options.isStandaloneMode():
recursion_logger.warning(
"Cannot include '%s' unless using at least standalone mode."
% module_name.asString()
)
if module_kind is not None:
decision, reason = decideRecursion(
module_filename=plugin_filename,
module_name=module_name,
module_kind=module_kind,
extra_recursion=True,
)
if decision:
module = recurseTo(
signal_change=None,
module_filename=plugin_filename,
module_name=module_name,
module_kind=module_kind,
reason=reason,
)
if module:
if Options.isShowInclusion():
recursion_logger.info(
"Included '%s' as '%s'."
% (
module.getFullName(),
module,
)
)
ImportCache.addImportedModule(module)
if module.isCompiledPythonPackage():
package_filename = module.getFilename()
if os.path.isdir(package_filename):
# Must be a namespace package.
assert python_version >= 0x300
package_dir = package_filename
# Only include it, if it contains actual modules, which will
# recurse to this one and find it again.
else:
package_dir = os.path.dirname(package_filename)
# Real packages will always be included.
ModuleRegistry.addRootModule(module)
if Options.isShowInclusion():
recursion_logger.info("Package directory '%s'." % package_dir)
for sub_path, sub_filename in listDir(package_dir):
if sub_filename in ("__init__.py", "__pycache__"):
continue
assert sub_path != plugin_filename
if Importing.isPackageDir(sub_path) and not os.path.exists(
sub_path + ".py"
):
checkPluginSinglePath(
sub_path, module_package=module.getFullName()
)
elif sub_path.endswith(".py"):
checkPluginSinglePath(
sub_path, module_package=module.getFullName()
)
elif module.isCompiledPythonModule():
ModuleRegistry.addRootModule(module)
elif module.isPythonExtensionModule():
if Options.isStandaloneMode():
ModuleRegistry.addRootModule(module)
else:
recursion_logger.warning(
"Failed to include module from '%s'." % plugin_filename
)
def checkPluginPath(plugin_filename, module_package):
if Options.isShowInclusion():
recursion_logger.info(
"Checking top level plug-in path %s %s" % (plugin_filename, module_package)
)
plugin_info = considerFilename(module_filename=plugin_filename)
if plugin_info is not None:
# File or package makes a difference, handle that
if os.path.isfile(plugin_info[0]) or Importing.isPackageDir(plugin_info[0]):
checkPluginSinglePath(plugin_filename, module_package=module_package)
elif os.path.isdir(plugin_info[0]):
for sub_path, sub_filename in listDir(plugin_info[0]):
assert sub_filename != "__init__.py"
if Importing.isPackageDir(sub_path) or sub_path.endswith(".py"):
checkPluginSinglePath(sub_path, module_package=None)
else:
recursion_logger.warning(
"Failed to include module from %r." % plugin_info[0]
)
else:
recursion_logger.warning("Failed to recurse to directory %r." % plugin_filename)
def checkPluginFilenamePattern(pattern):
if Options.isShowInclusion():
recursion_logger.info("Checking plug-in pattern '%s':" % pattern)
assert not os.path.isdir(pattern), pattern
found = False
for filename in glob.iglob(pattern):
if filename.endswith(".pyc"):
continue
if not os.path.isfile(filename):
continue
found = True
checkPluginSinglePath(filename, module_package=None)
if not found:
recursion_logger.warning("Didn't match any files against pattern %r." % pattern)
def _addParentPackageUsages(using_module, module_name, signal_change, source_ref):
for parent_package_name in module_name.getParentPackageNames():
_parent_package_name, parent_package_filename, _finding = locateModule(
module_name=parent_package_name, parent_package=None, level=0
)
assert parent_package_filename is not None, parent_package_name
assert _parent_package_name == parent_package_name
_parent_package_name, package_module_kind = getModuleNameAndKindFromFilename(
parent_package_filename
)
_decision, reason = decideRecursion(
module_filename=parent_package_filename,
module_name=parent_package_name,
module_kind=package_module_kind,
)
used_package_module = recurseTo(
signal_change=signal_change,
module_name=parent_package_name,
module_filename=parent_package_filename,
module_kind=package_module_kind,
reason=reason,
)
addUsedModule(
module=used_package_module,
using_module=using_module,
usage_tag="package",
reason=reason,
source_ref=source_ref,
)
def considerUsedModules(module, signal_change):
for (
used_module_name,
used_module_filename,
finding,
level,
source_ref,
) in module.getUsedModules():
if finding == "not-found":
Importing.warnAbout(
importing=module,
source_ref=source_ref,
module_name=used_module_name,
level=level,
)
try:
if used_module_filename is None:
continue
_module_name, module_kind = getModuleNameAndKindFromFilename(
used_module_filename
)
decision, reason = decideRecursion(
module_filename=used_module_filename,
module_name=used_module_name,
module_kind=module_kind,
)
if decision:
_addParentPackageUsages(
using_module=module,
module_name=used_module_name,
signal_change=signal_change,
source_ref=source_ref,
)
used_module = recurseTo(
signal_change=signal_change,
module_name=used_module_name,
module_filename=used_module_filename,
module_kind=module_kind,
reason=reason,
)
addUsedModule(
module=used_module,
using_module=module,
usage_tag="import",
reason=reason,
source_ref=source_ref,
)
except NuitkaForbiddenImportEncounter as e:
recursion_logger.sysexit(
"Error, forbidden import of '%s' in module '%s' at '%s' encountered."
% (e, module.getFullName().asString(), source_ref.getAsString())
)
Plugins.considerImplicitImports(module=module, signal_change=signal_change)
|
the-stack_0_19682 | # -*- coding: utf-8 -*-
"""
@author: Khaled Ghobashy
"""
# Standard library imports
import os
import sys
import json
import inspect
# 3rd party library imports
import sympy as sm
# Local applicataion imports
from .....symbolic.components.matrices import AbstractMatrix, vector, quatrenion
from .....symbolic.systems.configuration_classes import Simple_geometry, Equal_to
################################################################################
class Encoder(json.JSONEncoder):
"""
    A subclass of `json.JSONEncoder` that overrides the `default` method
    to call a custom `JSONify` function, which returns a compatible type
    that can be serialized to JSON.
"""
def default(self, obj):
return JSONify(obj)
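# Typical usage of the encoder above (as done in generator.dump_JSON_text below):
#   json.dumps(data, cls=Encoder, indent=4)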
################################################################################
################################################################################
def JSONify(instance):
"""
A function that takes in a symbolic object or a class and returns a
    compatible type that can be serialized to JSON.
TODO:
DataTypes map
"""
# check if the given instance is a class
if inspect.isclass(instance):
constructor = instance.__name__
return constructor
# check if the given instance is a basic scalar data type that can be
    # understood by the JSON encoder directly.
if isinstance(instance, (str, float, int, bool)):
return instance
# check if the given instance is a basic sequence/iterable data type that
    # can be understood by the JSON encoder directly.
elif isinstance(instance, dict):
return {k: JSONify(v) for k,v in instance.items()}
elif isinstance(instance, list):
alias = [JSONify(value) for value in instance]
return alias
elif isinstance(instance, (tuple, sm.Tuple)):
alias = tuple(JSONify(value) for value in instance)
return alias
# Conversions of basic symbolic scalars / symbols to JSON
elif isinstance(instance, (sm.Number,)):
return float(instance)
elif isinstance(instance, (vector, quatrenion, sm.Symbol)):
text = str(instance)
return text
# Conversion of sympy matrices.
elif isinstance(instance, (sm.ImmutableDenseMatrix, sm.MutableDenseMatrix)):
if 1 in instance.shape:
alias = [JSONify(value) for value in instance]
else:
alias = [JSONify(value) for value in instance.tolist()]
data_object = {'constructor': 'array', 'args': alias}
return data_object
# Conversion of symbolic geometries.
elif isinstance(instance, tuple(Simple_geometry.__subclasses__())):
constructor = JSONify(instance.__class__)
args = [JSONify(arg) for arg in instance.args]
data_object = {'constructor': constructor, 'args': args}
return data_object
# Conversion of symbolic geometries.
elif isinstance(instance, tuple(AbstractMatrix.__subclasses__())):
constructor = JSONify(instance.__class__)
args = [JSONify(arg) for arg in instance.args]
data_object = {'constructor': constructor, 'args': args}
return data_object
# Conversion of Lambda functions.
elif isinstance(instance, (sm.Function, sm.Lambda)):
constructor = JSONify(instance.__class__)
args = [JSONify(arg) for arg in instance.args]
data_object = {'constructor': constructor, 'args': args}
return data_object
# Fall back to basic string message if datatype not included in previous
    # cases.
else:
return 'Data type not supported'
################################################################################
################################################################################
class generator(object):
"""
    This class serves as a JSON generator: it walks the symbolic configuration
    graph and serializes its inputs, evaluations, outputs and geometries map.
"""
def __init__(self, sym_config):
self.config = sym_config
self.configuration_name = self.config.name
self.topology_name = self.config.topology.name
self.graph = self.config.graph
self.input_nodes = self.config.input_nodes
self.output_nodes = self.config.output_nodes
self.intermediat_nodes = self.config.intermediat_nodes
self.primary_equalities = self.config.primary_equalities
self.geometries_map = self.config.geometries_map
self.data = self.construct()
def write_JSON_file(self, file_path=''):
name = '%s.json'%self.configuration_name
file_name = os.path.join(file_path, name)
json_text = self.dump_JSON_text()
with open(file_name, 'w') as f:
f.write(json_text)
def dump_JSON_text(self):
data = self.construct()
json_text = json.dumps(data, cls=Encoder, indent=4)
return json_text
def construct(self):
config_info = {}
config_info['topology_name'] = self.topology_name
config_info['configuration_name'] = self.configuration_name
config_info['subsystem_name'] = ''
data = {}
data['information'] = config_info
data['user_inputs'] = self.construct_data_dict(self.input_nodes)
data['evaluations'] = self.construct_data_dict(self.intermediat_nodes)
data['outputs'] = self.construct_data_dict(self.output_nodes)
data['geometries_map'] = self.geometries_map
return data
def construct_data_dict(self, nodes):
storage_dict = {}
for node in nodes:
feeding_nodes = self.get_feeding_nodes(node)
if len(feeding_nodes) == 1 and issubclass(self.graph.nodes[node]['rhs_function'], Equal_to):
n = feeding_nodes[0]
storage_dict[node] = self.check_attribute_access((n, node))
else:
sym_equality = self.graph.nodes[node]['equality']
storage_dict[node] = JSONify(sym_equality.rhs)
return storage_dict
def check_attribute_access(self, edge):
parent_node = edge[0]
attribute = self.graph.edges[edge]['passed_attr']
if attribute:
data_dict = {'constructor': 'getattribute',
'args': [parent_node, attribute]}
return data_dict
else:
return parent_node
def get_feeding_nodes(self, node):
return list(self.graph.predecessors(node))
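# Minimal usage sketch (assuming `sym_config` is a symbolic configuration object
# exposing the attributes read in __init__):
#   gen = generator(sym_config)
#   gen.write_JSON_file(file_path="")   # writes "<configuration_name>.json"
#   text = gen.dump_JSON_text()         # or obtain the JSON text directly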
|
the-stack_0_19684 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SubnetSharedPublicIpAddressConfigurationFragment(Model):
"""Configuration for public IP address sharing.
:param allowed_ports: Backend ports that virtual machines on this subnet
are allowed to expose
:type allowed_ports: list[~azure.mgmt.devtestlabs.models.PortFragment]
"""
_attribute_map = {
'allowed_ports': {'key': 'allowedPorts', 'type': '[PortFragment]'},
}
def __init__(self, allowed_ports=None):
super(SubnetSharedPublicIpAddressConfigurationFragment, self).__init__()
self.allowed_ports = allowed_ports
|
the-stack_0_19685 | """
This script processes the output from the C preprocessor and extracts all
qstr. Each qstr is transformed into a qstr definition of the form 'Q(...)'.
This script works with Python 2.6, 2.7, 3.3 and 3.4.
"""
from __future__ import print_function
import io
import os
import re
import subprocess
import sys
import multiprocessing, multiprocessing.dummy
# Extract MP_QSTR_FOO macros.
_MODE_QSTR = "qstr"
# Extract MP_COMPRESSED_ROM_TEXT("") macros. (Which come from MP_ERROR_TEXT)
_MODE_COMPRESS = "compress"
def is_c_source(fname):
return os.path.splitext(fname)[1] in [".c"]
def is_cxx_source(fname):
return os.path.splitext(fname)[1] in [".cc", ".cp", ".cxx", ".cpp", ".CPP", ".c++", ".C"]
def preprocess():
if any(src in args.dependencies for src in args.changed_sources):
sources = args.sources
elif any(args.changed_sources):
sources = args.changed_sources
else:
sources = args.sources
csources = []
cxxsources = []
for source in sources:
if is_cxx_source(source):
cxxsources.append(source)
elif is_c_source(source):
csources.append(source)
try:
os.makedirs(os.path.dirname(args.output[0]))
except OSError:
pass
def pp(flags):
def run(files):
return subprocess.check_output(args.pp + flags + files)
return run
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
p = multiprocessing.dummy.Pool(cpus)
with open(args.output[0], "wb") as out_file:
for flags, sources in (
(args.cflags, csources),
(args.cxxflags, cxxsources),
):
batch_size = (len(sources) + cpus - 1) // cpus
chunks = [sources[i : i + batch_size] for i in range(0, len(sources), batch_size or 1)]
for output in p.imap(pp(flags), chunks):
out_file.write(output)
def write_out(fname, output):
if output:
for m, r in [("/", "__"), ("\\", "__"), (":", "@"), ("..", "@@")]:
fname = fname.replace(m, r)
with open(args.output_dir + "/" + fname + "." + args.mode, "w") as f:
f.write("\n".join(output) + "\n")
def process_file(f):
re_line = re.compile(r"#[line]*\s\d+\s\"([^\"]+)\"")
if args.mode == _MODE_QSTR:
re_match = re.compile(r"MP_QSTR_[_a-zA-Z0-9]+")
elif args.mode == _MODE_COMPRESS:
re_match = re.compile(r'MP_COMPRESSED_ROM_TEXT\("([^"]*)"\)')
output = []
last_fname = None
for line in f:
if line.isspace():
continue
# match gcc-like output (# n "file") and msvc-like output (#line n "file")
if line.startswith(("# ", "#line")):
m = re_line.match(line)
assert m is not None
fname = m.group(1)
if not is_c_source(fname) and not is_cxx_source(fname):
continue
if fname != last_fname:
write_out(last_fname, output)
output = []
last_fname = fname
continue
for match in re_match.findall(line):
if args.mode == _MODE_QSTR:
name = match.replace("MP_QSTR_", "")
output.append("Q(" + name + ")")
elif args.mode == _MODE_COMPRESS:
output.append(match)
if last_fname:
write_out(last_fname, output)
return ""
def cat_together():
import glob
import hashlib
hasher = hashlib.md5()
all_lines = []
outf = open(args.output_dir + "/out", "wb")
for fname in glob.glob(args.output_dir + "/*." + args.mode):
with open(fname, "rb") as f:
lines = f.readlines()
all_lines += lines
all_lines.sort()
all_lines = b"\n".join(all_lines)
outf.write(all_lines)
outf.close()
hasher.update(all_lines)
new_hash = hasher.hexdigest()
# print(new_hash)
old_hash = None
try:
with open(args.output_file + ".hash") as f:
old_hash = f.read()
except IOError:
pass
mode_full = "QSTR"
if args.mode == _MODE_COMPRESS:
mode_full = "Compressed data"
if old_hash != new_hash:
print(mode_full, "updated")
try:
# rename below might fail if file exists
os.remove(args.output_file)
except:
pass
os.rename(args.output_dir + "/out", args.output_file)
with open(args.output_file + ".hash", "w") as f:
f.write(new_hash)
else:
print(mode_full, "not updated")
if __name__ == "__main__":
if len(sys.argv) < 6:
print("usage: %s command mode input_filename output_dir output_file" % sys.argv[0])
sys.exit(2)
class Args:
pass
args = Args()
args.command = sys.argv[1]
if args.command == "pp":
named_args = {
s: []
for s in [
"pp",
"output",
"cflags",
"cxxflags",
"sources",
"changed_sources",
"dependencies",
]
}
for arg in sys.argv[1:]:
if arg in named_args:
current_tok = arg
else:
named_args[current_tok].append(arg)
if not named_args["pp"] or len(named_args["output"]) != 1:
print("usage: %s %s ..." % (sys.argv[0], " ... ".join(named_args)))
sys.exit(2)
for k, v in named_args.items():
setattr(args, k, v)
preprocess()
sys.exit(0)
args.mode = sys.argv[2]
args.input_filename = sys.argv[3] # Unused for command=cat
args.output_dir = sys.argv[4]
args.output_file = None if len(sys.argv) == 5 else sys.argv[5] # Unused for command=split
if args.mode not in (_MODE_QSTR, _MODE_COMPRESS):
print("error: mode %s unrecognised" % sys.argv[2])
sys.exit(2)
try:
os.makedirs(args.output_dir)
except OSError:
pass
if args.command == "split":
with io.open(args.input_filename, encoding="utf-8") as infile:
process_file(infile)
if args.command == "cat":
cat_together()
|
the-stack_0_19686 | import asyncio
import json
import logging
import time
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
import traceback
import aiohttp
from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey
import chia.server.ws_connection as ws # lgtm [py/import-and-import-from]
from chia.consensus.coinbase import create_puzzlehash_for_pk
from chia.consensus.constants import ConsensusConstants
from chia.daemon.keychain_proxy import (
KeychainProxy,
KeychainProxyConnectionFailure,
connect_to_keychain_and_validate,
wrap_local_keychain,
)
from chia.pools.pool_config import PoolWalletConfig, load_pool_config, add_auth_key
from chia.protocols import farmer_protocol, harvester_protocol
from chia.protocols.pool_protocol import (
ErrorResponse,
get_current_authentication_token,
GetFarmerResponse,
PoolErrorCode,
PostFarmerPayload,
PostFarmerRequest,
PutFarmerPayload,
PutFarmerRequest,
AuthenticationPayload,
)
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.server.outbound_message import NodeType, make_msg
from chia.server.server import ssl_context_for_root
from chia.server.ws_connection import WSChiaConnection
from chia.ssl.create_ssl import get_mozilla_ca_crt
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.bech32m import decode_puzzle_hash
from chia.util.byte_types import hexstr_to_bytes
from chia.util.config import load_config, save_config, config_path_for_filename, get_config_lock
from chia.util.hash import std_hash
from chia.util.ints import uint8, uint16, uint32, uint64
from chia.util.keychain import Keychain
from chia.wallet.derive_keys import (
master_sk_to_farmer_sk,
master_sk_to_pool_sk,
master_sk_to_wallet_sk,
find_authentication_sk,
find_owner_sk,
)
from chia.wallet.puzzles.singleton_top_layer import SINGLETON_MOD
singleton_mod_hash = SINGLETON_MOD.get_tree_hash()
log = logging.getLogger(__name__)
UPDATE_POOL_INFO_INTERVAL: int = 3600
UPDATE_POOL_FARMER_INFO_INTERVAL: int = 300
UPDATE_HARVESTER_CACHE_INTERVAL: int = 90
"""
HARVESTER PROTOCOL (FARMER <-> HARVESTER)
"""
class HarvesterCacheEntry:
def __init__(self):
self.data: Optional[dict] = None
self.last_update: float = 0
def bump_last_update(self):
self.last_update = time.time()
def set_data(self, data):
self.data = data
self.bump_last_update()
def needs_update(self, update_interval: int):
return time.time() - self.last_update > update_interval
def expired(self, update_interval: int):
return time.time() - self.last_update > update_interval * 10
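# Illustrative sketch (not part of the original module): the life cycle of a
# HarvesterCacheEntry relative to UPDATE_HARVESTER_CACHE_INTERVAL. This helper
# is hypothetical and is never called by the farmer itself.
def _harvester_cache_entry_example() -> None:
    entry = HarvesterCacheEntry()
    entry.set_data({"plots": []})
    # A freshly updated entry neither needs an update nor counts as expired;
    # it only expires after 10x the update interval without a refresh.
    fresh = not entry.needs_update(UPDATE_HARVESTER_CACHE_INTERVAL)
    still_valid = not entry.expired(UPDATE_HARVESTER_CACHE_INTERVAL)
    log.debug(f"fresh={fresh}, still_valid={still_valid}")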
class Farmer:
def __init__(
self,
root_path: Path,
farmer_config: Dict,
pool_config: Dict,
consensus_constants: ConsensusConstants,
local_keychain: Optional[Keychain] = None,
):
self.keychain_proxy: Optional[KeychainProxy] = None
self.local_keychain = local_keychain
self._root_path = root_path
self.config = farmer_config
self.pool_config = pool_config
# Keep track of all sps, keyed on challenge chain signage point hash
self.sps: Dict[bytes32, List[farmer_protocol.NewSignagePoint]] = {}
# Keep track of harvester plot identifier (str), target sp index, and PoSpace for each challenge
self.proofs_of_space: Dict[bytes32, List[Tuple[str, ProofOfSpace]]] = {}
# Quality string to plot identifier and challenge_hash, for use with harvester.RequestSignatures
self.quality_str_to_identifiers: Dict[bytes32, Tuple[str, bytes32, bytes32, bytes32]] = {}
# number of responses to each signage point
self.number_of_responses: Dict[bytes32, int] = {}
# A dictionary of keys to time added. These keys refer to keys in the above 4 dictionaries. This is used
# to periodically clear the memory
self.cache_add_time: Dict[bytes32, uint64] = {}
# Interval to request plots from connected harvesters
self.update_harvester_cache_interval = UPDATE_HARVESTER_CACHE_INTERVAL
self.cache_clear_task: Optional[asyncio.Task] = None
self.update_pool_state_task: Optional[asyncio.Task] = None
self.constants = consensus_constants
self._shut_down = False
self.server: Any = None
self.state_changed_callback: Optional[Callable] = None
self.log = log
self.started = False
self.harvester_handshake_task: Optional[asyncio.Task] = None
# From p2_singleton_puzzle_hash to pool state dict
self.pool_state: Dict[bytes32, Dict] = {}
# From p2_singleton to auth PrivateKey
self.authentication_keys: Dict[bytes32, PrivateKey] = {}
# Last time we updated pool_state based on the config file
self.last_config_access_time: uint64 = uint64(0)
self.harvester_cache: Dict[str, Dict[str, HarvesterCacheEntry]] = {}
async def ensure_keychain_proxy(self) -> KeychainProxy:
if not self.keychain_proxy:
if self.local_keychain:
self.keychain_proxy = wrap_local_keychain(self.local_keychain, log=self.log)
else:
self.keychain_proxy = await connect_to_keychain_and_validate(self._root_path, self.log)
if not self.keychain_proxy:
raise KeychainProxyConnectionFailure("Failed to connect to keychain service")
return self.keychain_proxy
async def get_all_private_keys(self):
keychain_proxy = await self.ensure_keychain_proxy()
return await keychain_proxy.get_all_private_keys()
async def setup_keys(self) -> bool:
no_keys_error_str = "No keys exist. Please run 'chia keys generate' or open the UI."
self.all_root_sks: List[PrivateKey] = [sk for sk, _ in await self.get_all_private_keys()]
self._private_keys = [master_sk_to_farmer_sk(sk) for sk in self.all_root_sks] + [
master_sk_to_pool_sk(sk) for sk in self.all_root_sks
]
if len(self.get_public_keys()) == 0:
log.warning(no_keys_error_str)
return False
config = load_config(self._root_path, "config.yaml")
if "xch_target_address" not in self.config:
self.config = config["farmer"]
if "xch_target_address" not in self.pool_config:
self.pool_config = config["pool"]
if "xch_target_address" not in self.config or "xch_target_address" not in self.pool_config:
log.debug("xch_target_address missing in the config")
return False
# This is the farmer configuration
self.farmer_target_encoded = self.config["xch_target_address"]
self.farmer_target = decode_puzzle_hash(self.farmer_target_encoded)
self.pool_public_keys = [G1Element.from_bytes(bytes.fromhex(pk)) for pk in self.config["pool_public_keys"]]
# This is the self pooling configuration, which is only used for original self-pooled plots
self.pool_target_encoded = self.pool_config["xch_target_address"]
self.pool_target = decode_puzzle_hash(self.pool_target_encoded)
self.pool_sks_map: Dict = {}
for key in self.get_private_keys():
self.pool_sks_map[bytes(key.get_g1())] = key
assert len(self.farmer_target) == 32
assert len(self.pool_target) == 32
if len(self.pool_sks_map) == 0:
log.warning(no_keys_error_str)
return False
return True
async def _start(self):
async def start_task():
# `Farmer.setup_keys` returns `False` if there are no keys setup yet. In this case we just try until it
# succeeds or until we need to shut down.
while not self._shut_down:
if await self.setup_keys():
self.update_pool_state_task = asyncio.create_task(self._periodically_update_pool_state_task())
self.cache_clear_task = asyncio.create_task(self._periodically_clear_cache_and_refresh_task())
log.debug("start_task: initialized")
self.started = True
return
await asyncio.sleep(1)
asyncio.create_task(start_task())
def _close(self):
self._shut_down = True
async def _await_closed(self):
if self.cache_clear_task is not None:
await self.cache_clear_task
if self.update_pool_state_task is not None:
await self.update_pool_state_task
self.started = False
def _set_state_changed_callback(self, callback: Callable):
self.state_changed_callback = callback
async def on_connect(self, peer: WSChiaConnection):
self.state_changed("add_connection", {})
async def handshake_task():
# Wait until the task in `Farmer._start` is done so that we have keys available for the handshake. Bail out
            # early if we need to shut down or if the harvester is no longer connected.
while not self.started and not self._shut_down and peer in self.server.get_connections():
await asyncio.sleep(1)
if self._shut_down:
log.debug("handshake_task: shutdown")
self.harvester_handshake_task = None
return
if peer not in self.server.get_connections():
log.debug("handshake_task: disconnected")
self.harvester_handshake_task = None
return
# Sends a handshake to the harvester
handshake = harvester_protocol.HarvesterHandshake(
self.get_public_keys(),
self.pool_public_keys,
)
msg = make_msg(ProtocolMessageTypes.harvester_handshake, handshake)
await peer.send_message(msg)
self.harvester_handshake_task = None
if peer.connection_type is NodeType.HARVESTER:
self.harvester_handshake_task = asyncio.create_task(handshake_task())
def set_server(self, server):
self.server = server
def state_changed(self, change: str, data: Dict[str, Any]):
if self.state_changed_callback is not None:
self.state_changed_callback(change, data)
def handle_failed_pool_response(self, p2_singleton_puzzle_hash: bytes32, error_message: str):
self.log.error(error_message)
self.pool_state[p2_singleton_puzzle_hash]["pool_errors_24h"].append(
ErrorResponse(uint16(PoolErrorCode.REQUEST_FAILED.value), error_message).to_json_dict()
)
def on_disconnect(self, connection: ws.WSChiaConnection):
self.log.info(f"peer disconnected {connection.get_peer_logging()}")
self.state_changed("close_connection", {})
async def _pool_get_pool_info(self, pool_config: PoolWalletConfig) -> Optional[Dict]:
try:
async with aiohttp.ClientSession(trust_env=True) as session:
async with session.get(
f"{pool_config.pool_url}/pool_info", ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log)
) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"GET /pool_info response: {response}")
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in GET /pool_info {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in GET /pool_info {pool_config.pool_url}, {e}"
)
return None
async def _pool_get_farmer(
self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, authentication_sk: PrivateKey
) -> Optional[Dict]:
authentication_token = get_current_authentication_token(authentication_token_timeout)
message: bytes32 = std_hash(
AuthenticationPayload(
"get_farmer", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token
)
)
signature: G2Element = AugSchemeMPL.sign(authentication_sk, message)
get_farmer_params = {
"launcher_id": pool_config.launcher_id.hex(),
"authentication_token": authentication_token,
"signature": bytes(signature).hex(),
}
try:
async with aiohttp.ClientSession(trust_env=True) as session:
async with session.get(
f"{pool_config.pool_url}/farmer",
params=get_farmer_params,
ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log),
) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
log_level = logging.INFO
if "error_code" in response:
log_level = logging.WARNING
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
self.log.log(log_level, f"GET /farmer response: {response}")
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in GET /farmer {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in GET /farmer {pool_config.pool_url}, {e}"
)
return None
async def _pool_post_farmer(
self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey
) -> Optional[Dict]:
auth_sk: Optional[PrivateKey] = self.get_authentication_sk(pool_config)
assert auth_sk is not None
post_farmer_payload: PostFarmerPayload = PostFarmerPayload(
pool_config.launcher_id,
get_current_authentication_token(authentication_token_timeout),
auth_sk.get_g1(),
pool_config.payout_instructions,
None,
)
assert owner_sk.get_g1() == pool_config.owner_public_key
signature: G2Element = AugSchemeMPL.sign(owner_sk, post_farmer_payload.get_hash())
post_farmer_request = PostFarmerRequest(post_farmer_payload, signature)
self.log.debug(f"POST /farmer request {post_farmer_request}")
try:
async with aiohttp.ClientSession() as session:
async with session.post(
f"{pool_config.pool_url}/farmer",
json=post_farmer_request.to_json_dict(),
ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log),
) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
log_level = logging.INFO
if "error_code" in response:
log_level = logging.WARNING
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
self.log.log(log_level, f"POST /farmer response: {response}")
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in POST /farmer {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in POST /farmer {pool_config.pool_url}, {e}"
)
return None
async def _pool_put_farmer(
self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey
) -> None:
auth_sk: Optional[PrivateKey] = self.get_authentication_sk(pool_config)
assert auth_sk is not None
put_farmer_payload: PutFarmerPayload = PutFarmerPayload(
pool_config.launcher_id,
get_current_authentication_token(authentication_token_timeout),
auth_sk.get_g1(),
pool_config.payout_instructions,
None,
)
assert owner_sk.get_g1() == pool_config.owner_public_key
signature: G2Element = AugSchemeMPL.sign(owner_sk, put_farmer_payload.get_hash())
put_farmer_request = PutFarmerRequest(put_farmer_payload, signature)
self.log.debug(f"PUT /farmer request {put_farmer_request}")
try:
async with aiohttp.ClientSession() as session:
async with session.put(
f"{pool_config.pool_url}/farmer",
json=put_farmer_request.to_json_dict(),
ssl=ssl_context_for_root(get_mozilla_ca_crt(), log=self.log),
) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
log_level = logging.INFO
if "error_code" in response:
log_level = logging.WARNING
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
self.log.log(log_level, f"PUT /farmer response: {response}")
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in PUT /farmer {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in PUT /farmer {pool_config.pool_url}, {e}"
)
def get_authentication_sk(self, pool_config: PoolWalletConfig) -> Optional[PrivateKey]:
if pool_config.p2_singleton_puzzle_hash in self.authentication_keys:
return self.authentication_keys[pool_config.p2_singleton_puzzle_hash]
auth_sk: Optional[PrivateKey] = find_authentication_sk(self.all_root_sks, pool_config.owner_public_key)
if auth_sk is not None:
self.authentication_keys[pool_config.p2_singleton_puzzle_hash] = auth_sk
return auth_sk
async def update_pool_state(self):
config = load_config(self._root_path, "config.yaml")
pool_config_list: List[PoolWalletConfig] = load_pool_config(self._root_path)
for pool_config in pool_config_list:
p2_singleton_puzzle_hash = pool_config.p2_singleton_puzzle_hash
try:
authentication_sk: Optional[PrivateKey] = self.get_authentication_sk(pool_config)
if authentication_sk is None:
self.log.error(f"Could not find authentication sk for {p2_singleton_puzzle_hash}")
continue
add_auth_key(self._root_path, pool_config, authentication_sk.get_g1())
if p2_singleton_puzzle_hash not in self.pool_state:
self.pool_state[p2_singleton_puzzle_hash] = {
"points_found_since_start": 0,
"points_found_24h": [],
"points_acknowledged_since_start": 0,
"points_acknowledged_24h": [],
"next_farmer_update": 0,
"next_pool_info_update": 0,
"current_points": 0,
"current_difficulty": None,
"pool_errors_24h": [],
"authentication_token_timeout": None,
}
self.log.info(f"Added pool: {pool_config}")
pool_state = self.pool_state[p2_singleton_puzzle_hash]
pool_state["pool_config"] = pool_config
# Skip state update when self pooling
if pool_config.pool_url == "":
continue
enforce_https = config["full_node"]["selected_network"] == "mainnet"
if enforce_https and not pool_config.pool_url.startswith("https://"):
self.log.error(f"Pool URLs must be HTTPS on mainnet {pool_config.pool_url}")
continue
# TODO: Improve error handling below, inform about unexpected failures
if time.time() >= pool_state["next_pool_info_update"]:
pool_state["next_pool_info_update"] = time.time() + UPDATE_POOL_INFO_INTERVAL
# Makes a GET request to the pool to get the updated information
pool_info = await self._pool_get_pool_info(pool_config)
if pool_info is not None and "error_code" not in pool_info:
pool_state["authentication_token_timeout"] = pool_info["authentication_token_timeout"]
# Only update the first time from GET /pool_info, gets updated from GET /farmer later
if pool_state["current_difficulty"] is None:
pool_state["current_difficulty"] = pool_info["minimum_difficulty"]
if time.time() >= pool_state["next_farmer_update"]:
pool_state["next_farmer_update"] = time.time() + UPDATE_POOL_FARMER_INFO_INTERVAL
authentication_token_timeout = pool_state["authentication_token_timeout"]
async def update_pool_farmer_info() -> Tuple[Optional[GetFarmerResponse], Optional[PoolErrorCode]]:
# Run a GET /farmer to see if the farmer is already known by the pool
response = await self._pool_get_farmer(
pool_config, authentication_token_timeout, authentication_sk
)
farmer_response: Optional[GetFarmerResponse] = None
error_code_response: Optional[PoolErrorCode] = None
if response is not None:
if "error_code" not in response:
farmer_response = GetFarmerResponse.from_json_dict(response)
if farmer_response is not None:
pool_state["current_difficulty"] = farmer_response.current_difficulty
pool_state["current_points"] = farmer_response.current_points
else:
try:
error_code_response = PoolErrorCode(response["error_code"])
except ValueError:
self.log.error(
f"Invalid error code received from the pool: {response['error_code']}"
)
return farmer_response, error_code_response
if authentication_token_timeout is not None:
farmer_info, error_code = await update_pool_farmer_info()
if error_code == PoolErrorCode.FARMER_NOT_KNOWN:
# Make the farmer known on the pool with a POST /farmer
                            owner_sk_and_index: Optional[Tuple[PrivateKey, uint32]] = find_owner_sk(
self.all_root_sks, pool_config.owner_public_key
)
assert owner_sk_and_index is not None
post_response = await self._pool_post_farmer(
pool_config, authentication_token_timeout, owner_sk_and_index[0]
)
if post_response is not None and "error_code" not in post_response:
self.log.info(
f"Welcome message from {pool_config.pool_url}: "
f"{post_response['welcome_message']}"
)
# Now we should be able to update the local farmer info
farmer_info, farmer_is_known = await update_pool_farmer_info()
if farmer_info is None and not farmer_is_known:
self.log.error("Failed to update farmer info after POST /farmer.")
# Update the farmer information on the pool if the payout instructions changed or if the
# signature is invalid (latter to make sure the pool has the correct authentication public key).
payout_instructions_update_required: bool = (
farmer_info is not None
and pool_config.payout_instructions.lower() != farmer_info.payout_instructions.lower()
)
if payout_instructions_update_required or error_code == PoolErrorCode.INVALID_SIGNATURE:
                            owner_sk_and_index: Optional[Tuple[PrivateKey, uint32]] = find_owner_sk(
self.all_root_sks, pool_config.owner_public_key
)
assert owner_sk_and_index is not None
await self._pool_put_farmer(
pool_config, authentication_token_timeout, owner_sk_and_index[0]
)
else:
self.log.warning(
f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
f", check communication with the pool."
)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Exception in update_pool_state for {pool_config.pool_url}, {e} {tb}")
def get_public_keys(self):
return [child_sk.get_g1() for child_sk in self._private_keys]
def get_private_keys(self):
return self._private_keys
async def get_reward_targets(self, search_for_private_key: bool) -> Dict:
if search_for_private_key:
all_sks = await self.get_all_private_keys()
stop_searching_for_farmer, stop_searching_for_pool = False, False
for i in range(500):
if stop_searching_for_farmer and stop_searching_for_pool and i > 0:
break
for sk, _ in all_sks:
ph = create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1())
if ph == self.farmer_target:
stop_searching_for_farmer = True
if ph == self.pool_target:
stop_searching_for_pool = True
return {
"farmer_target": self.farmer_target_encoded,
"pool_target": self.pool_target_encoded,
"have_farmer_sk": stop_searching_for_farmer,
"have_pool_sk": stop_searching_for_pool,
}
return {
"farmer_target": self.farmer_target_encoded,
"pool_target": self.pool_target_encoded,
}
def set_reward_targets(self, farmer_target_encoded: Optional[str], pool_target_encoded: Optional[str]):
with get_config_lock(self._root_path, "config.yaml"):
config = load_config(self._root_path, "config.yaml", acquire_lock=False)
if farmer_target_encoded is not None:
self.farmer_target_encoded = farmer_target_encoded
self.farmer_target = decode_puzzle_hash(farmer_target_encoded)
config["farmer"]["xch_target_address"] = farmer_target_encoded
if pool_target_encoded is not None:
self.pool_target_encoded = pool_target_encoded
self.pool_target = decode_puzzle_hash(pool_target_encoded)
config["pool"]["xch_target_address"] = pool_target_encoded
save_config(self._root_path, "config.yaml", config)
async def set_payout_instructions(self, launcher_id: bytes32, payout_instructions: str):
for p2_singleton_puzzle_hash, pool_state_dict in self.pool_state.items():
if launcher_id == pool_state_dict["pool_config"].launcher_id:
with get_config_lock(self._root_path, "config.yaml"):
config = load_config(self._root_path, "config.yaml", acquire_lock=False)
new_list = []
pool_list = config["pool"].get("pool_list", [])
if pool_list is not None:
for list_element in pool_list:
if hexstr_to_bytes(list_element["launcher_id"]) == bytes(launcher_id):
list_element["payout_instructions"] = payout_instructions
new_list.append(list_element)
config["pool"]["pool_list"] = new_list
save_config(self._root_path, "config.yaml", config)
# Force a GET /farmer which triggers the PUT /farmer if it detects the changed instructions
pool_state_dict["next_farmer_update"] = 0
return
self.log.warning(f"Launcher id: {launcher_id} not found")
async def generate_login_link(self, launcher_id: bytes32) -> Optional[str]:
for pool_state in self.pool_state.values():
pool_config: PoolWalletConfig = pool_state["pool_config"]
if pool_config.launcher_id == launcher_id:
authentication_sk: Optional[PrivateKey] = self.get_authentication_sk(pool_config)
if authentication_sk is None:
self.log.error(f"Could not find authentication sk for {pool_config.p2_singleton_puzzle_hash}")
continue
authentication_token_timeout = pool_state["authentication_token_timeout"]
authentication_token = get_current_authentication_token(authentication_token_timeout)
message: bytes32 = std_hash(
AuthenticationPayload(
"get_login", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token
)
)
signature: G2Element = AugSchemeMPL.sign(authentication_sk, message)
return (
pool_config.pool_url
+ f"/login?launcher_id={launcher_id.hex()}&authentication_token={authentication_token}"
f"&signature={bytes(signature).hex()}"
)
return None
async def update_cached_harvesters(self) -> bool:
# First remove outdated cache entries
self.log.debug(f"update_cached_harvesters cache entries: {len(self.harvester_cache)}")
remove_hosts = []
for host, host_cache in self.harvester_cache.items():
remove_peers = []
for peer_id, peer_cache in host_cache.items():
# If the peer cache is expired it means the harvester didn't respond for too long
if peer_cache.expired(self.update_harvester_cache_interval):
remove_peers.append(peer_id)
for key in remove_peers:
del host_cache[key]
if len(host_cache) == 0:
self.log.debug(f"update_cached_harvesters remove host: {host}")
remove_hosts.append(host)
for key in remove_hosts:
del self.harvester_cache[key]
# Now query each harvester and update caches
updated = False
for connection in self.server.get_connections(NodeType.HARVESTER):
cache_entry = await self.get_cached_harvesters(connection)
if cache_entry.needs_update(self.update_harvester_cache_interval):
self.log.debug(f"update_cached_harvesters update harvester: {connection.peer_node_id}")
cache_entry.bump_last_update()
response = await connection.request_plots(
harvester_protocol.RequestPlots(), timeout=self.update_harvester_cache_interval
)
if response is not None:
if isinstance(response, harvester_protocol.RespondPlots):
new_data: Dict = response.to_json_dict()
if cache_entry.data != new_data:
updated = True
self.log.debug(f"update_cached_harvesters cache updated: {connection.peer_node_id}")
else:
self.log.debug(f"update_cached_harvesters no changes for: {connection.peer_node_id}")
cache_entry.set_data(new_data)
else:
self.log.error(
f"Invalid response from harvester:"
f"peer_host {connection.peer_host}, peer_node_id {connection.peer_node_id}"
)
else:
self.log.error(
f"Harvester '{connection.peer_host}/{connection.peer_node_id}' did not respond: "
f"(version mismatch or time out {UPDATE_HARVESTER_CACHE_INTERVAL}s)"
)
return updated
async def get_cached_harvesters(self, connection: WSChiaConnection) -> HarvesterCacheEntry:
host_cache = self.harvester_cache.get(connection.peer_host)
if host_cache is None:
host_cache = {}
self.harvester_cache[connection.peer_host] = host_cache
node_cache = host_cache.get(connection.peer_node_id.hex())
if node_cache is None:
node_cache = HarvesterCacheEntry()
host_cache[connection.peer_node_id.hex()] = node_cache
return node_cache
async def get_harvesters(self) -> Dict:
harvesters: List = []
for connection in self.server.get_connections(NodeType.HARVESTER):
self.log.debug(f"get_harvesters host: {connection.peer_host}, node_id: {connection.peer_node_id}")
cache_entry = await self.get_cached_harvesters(connection)
if cache_entry.data is not None:
harvester_object: dict = dict(cache_entry.data)
harvester_object["connection"] = {
"node_id": connection.peer_node_id.hex(),
"host": connection.peer_host,
"port": connection.peer_port,
}
harvesters.append(harvester_object)
else:
self.log.debug(f"get_harvesters no cache: {connection.peer_host}, node_id: {connection.peer_node_id}")
return {"harvesters": harvesters}
async def _periodically_update_pool_state_task(self):
time_slept: uint64 = uint64(0)
config_path: Path = config_path_for_filename(self._root_path, "config.yaml")
while not self._shut_down:
# Every time the config file changes, read it to check the pool state
stat_info = config_path.stat()
if stat_info.st_mtime > self.last_config_access_time:
# If we detect the config file changed, refresh private keys first just in case
self.all_root_sks: List[PrivateKey] = [sk for sk, _ in await self.get_all_private_keys()]
self.last_config_access_time = stat_info.st_mtime
await self.update_pool_state()
time_slept = uint64(0)
elif time_slept > 60:
await self.update_pool_state()
time_slept = uint64(0)
time_slept += 1
await asyncio.sleep(1)
async def _periodically_clear_cache_and_refresh_task(self):
time_slept: uint64 = uint64(0)
refresh_slept = 0
while not self._shut_down:
try:
if time_slept > self.constants.SUB_SLOT_TIME_TARGET:
now = time.time()
removed_keys: List[bytes32] = []
for key, add_time in self.cache_add_time.items():
if now - float(add_time) > self.constants.SUB_SLOT_TIME_TARGET * 3:
self.sps.pop(key, None)
self.proofs_of_space.pop(key, None)
self.quality_str_to_identifiers.pop(key, None)
self.number_of_responses.pop(key, None)
removed_keys.append(key)
for key in removed_keys:
self.cache_add_time.pop(key, None)
time_slept = uint64(0)
log.debug(
f"Cleared farmer cache. Num sps: {len(self.sps)} {len(self.proofs_of_space)} "
f"{len(self.quality_str_to_identifiers)} {len(self.number_of_responses)}"
)
time_slept += 1
refresh_slept += 1
# Periodically refresh GUI to show the correct download/upload rate.
if refresh_slept >= 30:
self.state_changed("add_connection", {})
refresh_slept = 0
# Handles harvester plots cache cleanup and updates
if await self.update_cached_harvesters():
self.state_changed("new_plots", await self.get_harvesters())
except Exception:
log.error(f"_periodically_clear_cache_and_refresh_task failed: {traceback.format_exc()}")
await asyncio.sleep(1)
|
the-stack_0_19687 | from malaya.model.tf import TrueCase
from malaya.supervised import transformer as load_transformer
from malaya.supervised import t5 as t5_load
from malaya.model.t5 import TrueCase as T5_TrueCase
from herpetologist import check_type
_transformer_availability = {
'small': {
'Size (MB)': 42.7,
'Quantized Size (MB)': 13.1,
'CER': 0.0246012,
'Suggested length': 256,
},
'base': {
'Size (MB)': 234,
'Quantized Size (MB)': 63.8,
'CER': 0.0146193,
'Suggested length': 256,
},
'super-tiny-t5': {
'Size (MB)': 81.8,
'Quantized Size (MB)': 27.1,
'CER': 0.0254679,
'Suggested length': 256,
},
'super-super-tiny-t5': {
'Size (MB)': 39.6,
'Quantized Size (MB)': 12,
'CER': 0.02533658,
'Suggested length': 256,
},
'3x-super-tiny-t5': {
'Size (MB)': 18.3,
'Quantized Size (MB)': 4.46,
'CER': 0.0487372,
'Suggested length': 256,
},
'3x-super-tiny-t5-4k': {
'Size (MB)': 5.03,
'Quantized Size (MB)': 2.99,
'CER': 0.0798906,
'Suggested length': 256,
}
}
def available_transformer():
"""
List available transformer models.
"""
from malaya.function import describe_availability
return describe_availability(_transformer_availability)
@check_type
def transformer(model: str = 'base', quantized: bool = False, **kwargs):
"""
Load transformer encoder-decoder model to True Case.
Parameters
----------
model : str, optional (default='base')
Model architecture supported. Allowed values:
* ``'small'`` - Transformer SMALL parameters.
* ``'base'`` - Transformer BASE parameters.
* ``'super-tiny-t5'`` - T5 SUPER TINY parameters.
* ``'super-super-tiny-t5'`` - T5 SUPER SUPER TINY parameters.
* ``'3x-super-tiny-t5'`` - T5 3X SUPER TINY parameters.
* ``'3x-super-tiny-t5-4k'`` - T5 3X SUPER TINY 4k vocab size parameters.
quantized : bool, optional (default=False)
if True, will load 8-bit quantized model.
        Quantized models are not necessarily faster; it depends on the machine.
Returns
-------
result: malaya.model.tf.TrueCase class
"""
model = model.lower()
if model not in _transformer_availability:
raise ValueError(
'model not supported, please check supported models from `malaya.true_case.available_transformer()`.'
)
if 't5' in model:
return t5_load.load(
module='true-case',
model=model,
model_class=T5_TrueCase,
quantized=quantized,
**kwargs,
)
else:
return load_transformer.load(
module='true-case',
model=model,
encoder='yttm',
model_class=TrueCase,
quantized=quantized,
**kwargs,
)
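# Illustrative sketch (not part of the original module): listing the available
# models and loading one. Loading downloads model files on first use; the
# decoding step is omitted because its method name is not defined in this file.
def _true_case_example():
    print(available_transformer())
    model = transformer(model='small', quantized=True)
    return model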
|
the-stack_0_19689 | """
Utility functions to create MongoDB processes.
Handles all the nitty-gritty parameter conversion.
"""
import json
import os
import os.path
import stat
from . import process as _process
from .. import utils
from .. import config
def mongod_program(logger, executable=None, process_kwargs=None, **kwargs):
"""
Returns a Process instance that starts a mongod executable with
arguments constructed from 'kwargs'.
"""
executable = utils.default_if_none(executable, config.DEFAULT_MONGOD_EXECUTABLE)
args = [executable]
# Apply the --setParameter command line argument. Command line options to resmoke.py override
# the YAML configuration.
suite_set_parameters = kwargs.pop("set_parameters", {})
if config.MONGOD_SET_PARAMETERS is not None:
suite_set_parameters.update(utils.load_yaml(config.MONGOD_SET_PARAMETERS))
_apply_set_parameters(args, suite_set_parameters)
shortcut_opts = {
"nojournal": config.NO_JOURNAL,
"storageEngine": config.STORAGE_ENGINE,
"wiredTigerCollectionConfigString": config.WT_COLL_CONFIG,
"wiredTigerEngineConfigString": config.WT_ENGINE_CONFIG,
"wiredTigerIndexConfigString": config.WT_INDEX_CONFIG,
}
# These options are just flags, so they should not take a value.
    opts_without_vals = ("nojournal",)
# Have the --nojournal command line argument to resmoke.py unset the journal option.
if shortcut_opts["nojournal"] and "journal" in kwargs:
del kwargs["journal"]
# Ensure that config servers run with journaling enabled.
if "configsvr" in kwargs:
shortcut_opts["nojournal"] = False
kwargs["journal"] = ""
# Command line options override the YAML configuration.
for opt_name in shortcut_opts:
opt_value = shortcut_opts[opt_name]
if opt_name in opts_without_vals:
# Options that are specified as --flag on the command line are represented by a boolean
# value where True indicates that the flag should be included in 'kwargs'.
if opt_value:
kwargs[opt_name] = ""
else:
# Options that are specified as --key=value on the command line are represented by a
# value where None indicates that the key-value pair shouldn't be included in 'kwargs'.
if opt_value is not None:
kwargs[opt_name] = opt_value
# Override the storage engine specified on the command line with "wiredTiger" if running a
# config server replica set.
if "replSet" in kwargs and "configsvr" in kwargs:
kwargs["storageEngine"] = "wiredTiger"
# Apply the rest of the command line arguments.
_apply_kwargs(args, kwargs)
_set_keyfile_permissions(kwargs)
process_kwargs = utils.default_if_none(process_kwargs, {})
return _process.Process(logger, args, **process_kwargs)
def mongos_program(logger, executable=None, process_kwargs=None, **kwargs):
"""
Returns a Process instance that starts a mongos executable with
arguments constructed from 'kwargs'.
"""
executable = utils.default_if_none(executable, config.DEFAULT_MONGOS_EXECUTABLE)
args = [executable]
# Apply the --setParameter command line argument. Command line options to resmoke.py override
# the YAML configuration.
suite_set_parameters = kwargs.pop("set_parameters", {})
if config.MONGOS_SET_PARAMETERS is not None:
suite_set_parameters.update(utils.load_yaml(config.MONGOS_SET_PARAMETERS))
_apply_set_parameters(args, suite_set_parameters)
# Apply the rest of the command line arguments.
_apply_kwargs(args, kwargs)
_set_keyfile_permissions(kwargs)
process_kwargs = utils.default_if_none(process_kwargs, {})
return _process.Process(logger, args, **process_kwargs)
def mongo_shell_program(logger, executable=None, filename=None, process_kwargs=None, **kwargs):
"""
Returns a Process instance that starts a mongo shell with arguments
constructed from 'kwargs'.
"""
executable = utils.default_if_none(executable, config.DEFAULT_MONGO_EXECUTABLE)
args = [executable]
eval_sb = [] # String builder.
global_vars = kwargs.pop("global_vars", {}).copy()
shortcut_opts = {
"noJournal": (config.NO_JOURNAL, False),
"noJournalPrealloc": (config.NO_PREALLOC_JOURNAL, False),
"storageEngine": (config.STORAGE_ENGINE, ""),
"testName": (os.path.splitext(os.path.basename(filename))[0], ""),
"wiredTigerCollectionConfigString": (config.WT_COLL_CONFIG, ""),
"wiredTigerEngineConfigString": (config.WT_ENGINE_CONFIG, ""),
"wiredTigerIndexConfigString": (config.WT_INDEX_CONFIG, ""),
}
test_data = global_vars.get("TestData", {}).copy()
for opt_name in shortcut_opts:
(opt_value, opt_default) = shortcut_opts[opt_name]
if opt_value is not None:
test_data[opt_name] = opt_value
elif opt_name not in test_data:
# Only use 'opt_default' if the property wasn't set in the YAML configuration.
test_data[opt_name] = opt_default
global_vars["TestData"] = test_data
# Pass setParameters for mongos and mongod through TestData. The setParameter parsing in
# servers.js is very primitive (just splits on commas), so this may break for non-scalar
# setParameter values.
if config.MONGOD_SET_PARAMETERS is not None:
if "setParameters" in test_data:
raise ValueError("setParameters passed via TestData can only be set from either the"
" command line or the suite YAML, not both")
mongod_set_parameters = utils.load_yaml(config.MONGOD_SET_PARAMETERS)
test_data["setParameters"] = _format_test_data_set_parameters(mongod_set_parameters)
if config.MONGOS_SET_PARAMETERS is not None:
if "setParametersMongos" in test_data:
raise ValueError("setParametersMongos passed via TestData can only be set from either"
" the command line or the suite YAML, not both")
mongos_set_parameters = utils.load_yaml(config.MONGOS_SET_PARAMETERS)
test_data["setParametersMongos"] = _format_test_data_set_parameters(mongos_set_parameters)
if "eval_prepend" in kwargs:
eval_sb.append(str(kwargs.pop("eval_prepend")))
for var_name in global_vars:
_format_shell_vars(eval_sb, var_name, global_vars[var_name])
if "eval" in kwargs:
eval_sb.append(str(kwargs.pop("eval")))
eval_str = "; ".join(eval_sb)
args.append("--eval")
args.append(eval_str)
if config.SHELL_READ_MODE is not None:
kwargs["readMode"] = config.SHELL_READ_MODE
if config.SHELL_WRITE_MODE is not None:
kwargs["writeMode"] = config.SHELL_WRITE_MODE
# Apply the rest of the command line arguments.
_apply_kwargs(args, kwargs)
    # Have the mongo shell run the specified file.
args.append(filename)
_set_keyfile_permissions(test_data)
process_kwargs = utils.default_if_none(process_kwargs, {})
return _process.Process(logger, args, **process_kwargs)
def _format_shell_vars(sb, path, value):
"""
Formats 'value' in a way that can be passed to --eval.
If 'value' is a dictionary, then it is unrolled into the creation of
a new JSON object with properties assigned for each key of the
dictionary.
"""
# Only need to do special handling for JSON objects.
if not isinstance(value, dict):
sb.append("%s = %s" % (path, json.dumps(value)))
return
# Avoid including curly braces and colons in output so that the command invocation can be
# copied and run through bash.
sb.append("%s = new Object()" % (path))
for subkey in value:
_format_shell_vars(sb, ".".join((path, subkey)), value[subkey])
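# Illustrative sketch (not part of the original module): what _format_shell_vars
# builds for a nested dictionary (the names below are hypothetical).
def _format_shell_vars_example():
    sb = []
    _format_shell_vars(sb, "TestData", {"noJournal": True, "nested": {"x": 1}})
    # sb now holds, in insertion order:
    #   TestData = new Object()
    #   TestData.noJournal = true
    #   TestData.nested = new Object()
    #   TestData.nested.x = 1
    return "; ".join(sb)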
def dbtest_program(logger, executable=None, suites=None, process_kwargs=None, **kwargs):
"""
Returns a Process instance that starts a dbtest executable with
arguments constructed from 'kwargs'.
"""
executable = utils.default_if_none(executable, config.DEFAULT_DBTEST_EXECUTABLE)
args = [executable]
if suites is not None:
args.extend(suites)
if config.STORAGE_ENGINE is not None:
kwargs["storageEngine"] = config.STORAGE_ENGINE
return generic_program(logger, args, process_kwargs=process_kwargs, **kwargs)
def generic_program(logger, args, process_kwargs=None, **kwargs):
"""
Returns a Process instance that starts an arbitrary executable with
arguments constructed from 'kwargs'. The args parameter is an array
of strings containing the command to execute.
"""
if not utils.is_string_list(args):
raise ValueError("The args parameter must be a list of command arguments")
_apply_kwargs(args, kwargs)
process_kwargs = utils.default_if_none(process_kwargs, {})
return _process.Process(logger, args, **process_kwargs)
def _format_test_data_set_parameters(set_parameters):
"""
Converts key-value pairs from 'set_parameters' into the comma
delimited list format expected by the parser in servers.js.
WARNING: the parsing logic in servers.js is very primitive.
Non-scalar options such as logComponentVerbosity will not work
correctly.
"""
params = []
for param_name in set_parameters:
param_value = set_parameters[param_name]
if isinstance(param_value, bool):
# Boolean valued setParameters are specified as lowercase strings.
param_value = "true" if param_value else "false"
elif isinstance(param_value, dict):
raise TypeError("Non-scalar setParameter values are not currently supported.")
params.append("%s=%s" % (param_name, param_value))
return ",".join(params)
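# Illustrative sketch (not part of the original module): the comma-delimited
# string handed to servers.js, using hypothetical parameter names.
def _format_test_data_set_parameters_example():
    # Booleans become lowercase strings, so this returns
    # "enableTestCommands=true,logLevel=1" (in dict insertion order).
    return _format_test_data_set_parameters({"enableTestCommands": True, "logLevel": 1})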
def _apply_set_parameters(args, set_parameter):
"""
Converts key-value pairs from 'kwargs' into --setParameter key=value
arguments to an executable and appends them to 'args'.
"""
for param_name in set_parameter:
param_value = set_parameter[param_name]
# --setParameter takes boolean values as lowercase strings.
if isinstance(param_value, bool):
param_value = "true" if param_value else "false"
args.append("--setParameter")
args.append("%s=%s" % (param_name, param_value))
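# Illustrative sketch (not part of the original module): how a setParameter dict
# becomes repeated --setParameter command line arguments.
def _apply_set_parameters_example():
    args = ["mongod"]
    _apply_set_parameters(args, {"enableTestCommands": True})
    # args is now ["mongod", "--setParameter", "enableTestCommands=true"]
    return args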
def _apply_kwargs(args, kwargs):
"""
Converts key-value pairs from 'kwargs' into --key value arguments
to an executable and appends them to 'args'.
A --flag without a value is represented with the empty string.
"""
for arg_name in kwargs:
arg_value = str(kwargs[arg_name])
args.append("--%s" % (arg_name))
if arg_value:
args.append(arg_value)
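# Illustrative sketch (not part of the original module): an empty-string value
# yields a bare --flag, anything else adds a separate value token.
def _apply_kwargs_example():
    args = []
    _apply_kwargs(args, {"nojournal": "", "port": 27017})
    # args is now ["--nojournal", "--port", "27017"]
    return args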
def _set_keyfile_permissions(opts):
"""
Change the permissions of keyfiles in 'opts' to 600, i.e. only the
user can read and write the file.
    This is necessary to avoid having the mongod/mongos fail to start up
because "permissions on the keyfiles are too open".
We can't permanently set the keyfile permissions because git is not
aware of them.
"""
if "keyFile" in opts:
os.chmod(opts["keyFile"], stat.S_IRUSR | stat.S_IWUSR)
if "encryptionKeyFile" in opts:
os.chmod(opts["encryptionKeyFile"], stat.S_IRUSR | stat.S_IWUSR)
|
the-stack_0_19695 | import asyncio
import datetime
import aiocometd
import requests
from aiocometd import ConnectionType
from tastyscrape import dxfeed
from tastyscrape.dxfeed import mapper as dxfeed_mapper
from tastyscrape.bases.session import TastyAPISession
class DataStreamer(object):
def __init__(self, session: TastyAPISession):
if not session.is_active():
raise Exception('Tastyworks API session not active/valid')
self.tasty_session = session
self.cometd_client = None
self.subs = {}
asyncio.get_event_loop().run_until_complete(
self._setup_connection()
)
def __del__(self):
asyncio.get_event_loop().run_until_complete(
self.cometd_client.close()
)
async def _cometd_close(self):
await self.cometd_client.close()
async def add_data_sub(self, values):
await self._send_msg(dxfeed.SUBSCRIPTION_CHANNEL, {'add': values})
async def remove_data_sub(self, values):
# NOTE: Experimental, unconfirmed. Needs testing
await self._send_msg(dxfeed.SUBSCRIPTION_CHANNEL, {'remove': values})
async def _consumer(self, message):
return dxfeed_mapper.map_message(message)
async def _send_msg(self, channel, message):
if not self.logged_in:
raise Exception('Connection not made or logged in')
await self.cometd_client.publish(channel, message)
async def reset_data_subs(self):
await self._send_msg(dxfeed.SUBSCRIPTION_CHANNEL, {'reset': True})
def get_streamer_token(self):
return self._get_streamer_data()['data']['token']
def _get_streamer_data(self):
if not self.tasty_session.logged_in:
raise Exception('Logged in session required')
if hasattr(self, 'streamer_data_created') and (datetime.datetime.now() - self.streamer_data_created).total_seconds() < 60:
return self.streamer_data
resp = requests.get(f'{self.tasty_session.API_url}/quote-streamer-tokens', headers=self.tasty_session.get_request_headers())
if resp.status_code != 200:
raise Exception('Could not get quote streamer data, error message: {}'.format(
resp.json()['error']['message']
))
self.streamer_data = resp.json()
self.streamer_data_created = datetime.datetime.now()
return resp.json()
def _get_streamer_websocket_url(self):
socket_url = self._get_streamer_data()['data']['websocket-url']
full_url = '{}/cometd'.format(socket_url)
return full_url
async def _setup_connection(self):
aiocometd.client.DEFAULT_CONNECTION_TYPE = ConnectionType.WEBSOCKET
streamer_url = self._get_streamer_websocket_url()
auth_extension = AuthExtension(self.get_streamer_token())
cometd_client = aiocometd.Client(
streamer_url,
auth=auth_extension,
)
await cometd_client.open()
await cometd_client.subscribe(dxfeed.DATA_CHANNEL)
self.cometd_client = cometd_client
self.logged_in = True
await self.reset_data_subs()
async def listen(self):
async for msg in self.cometd_client:
if msg['channel'] != dxfeed.DATA_CHANNEL:
continue
yield await self._consumer(msg['data'])
class AuthExtension(aiocometd.AuthExtension):
def __init__(self, streamer_token: str):
self.streamer_token = streamer_token
def _get_login_msg(self):
return {'ext': {'com.devexperts.auth.AuthToken': f'{self.streamer_token}'}}
def _get_advice_msg(self):
return {
'timeout': 60 * 1000,
'interval': 0
}
async def incoming(self, payload, headers=None):
pass
async def outgoing(self, payload, headers=None):
for entry in payload:
if 'clientId' not in entry:
entry.update(self._get_login_msg())
async def authenticate(self):
pass
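# Illustrative sketch (not part of the original module): consuming quote events.
# `session` must be an already-authenticated TastyAPISession; the subscription
# payload shape ({event type: [symbols]}) is an assumption, not confirmed here.
def _data_streamer_example(session: TastyAPISession):
    streamer = DataStreamer(session)  # opens the cometd connection in __init__
    async def consume():
        await streamer.add_data_sub({"Quote": ["SPY"]})
        async for event in streamer.listen():
            print(event)
            break
    asyncio.get_event_loop().run_until_complete(consume())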
|
the-stack_0_19696 | r"""
Interface to Singular
AUTHORS:
- David Joyner and William Stein (2005): first version
- Martin Albrecht (2006-03-05): code so singular.[tab] and x =
singular(...), x.[tab] includes all singular commands.
- Martin Albrecht (2006-03-06): This patch adds the equality symbol to
singular. Also fix a problem in which " " as prompt means comparison
will break all further communication with Singular.
- Martin Albrecht (2006-03-13): added current_ring() and
current_ring_name()
- William Stein (2006-04-10): Fixed problems with ideal constructor
- Martin Albrecht (2006-05-18): added sage_poly.
- Simon King (2010-11-23): Reduce the overhead caused by waiting for
the Singular prompt by doing garbage collection differently.
- Simon King (2011-06-06): Make conversion from Singular to Sage more flexible.
- Simon King (2015): Extend pickling capabilities.
Introduction
------------
This interface is extremely flexible, since it's exactly like
typing into the Singular interpreter, and anything that works there
should work here.
The Singular interface will only work if Singular is installed on
your computer; this should be the case, since Singular is included
with Sage. The interface offers three pieces of functionality:
#. ``singular_console()`` - A function that dumps you
into an interactive command-line Singular session.
#. ``singular(expr, type='def')`` - Creation of a
Singular object. This provides a Pythonic interface to Singular.
For example, if ``f=singular(10)``, then
``f.factorize()`` returns the factorization of
`10` computed using Singular.
#. ``singular.eval(expr)`` - Evaluation of arbitrary
Singular expressions, with the result returned as a string.
Of course, there are polynomial rings and ideals in Sage as well
(often based on a C-library interface to Singular). One can convert
an object in the Singular interpreter interface to Sage by the
method ``sage()``.
Tutorial
--------
EXAMPLES: First we illustrate multivariate polynomial
factorization::
sage: R1 = singular.ring(0, '(x,y)', 'dp')
sage: R1
polynomial ring, over a field, global ordering
// coefficients: QQ
// number of vars : 2
// block 1 : ordering dp
// : names x y
// block 2 : ordering C
sage: f = singular('9x16 - 18x13y2 - 9x12y3 + 9x10y4 - 18x11y2 + 36x8y4 + 18x7y5 - 18x5y6 + 9x6y4 - 18x3y6 - 9x2y7 + 9y8')
sage: f
9*x^16-18*x^13*y^2-9*x^12*y^3+9*x^10*y^4-18*x^11*y^2+36*x^8*y^4+18*x^7*y^5-18*x^5*y^6+9*x^6*y^4-18*x^3*y^6-9*x^2*y^7+9*y^8
sage: f.parent()
Singular
::
sage: F = f.factorize(); F
[1]:
_[1]=9
_[2]=x^6-2*x^3*y^2-x^2*y^3+y^4
_[3]=-x^5+y^2
[2]:
1,1,2
::
sage: F[1]
9,
x^6-2*x^3*y^2-x^2*y^3+y^4,
-x^5+y^2
sage: F[1][2]
x^6-2*x^3*y^2-x^2*y^3+y^4
We can convert `f` and each exponent back to Sage objects
as well.
::
sage: g = f.sage(); g
9*x^16 - 18*x^13*y^2 - 9*x^12*y^3 + 9*x^10*y^4 - 18*x^11*y^2 + 36*x^8*y^4 + 18*x^7*y^5 - 18*x^5*y^6 + 9*x^6*y^4 - 18*x^3*y^6 - 9*x^2*y^7 + 9*y^8
sage: F[1][2].sage()
x^6 - 2*x^3*y^2 - x^2*y^3 + y^4
sage: g.parent()
Multivariate Polynomial Ring in x, y over Rational Field
This example illustrates polynomial GCD's::
sage: R2 = singular.ring(0, '(x,y,z)', 'lp')
sage: a = singular.new('3x2*(x+y)')
sage: b = singular.new('9x*(y2-x2)')
sage: g = a.gcd(b)
sage: g
x^2+x*y
This example illustrates computation of a Groebner basis::
sage: R3 = singular.ring(0, '(a,b,c,d)', 'lp')
sage: I = singular.ideal(['a + b + c + d', 'a*b + a*d + b*c + c*d', 'a*b*c + a*b*d + a*c*d + b*c*d', 'a*b*c*d - 1'])
sage: I2 = I.groebner()
sage: I2
c^2*d^6-c^2*d^2-d^4+1,
c^3*d^2+c^2*d^3-c-d,
b*d^4-b+d^5-d,
b*c-b*d^5+c^2*d^4+c*d-d^6-d^2,
b^2+2*b*d+d^2,
a+b+c+d
The following example is the same as the one in the Singular - Gap
interface documentation::
sage: R = singular.ring(0, '(x0,x1,x2)', 'lp')
sage: I1 = singular.ideal(['x0*x1*x2 -x0^2*x2', 'x0^2*x1*x2-x0*x1^2*x2-x0*x1*x2^2', 'x0*x1-x0*x2-x1*x2'])
sage: I2 = I1.groebner()
sage: I2
x1^2*x2^2,
x0*x2^3-x1^2*x2^2+x1*x2^3,
x0*x1-x0*x2-x1*x2,
x0^2*x2-x0*x2^2-x1*x2^2
sage: I2.sage()
Ideal (x1^2*x2^2, x0*x2^3 - x1^2*x2^2 + x1*x2^3, x0*x1 - x0*x2 - x1*x2, x0^2*x2 - x0*x2^2 - x1*x2^2) of Multivariate Polynomial Ring in x0, x1, x2 over Rational Field
This example illustrates moving a polynomial from one ring to
another. It also illustrates calling a method of an object with an
argument.
::
sage: R = singular.ring(0, '(x,y,z)', 'dp')
sage: f = singular('x3+y3+(x-y)*x2y2+z2')
sage: f
x^3*y^2-x^2*y^3+x^3+y^3+z^2
sage: R1 = singular.ring(0, '(x,y,z)', 'ds')
sage: f = R.fetch(f)
sage: f
z^2+x^3+y^3+x^3*y^2-x^2*y^3
We can calculate the Milnor number of `f`::
sage: _=singular.LIB('sing.lib') # assign to _ to suppress printing
sage: f.milnor()
4
The Jacobian applied twice yields the Hessian matrix of
`f`, with which we can compute.
::
sage: H = f.jacob().jacob()
sage: H
6*x+6*x*y^2-2*y^3,6*x^2*y-6*x*y^2, 0,
6*x^2*y-6*x*y^2, 6*y+2*x^3-6*x^2*y,0,
0, 0, 2
sage: H.sage()
[6*x + 6*x*y^2 - 2*y^3 6*x^2*y - 6*x*y^2 0]
[ 6*x^2*y - 6*x*y^2 6*y + 2*x^3 - 6*x^2*y 0]
[ 0 0 2]
sage: H.det() # This is a polynomial in Singular
72*x*y+24*x^4-72*x^3*y+72*x*y^3-24*y^4-48*x^4*y^2+64*x^3*y^3-48*x^2*y^4
sage: H.det().sage() # This is the corresponding polynomial in Sage
72*x*y + 24*x^4 - 72*x^3*y + 72*x*y^3 - 24*y^4 - 48*x^4*y^2 + 64*x^3*y^3 - 48*x^2*y^4
The 1x1 and 2x2 minors::
sage: H.minor(1)
2,
6*y+2*x^3-6*x^2*y,
6*x^2*y-6*x*y^2,
6*x^2*y-6*x*y^2,
6*x+6*x*y^2-2*y^3,
0,
0,
0,
0
sage: H.minor(2)
12*y+4*x^3-12*x^2*y,
12*x^2*y-12*x*y^2,
12*x^2*y-12*x*y^2,
12*x+12*x*y^2-4*y^3,
-36*x*y-12*x^4+36*x^3*y-36*x*y^3+12*y^4+24*x^4*y^2-32*x^3*y^3+24*x^2*y^4,
0,
0,
0,
0
::
sage: _=singular.eval('option(redSB)')
sage: H.minor(1).groebner()
1
Computing the Genus
-------------------
We compute the projective genus of ideals that define curves over
`\QQ`. It is *very important* to load the
``normal.lib`` library before calling the
``genus`` command, or you'll get an error message.
EXAMPLES::
sage: singular.lib('normal.lib')
sage: R = singular.ring(0,'(x,y)','dp')
sage: i2 = singular.ideal('y9 - x2*(x-1)^9 + x')
sage: i2.genus()
40
Note that the genus can be much smaller than the degree::
sage: i = singular.ideal('y9 - x2*(x-1)^9')
sage: i.genus()
0
An Important Concept
--------------------
AUTHORS:
- Neal Harris
The following illustrates an important concept: how Sage interacts
with the data being used and returned by Singular. Let's compute a
Groebner basis for some ideal, using Singular through Sage.
::
sage: singular.lib('polylib.lib')
sage: singular.ring(32003, '(a,b,c,d,e,f)', 'lp')
polynomial ring, over a field, global ordering
// coefficients: ZZ/32003
// number of vars : 6
// block 1 : ordering lp
// : names a b c d e f
// block 2 : ordering C
sage: I = singular.ideal('cyclic(6)')
sage: g = singular('groebner(I)')
Traceback (most recent call last):
...
TypeError: Singular error:
...
We restart everything and try again, but correctly.
::
sage: singular.quit()
sage: singular.lib('polylib.lib'); R = singular.ring(32003, '(a,b,c,d,e,f)', 'lp')
sage: I = singular.ideal('cyclic(6)')
sage: I.groebner()
f^48-2554*f^42-15674*f^36+12326*f^30-12326*f^18+15674*f^12+2554*f^6-1,
...
It's important to understand why the first attempt at computing a
basis failed. The line where we gave singular the input
'groebner(I)' was useless because Singular has no idea what 'I' is!
Although 'I' is an object that we computed with calls to Singular
functions, it actually lives in Sage. As a consequence, the name
'I' means nothing to Singular. When we called
``I.groebner()``, Sage was able to call the groebner
function on 'I' in Singular, since 'I' actually means something to
Sage.
Long Input
----------
The Singular interface reads in even very long input (using files)
in a robust manner, as long as you are creating a new object.
::
sage: t = '"%s"'%10^15000 # 15 thousand character string (note that normal Singular input must be at most 10000)
sage: a = singular.eval(t)
sage: a = singular(t)
TESTS:
We test an automatic coercion::
sage: a = 3*singular('2'); a
6
sage: type(a)
<class 'sage.interfaces.singular.SingularElement'>
sage: a = singular('2')*3; a
6
sage: type(a)
<class 'sage.interfaces.singular.SingularElement'>
Create a ring over GF(9) to check that ``gftables`` has been installed,
see :trac:`11645`::
sage: singular.eval("ring testgf9 = (9,x),(a,b,c,d,e,f),(M((1,2,3,0)),wp(2,3),lp);")
''
Verify that :trac:`17720` is fixed::
sage: R.<p> = QQ[]
sage: K.<p> = QQ.extension(p^2 - p - 1)
sage: r.<x,z> = K[]
sage: I = r.ideal(z)
sage: I.primary_decomposition()
[Ideal (z) of Multivariate Polynomial Ring in x, z over Number Field in p with defining polynomial p^2 - p - 1]
sage: [ J.gens() for J in I.primary_decomposition("gtz")]
[[z]]
"""
# ****************************************************************************
# Copyright (C) 2005 David Joyner and William Stein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
import io
import os
import re
import sys
import pexpect
from .expect import Expect, ExpectElement, FunctionElement, ExpectFunction
from sage.interfaces.tab_completion import ExtraTabCompletion
from sage.structure.sequence import Sequence_generic
from sage.structure.element import RingElement
import sage.rings.integer
from sage.misc.verbose import get_verbose
from sage.docs.instancedoc import instancedoc
class SingularError(RuntimeError):
"""
Raised if Singular printed an error message
"""
pass
class Singular(ExtraTabCompletion, Expect):
r"""
Interface to the Singular interpreter.
EXAMPLES: A Groebner basis example.
::
sage: R = singular.ring(0, '(x0,x1,x2)', 'lp')
sage: I = singular.ideal([ 'x0*x1*x2 -x0^2*x2', 'x0^2*x1*x2-x0*x1^2*x2-x0*x1*x2^2', 'x0*x1-x0*x2-x1*x2'])
sage: I.groebner()
x1^2*x2^2,
x0*x2^3-x1^2*x2^2+x1*x2^3,
x0*x1-x0*x2-x1*x2,
x0^2*x2-x0*x2^2-x1*x2^2
AUTHORS:
- David Joyner and William Stein
"""
def __init__(self, maxread=None, script_subdirectory=None,
logfile=None, server=None, server_tmpdir=None,
seed=None):
"""
EXAMPLES::
sage: singular == loads(dumps(singular))
True
"""
prompt = '> '
Expect.__init__(self,
terminal_echo=False,
name = 'singular',
prompt = prompt,
# no tty, fine grained cputime()
# and do not display CTRL-C prompt
command = "Singular -t --ticks-per-sec 1000 --cntrlc=a",
server = server,
server_tmpdir = server_tmpdir,
script_subdirectory = script_subdirectory,
restart_on_ctrlc = True,
verbose_start = False,
logfile = logfile,
eval_using_file_cutoff=100 if os.uname()[0]=="SunOS" else 1000)
self.__libs = []
self._prompt_wait = prompt
self.__to_clear = [] # list of variable names that need to be cleared.
self._seed = seed
def set_seed(self, seed=None):
"""
        Set the seed for the Singular interpreter.
The seed should be an integer at least 1
and not more than 30 bits.
See
http://www.singular.uni-kl.de/Manual/html/sing_19.htm#SEC26
and
http://www.singular.uni-kl.de/Manual/html/sing_283.htm#SEC323
EXAMPLES::
sage: s = Singular()
sage: s.set_seed(1)
1
sage: [s.random(1,10) for i in range(5)]
[8, 10, 4, 9, 1]
"""
if seed is None:
seed = self.rand_seed()
self.eval('system("--random",%d)' % seed)
self._seed = seed
return seed
def _start(self, alt_message=None):
"""
EXAMPLES::
sage: s = Singular()
sage: s.is_running()
False
sage: s._start()
sage: s.is_running()
True
sage: s.quit()
"""
self.__libs = []
Expect._start(self, alt_message)
# Load some standard libraries.
self.lib('general') # assumed loaded by misc/constants.py
# these options are required by the new coefficient rings
# supported by Singular 3-1-0.
self.option("redTail")
self.option("redThrough")
self.option("intStrategy")
self._saved_options = self.option('get')
# set random seed
self.set_seed(self._seed)
def __reduce__(self):
"""
EXAMPLES::
sage: singular.__reduce__()
(<function reduce_load_Singular at 0x...>, ())
"""
return reduce_load_Singular, ()
def _equality_symbol(self):
"""
EXAMPLES::
sage: singular._equality_symbol()
'=='
"""
return '=='
def _true_symbol(self):
"""
EXAMPLES::
sage: singular._true_symbol()
'1'
"""
return '1'
def _false_symbol(self):
"""
EXAMPLES::
sage: singular._false_symbol()
'0'
"""
return '0'
def _quit_string(self):
"""
EXAMPLES::
sage: singular._quit_string()
'quit;'
"""
return 'quit;'
def _read_in_file_command(self, filename):
r"""
EXAMPLES::
sage: singular._read_in_file_command('test')
'< "...";'
sage: filename = tmp_filename()
sage: f = open(filename, 'w')
sage: _ = f.write('int x = 2;\n')
sage: f.close()
sage: singular.read(filename)
sage: singular.get('x')
'2'
"""
return '< "%s";' % filename
def eval(self, x, allow_semicolon=True, strip=True, **kwds):
r"""
Send the code x to the Singular interpreter and return the output
as a string.
INPUT:
- ``x`` - string (of code)
        - ``allow_semicolon`` - default: True; if False then
raise a TypeError if the input line contains a semicolon.
- ``strip`` - ignored
EXAMPLES::
sage: singular.eval('2 > 1')
'1'
sage: singular.eval('2 + 2')
'4'
if the verbosity level is `> 1` comments are also printed
and not only returned.
::
sage: r = singular.ring(0,'(x,y,z)','dp')
sage: i = singular.ideal(['x^2','y^2','z^2'])
sage: s = i.std()
sage: singular.eval('hilb(%s)'%(s.name()))
'// 1 t^0\n// -3 t^2\n// 3 t^4\n// -1 t^6\n\n// 1 t^0\n//
3 t^1\n// 3 t^2\n// 1 t^3\n// dimension (affine) = 0\n//
degree (affine) = 8'
::
sage: from sage.misc.verbose import set_verbose
sage: set_verbose(1)
sage: o = singular.eval('hilb(%s)'%(s.name()))
// 1 t^0
// -3 t^2
// 3 t^4
// -1 t^6
// 1 t^0
// 3 t^1
// 3 t^2
// 1 t^3
// dimension (affine) = 0
// degree (affine) = 8
This is mainly useful if this method is called implicitly. Because
then intermediate results, debugging outputs and printed statements
are printed
::
sage: o = s.hilb()
// 1 t^0
// -3 t^2
// 3 t^4
// -1 t^6
// 1 t^0
// 3 t^1
// 3 t^2
// 1 t^3
// dimension (affine) = 0
// degree (affine) = 8
// ** right side is not a datum, assignment ignored
...
rather than ignored
::
sage: set_verbose(0)
sage: o = s.hilb()
"""
# Simon King:
# In previous versions, the interface was first synchronised and then
# unused variables were killed. This created a considerable overhead.
# By trac ticket #10296, killing unused variables is now done inside
# singular.set(). Moreover, it is not done by calling a separate _eval_line.
# In that way, the time spent by waiting for the singular prompt is reduced.
# Before #10296, it was possible that garbage collection occurred inside
# of _eval_line. But collection of the garbage would launch another call
# to _eval_line. The result would have been a dead lock, that could only
# be avoided by synchronisation. Since garbage collection is now done
# without an additional call to _eval_line, synchronisation is not
# needed anymore, saving even more waiting time for the prompt.
# Uncomment the print statements below for low-level debugging of
# code that involves the singular interfaces. Everything goes
# through here.
x = str(x).rstrip().rstrip(';')
x = x.replace("> ",">\t") #don't send a prompt (added by Martin Albrecht)
if not allow_semicolon and x.find(";") != -1:
raise TypeError("singular input must not contain any semicolons:\n%s" % x)
if len(x) == 0 or x[len(x) - 1] != ';':
x += ';'
s = Expect.eval(self, x, **kwds)
# "Segment fault" is not a typo:
# Singular actually does use that string
if s.find("error occurred") != -1 or s.find("Segment fault") != -1:
raise SingularError('Singular error:\n%s'%s)
if get_verbose() > 0:
for line in s.splitlines():
if line.startswith("//"):
print(line)
return s
else:
return s
def set(self, type, name, value):
"""
Set the variable with given name to the given value.
REMARK:
If a variable in the Singular interface was previously marked for
deletion, the actual deletion is done here, before the new variable
is created in Singular.
EXAMPLES::
sage: singular.set('int', 'x', '2')
sage: singular.get('x')
'2'
We test that an unused variable is only actually deleted if this method
is called::
sage: a = singular(3)
sage: n = a.name()
sage: del a
sage: singular.eval(n)
'3'
sage: singular.set('int', 'y', '5')
sage: singular.eval('defined(%s)'%n)
'0'
"""
cmd = ''.join('if(defined(%s)){kill %s;};'%(v,v) for v in self.__to_clear)
cmd += '%s %s=%s;'%(type, name, value)
self.__to_clear = []
self.eval(cmd)
def get(self, var):
"""
Get string representation of variable named var.
EXAMPLES::
sage: singular.set('int', 'x', '2')
sage: singular.get('x')
'2'
"""
return self.eval('print(%s);'%var)
def clear(self, var):
"""
Clear the variable named ``var``.
EXAMPLES::
sage: singular.set('int', 'x', '2')
sage: singular.get('x')
'2'
sage: singular.clear('x')
"Clearing the variable" means to allow to free the memory
that it uses in the Singular sub-process. However, the
actual deletion of the variable is only committed when
the next element in the Singular interface is created::
sage: singular.get('x')
'2'
sage: a = singular(3)
sage: singular.get('x')
'`x`'
"""
# We add the variable to the list of vars to clear when we do an eval.
# We queue up all the clears and do them at once to avoid synchronizing
# the interface at the same time we do garbage collection, which can
# lead to subtle problems. This was Willem Jan's ideas, implemented
# by William Stein.
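# Illustrative sketch of the resulting lifecycle (variable name hypothetical):
#   singular.clear('sage5')        # merely queues 'sage5' in self.__to_clear
#   singular.set('int', 'y', '5')  # then emits: if(defined(sage5)){kill sage5;};int y=5;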
self.__to_clear.append(var)
def _create(self, value, type='def'):
"""
Creates a new variable in the Singular session and returns the name
of that variable.
EXAMPLES::
sage: singular._create('2', type='int')
'sage...'
sage: singular.get(_)
'2'
"""
name = self._next_var_name()
self.set(type, name, value)
return name
def __call__(self, x, type='def'):
"""
Create a singular object X with given type determined by the string
x. This returns var, where var is built using the Singular
statement type var = ... x ... Note that the actual name of var
could be anything, and can be recovered using X.name().
The object X returned can be used like any Sage object, and wraps
an object in self. The standard arithmetic operators work. Moreover
if foo is a function then X.foo(y,z,...) calls foo(X, y, z, ...)
and returns the corresponding object.
EXAMPLES::
sage: R = singular.ring(0, '(x0,x1,x2)', 'lp')
sage: I = singular.ideal([ 'x0*x1*x2 -x0^2*x2', 'x0^2*x1*x2-x0*x1^2*x2-x0*x1*x2^2', 'x0*x1-x0*x2-x1*x2'])
sage: I
-x0^2*x2+x0*x1*x2,
x0^2*x1*x2-x0*x1^2*x2-x0*x1*x2^2,
x0*x1-x0*x2-x1*x2
sage: type(I)
<class 'sage.interfaces.singular.SingularElement'>
sage: I.parent()
Singular
"""
if isinstance(x, SingularElement) and x.parent() is self:
return x
elif isinstance(x, ExpectElement):
return self(x.sage())
elif not isinstance(x, ExpectElement) and hasattr(x, '_singular_'):
return x._singular_(self)
# some convenient conversions
if type in ("module","list") and isinstance(x,(list,tuple,Sequence_generic)):
x = str(x)[1:-1]
return SingularElement(self, type, x, False)
def _coerce_map_from_(self, S):
"""
Return ``True`` if ``S`` admits a coercion map into the
Singular interface.
EXAMPLES::
sage: singular._coerce_map_from_(ZZ)
True
sage: singular.coerce_map_from(ZZ)
Call morphism:
From: Integer Ring
To: Singular
sage: singular.coerce_map_from(float)
"""
# we want to implement this without coercing, since singular has state.
if hasattr(S, 'an_element'):
if hasattr(S.an_element(), '_singular_'):
return True
elif S is int:
return True
return None
def cputime(self, t=None):
r"""
Returns the amount of CPU time that the Singular session has used.
If ``t`` is not None, then it returns the difference
between the current CPU time and ``t``.
EXAMPLES::
sage: t = singular.cputime()
sage: R = singular.ring(0, '(x0,x1,x2)', 'lp')
sage: I = singular.ideal([ 'x0*x1*x2 -x0^2*x2', 'x0^2*x1*x2-x0*x1^2*x2-x0*x1*x2^2', 'x0*x1-x0*x2-x1*x2'])
sage: gb = I.groebner()
sage: singular.cputime(t) #random
0.02
"""
if t:
return float(self.eval('timer-(%d)'%(int(1000*t))))/1000.0
else:
return float(self.eval('timer'))/1000.0
###################################################################
# Singular libraries
###################################################################
def lib(self, lib, reload=False):
"""
Load the Singular library named lib.
Note that if the library was already loaded during this session it
is not reloaded unless the optional reload argument is True (the
default is False).
EXAMPLES::
sage: singular.lib('sing.lib')
sage: singular.lib('sing.lib', reload=True)
"""
if lib[-4:] != ".lib":
lib += ".lib"
if not reload and lib in self.__libs:
return
self.eval('LIB "%s"' % lib)
self.__libs.append(lib)
LIB = lib
load = lib
###################################################################
# constructors
###################################################################
def ideal(self, *gens):
"""
Return the ideal generated by gens.
INPUT:
- ``gens`` - list or tuple of Singular objects (or
objects that can be made into Singular objects via evaluation)
OUTPUT: the Singular ideal generated by the given list of gens
EXAMPLES: A Groebner basis example done in a different way.
::
sage: _ = singular.eval("ring R=0,(x0,x1,x2),lp")
sage: i1 = singular.ideal([ 'x0*x1*x2 -x0^2*x2', 'x0^2*x1*x2-x0*x1^2*x2-x0*x1*x2^2', 'x0*x1-x0*x2-x1*x2'])
sage: i1
-x0^2*x2+x0*x1*x2,
x0^2*x1*x2-x0*x1^2*x2-x0*x1*x2^2,
x0*x1-x0*x2-x1*x2
::
sage: i2 = singular.ideal('groebner(%s);'%i1.name())
sage: i2
x1^2*x2^2,
x0*x2^3-x1^2*x2^2+x1*x2^3,
x0*x1-x0*x2-x1*x2,
x0^2*x2-x0*x2^2-x1*x2^2
"""
if isinstance(gens, str):
gens = self(gens)
if isinstance(gens, SingularElement):
return self(gens.name(), 'ideal')
if not isinstance(gens, (list, tuple)):
raise TypeError("gens (=%s) must be a list, tuple, string, or Singular element" % gens)
if len(gens) == 1 and isinstance(gens[0], (list, tuple)):
gens = gens[0]
gens2 = []
for g in gens:
if not isinstance(g, SingularElement):
gens2.append(self.new(g))
else:
gens2.append(g)
return self(",".join(g.name() for g in gens2), 'ideal')
def list(self, x):
r"""
Creates a list in Singular from a Sage list ``x``.
EXAMPLES::
sage: singular.list([1,2])
[1]:
1
[2]:
2
sage: singular.list([1,2,[3,4]])
[1]:
1
[2]:
2
[3]:
[1]:
3
[2]:
4
sage: R.<x,y> = QQ[]
sage: singular.list([1,2,[x,ideal(x,y)]])
[1]:
1
[2]:
2
[3]:
[1]:
x
[2]:
_[1]=x
_[2]=y
Strings have to be escaped before passing them to this method::
sage: singular.list([1,2,'"hi"'])
[1]:
1
[2]:
2
[3]:
hi
TESTS:
Check that a list already converted to Singular can be
embedded into a list to be converted::
sage: singular.list([1, 2, singular.list([3, 4])])
[1]:
1
[2]:
2
[3]:
[1]:
3
[2]:
4
"""
# We have to be careful about object destruction.
# If we convert an object to a Singular element, the only
# thing that goes into the list definition statement is the
# Singular variable name, so we need to keep the element
# around long enough to ensure that the variable still exists
# when we create the list. We ensure this by putting created
# elements on a list, which gets destroyed when this function
# returns, by which time the list has been created.
singular_elements = []
def strify(x):
if isinstance(x, (list, tuple, Sequence_generic)):
return 'list(' + ','.join(strify(i) for i in x) + ')'
elif isinstance(x, SingularElement):
return x.name()
elif isinstance(x, (int, sage.rings.integer.Integer)):
return repr(x)
elif hasattr(x, '_singular_'):
e = x._singular_()
singular_elements.append(e)
return e.name()
else:
return str(x)
return self(strify(x), 'list')
def matrix(self, nrows, ncols, entries=None):
"""
EXAMPLES::
sage: singular.lib("matrix")
sage: R = singular.ring(0, '(x,y,z)', 'dp')
sage: A = singular.matrix(3,2,'1,2,3,4,5,6')
sage: A
1,2,
3,4,
5,6
sage: A.gauss_col()
2,-1,
1,0,
0,1
AUTHORS:
- Martin Albrecht (2006-01-14)
"""
name = self._next_var_name()
if entries is None:
self.eval('matrix %s[%s][%s]'%(name, nrows, ncols))
else:
self.eval('matrix %s[%s][%s] = %s'%(name, nrows, ncols, entries))
return SingularElement(self, None, name, True)
def ring(self, char=0, vars='(x)', order='lp', check=True):
r"""
Create a Singular ring and make it the current ring.
INPUT:
- ``char`` - characteristic of the base ring (see
examples below), which must be either 0, prime (!), or one of
several special codes (see examples below).
- ``vars`` - a tuple or string that defines the
variable names
- ``order`` - string - the monomial order (default:
'lp')
- ``check`` - if True, check primality of the
characteristic if it is an integer.
OUTPUT: a Singular ring
.. note::
This function is *not* identical to calling the Singular
``ring`` function. In particular, it also attempts to
"kill" the variable names, so they can actually be used
without getting errors, and it sets printing of elements
for this ring to short (i.e., with \*'s and carets).
EXAMPLES: We first declare `\QQ[x,y,z]` with degree reverse
lexicographic ordering.
::
sage: R = singular.ring(0, '(x,y,z)', 'dp')
sage: R
polynomial ring, over a field, global ordering
// coefficients: QQ
// number of vars : 3
// block 1 : ordering dp
// : names x y z
// block 2 : ordering C
::
sage: R1 = singular.ring(32003, '(x,y,z)', 'dp')
sage: R2 = singular.ring(32003, '(a,b,c,d)', 'lp')
This is a ring in variables named x(1) through x(10) over the
finite field of order `7`::
sage: R3 = singular.ring(7, '(x(1..10))', 'ds')
This is a polynomial ring over the transcendental extension
`\QQ(a)` of `\QQ`::
sage: R4 = singular.ring('(0,a)', '(mu,nu)', 'lp')
This is a ring over the field of single-precision floats::
sage: R5 = singular.ring('real', '(a,b)', 'lp')
This is over 50-digit floats::
sage: R6 = singular.ring('(real,50)', '(a,b)', 'lp')
sage: R7 = singular.ring('(complex,50,i)', '(a,b)', 'lp')
To use a ring that you've defined, use the set_ring() method on
the ring. This sets the ring to be the "current ring". For
example,
::
sage: R = singular.ring(7, '(a,b)', 'ds')
sage: S = singular.ring('real', '(a,b)', 'lp')
sage: singular.new('10*a')
(1.000e+01)*a
sage: R.set_ring()
sage: singular.new('10*a')
3*a
"""
if len(vars) > 2:
s = '; '.join('if(defined(%s)>0){kill %s;};' % (x, x)
for x in vars[1:-1].split(','))
self.eval(s)
if check and isinstance(char, (int, sage.rings.integer.Integer)):
if char:
n = sage.rings.integer.Integer(char)
if not n.is_prime():
raise ValueError("the characteristic must be 0 or prime")
R = self('%s,%s,%s' % (char, vars, order), 'ring')
self.eval('short=0') # make output include *'s for multiplication for *THIS* ring.
return R
def string(self, x):
"""
Creates a Singular string from a Sage string. Note that the Sage
string has to be "double-quoted".
EXAMPLES::
sage: singular.string('"Sage"')
Sage
"""
return self(x, 'string')
def set_ring(self, R):
"""
Sets the current Singular ring to R.
EXAMPLES::
sage: R = singular.ring(7, '(a,b)', 'ds')
sage: S = singular.ring('real', '(a,b)', 'lp')
sage: singular.current_ring()
polynomial ring, over a field, global ordering
// coefficients: Float()
// number of vars : 2
// block 1 : ordering lp
// : names a b
// block 2 : ordering C
sage: singular.set_ring(R)
sage: singular.current_ring()
polynomial ring, over a field, local ordering
// coefficients: ZZ/7
// number of vars : 2
// block 1 : ordering ds
// : names a b
// block 2 : ordering C
"""
if not isinstance(R, SingularElement):
raise TypeError("R must be a singular ring")
self.eval("setring %s; short=0" % R.name(), allow_semicolon=True)
setring = set_ring
def current_ring_name(self):
"""
Returns the Singular name of the currently active ring in
Singular.
OUTPUT: currently active ring's name
EXAMPLES::
sage: r = PolynomialRing(GF(127),3,'xyz')
sage: r._singular_().name() == singular.current_ring_name()
True
"""
ringlist = self.eval("listvar(ring)").splitlines()
p = re.compile(r"// ([a-zA-Z0-9_]*).*\[.*\].*\*.*") #do this in constructor?
for line in ringlist:
m = p.match(line)
if m:
return m.group(int(1))
return None
def current_ring(self):
"""
Returns the current ring of the running Singular session.
EXAMPLES::
sage: r = PolynomialRing(GF(127),3,'xyz', order='invlex')
sage: r._singular_()
polynomial ring, over a field, global ordering
// coefficients: ZZ/127
// number of vars : 3
// block 1 : ordering rp
// : names x y z
// block 2 : ordering C
sage: singular.current_ring()
polynomial ring, over a field, global ordering
// coefficients: ZZ/127
// number of vars : 3
// block 1 : ordering rp
// : names x y z
// block 2 : ordering C
"""
name = self.current_ring_name()
if name:
return self(name)
else:
return None
def _tab_completion(self):
"""
Return a list of all Singular commands.
EXAMPLES::
sage: singular._tab_completion()
['exteriorPower',
...
'crossprod']
"""
p = re.compile("// *([a-z0-9A-Z_]*).*") #compiles regular expression
proclist = self.eval("listvar(proc)").splitlines()
return [p.match(line).group(int(1)) for line in proclist]
def console(self):
r"""
EXAMPLES::
sage: singular_console() #not tested
SINGULAR / Development
A Computer Algebra System for Polynomial Computations / version 3-0-4
0<
by: G.-M. Greuel, G. Pfister, H. Schoenemann \ Nov 2007
FB Mathematik der Universitaet, D-67653 Kaiserslautern \
"""
singular_console()
def version(self):
"""
Return the version of Singular being used.
EXAMPLES::
sage: singular.version()
"Singular ... version 4...
"""
return singular_version()
def _function_class(self):
"""
EXAMPLES::
sage: singular._function_class()
<class 'sage.interfaces.singular.SingularFunction'>
"""
return SingularFunction
def _function_element_class(self):
"""
EXAMPLES::
sage: singular._function_element_class()
<class 'sage.interfaces.singular.SingularFunctionElement'>
"""
return SingularFunctionElement
def option(self, cmd=None, val=None):
"""
Access to Singular's options as follows:
Syntax: option() Returns a string of all defined options.
Syntax: option( 'option_name' ) Sets an option. Note to disable an
option, use the prefix no.
Syntax: option( 'get' ) Returns an intvec of the state of all
options.
Syntax: option( 'set', intvec_expression ) Restores the state of
all options from an intvec (produced by option('get')).
EXAMPLES::
sage: singular.option()
//options: redefine loadLib usage prompt
sage: singular.option('get')
0,
10321
sage: old_options = _
sage: singular.option('noredefine')
sage: singular.option()
//options: loadLib usage prompt
sage: singular.option('set', old_options)
sage: singular.option('get')
0,
10321
"""
if cmd is None:
return SingularFunction(self,"option")()
elif cmd == "get":
#return SingularFunction(self,"option")("\"get\"")
return self(self.eval("option(get)"),"intvec")
elif cmd == "set":
if not isinstance(val,SingularElement):
raise TypeError("singular.option('set') needs SingularElement as second parameter")
#SingularFunction(self,"option")("\"set\"",val)
self.eval("option(set,%s)" % val.name())
else:
SingularFunction(self,"option")("\""+str(cmd)+"\"")
def _keyboard_interrupt(self):
print("Interrupting %s..." % self)
try:
self._expect.sendline(chr(4))
except pexpect.ExceptionPexpect as msg:
raise pexpect.ExceptionPexpect("THIS IS A BUG -- PLEASE REPORT. This should never happen.\n" + msg)
self._start()
raise KeyboardInterrupt("Restarting %s (WARNING: all variables defined in previous session are now invalid)" % self)
@instancedoc
class SingularElement(ExtraTabCompletion, ExpectElement):
def __init__(self, parent, type, value, is_name=False):
"""
EXAMPLES::
sage: a = singular(2)
sage: loads(dumps(a))
2
"""
RingElement.__init__(self, parent)
if parent is None:
return
if not is_name:
try:
self._name = parent._create(value, type)
# Convert SingularError to TypeError for
# coercion to work properly.
except SingularError as x:
self._session_number = -1
raise TypeError(x)
except BaseException:
self._session_number = -1
raise
else:
self._name = value
self._session_number = parent._session_number
def _repr_(self):
r"""
Return string representation of ``self``.
EXAMPLES::
sage: r = singular.ring(0,'(x,y)','dp')
sage: singular(0)
0
sage: singular('x') # indirect doctest
x
sage: singular.matrix(2,2)
0,0,
0,0
sage: singular.matrix(2,2,"(25/47*x^2*y^4 + 63/127*x + 27)^3,y,0,1")
15625/103823*x^6*y.., y,
0, 1
Note that the output is truncated, and if ``self`` has a custom name then
it is used to print the items of the matrix, rather than abbreviating its
contents::
sage: M = singular.matrix(2,2,"(25/47*x^2*y^4 + 63/127*x + 27)^3,y,0,1")
sage: M.rename('T')
sage: M
T[1,1],y,
0, 1
"""
s = super(SingularElement, self)._repr_()
if self._name in s:
if (not hasattr(self, "__custom_name")) and self.type() == 'matrix':
s = self.parent().eval('pmat(%s,20)'%(self.name()))
return s
def __copy__(self):
r"""
Returns a copy of ``self``.
EXAMPLES::
sage: R=singular.ring(0,'(x,y)','dp')
sage: M=singular.matrix(3,3,'0,0,-x, 0,y,0, x*y,0,0')
sage: N=copy(M)
sage: N[1,1]=singular('x+y')
sage: N
x+y,0,-x,
0, y,0,
x*y,0,0
sage: M
0, 0,-x,
0, y,0,
x*y,0,0
sage: L=R.ringlist()
sage: L[4]=singular.ideal('x**2-5')
sage: Q=L.ring()
sage: otherR=singular.ring(5,'(x)','dp')
sage: cpQ=copy(Q)
sage: cpQ.set_ring()
sage: cpQ
polynomial ring, over a field, global ordering
// coefficients: QQ
// number of vars : 2
// block 1 : ordering dp
// : names x y
// block 2 : ordering C
// quotient ring from ideal
_[1]=x^2-5
sage: R.fetch(M)
0, 0,-x,
0, y,0,
x*y,0,0
"""
if (self.type()=='ring') or (self.type()=='qring'):
# Problem: singular has no clean method to produce
# a copy of a ring/qring. We use ringlist, but this
# is only possible if we make self the active ring,
# use ringlist, and switch back to the previous
# base ring.
br=self.parent().current_ring()
self.set_ring()
OUT = (self.ringlist()).ring()
br.set_ring()
return OUT
else:
return self.parent()(self.name())
def __len__(self):
"""
Returns the size of this Singular element.
EXAMPLES::
sage: R = singular.ring(0, '(x,y,z)', 'dp')
sage: A = singular.matrix(2,2)
sage: len(A)
4
"""
return int(self.size())
def __setitem__(self, n, value):
"""
Set the n-th element of self to ``value``.
INPUT:
- ``n`` - an integer *or* a 2-tuple (for setting
matrix elements)
- ``value`` - anything (is coerced to a Singular
object if it is not one already)
OUTPUT: Changes elements of self.
EXAMPLES::
sage: R = singular.ring(0, '(x,y,z)', 'dp')
sage: A = singular.matrix(2,2)
sage: A
0,0,
0,0
sage: A[1,1] = 5
sage: A
5,0,
0,0
sage: A[1,2] = '5*x + y + z3'
sage: A
5,z^3+5*x+y,
0,0
"""
P = self.parent()
if not isinstance(value, SingularElement):
value = P(value)
if isinstance(n, tuple):
if len(n) != 2:
raise ValueError("If n (=%s) is a tuple, it must be a 2-tuple" % n)
x, y = n
P.eval('%s[%s,%s] = %s'%(self.name(), x, y, value.name()))
else:
P.eval('%s[%s] = %s'%(self.name(), n, value.name()))
def __bool__(self):
"""
Returns ``True`` if this Singular element is not zero.
EXAMPLES::
sage: bool(singular(0))
False
sage: bool(singular(1))
True
"""
P = self.parent()
return P.eval('%s == 0' % self.name()) == '0'
__nonzero__ = __bool__
def sage_polystring(self):
r"""
If this Singular element is a polynomial, return a string
representation of this polynomial that is suitable for evaluation
in Python. Thus \* is used for multiplication and \*\* for
exponentiation. This function is primarily used internally.
The short=0 option *must* be set for the parent ring or this
function will not work as expected. This option is set by default
for rings created using ``singular.ring`` or set using
``ring_name.set_ring()``.
EXAMPLES::
sage: R = singular.ring(0,'(x,y)')
sage: f = singular('x^3 + 3*y^11 + 5')
sage: f
x^3+3*y^11+5
sage: f.sage_polystring()
'x**3+3*y**11+5'
"""
return str(self).replace('^','**')
def sage_global_ring(self):
"""
Return the current basering in Singular as a polynomial ring or quotient ring.
EXAMPLES::
sage: singular.eval('ring r1 = (9,x),(a,b,c,d,e,f),(M((1,2,3,0)),wp(2,3),lp)')
''
sage: R = singular('r1').sage_global_ring()
sage: R
Multivariate Polynomial Ring in a, b, c, d, e, f over Finite Field in x of size 3^2
sage: R.term_order()
Block term order with blocks:
(Matrix term order with matrix
[1 2]
[3 0],
Weighted degree reverse lexicographic term order with weights (2, 3),
Lexicographic term order of length 2)
::
sage: singular.eval('ring r2 = (0,x),(a,b,c),dp')
''
sage: singular('r2').sage_global_ring()
Multivariate Polynomial Ring in a, b, c over Fraction Field of Univariate Polynomial Ring in x over Rational Field
::
sage: singular.eval('ring r3 = (3,z),(a,b,c),dp')
''
sage: singular.eval('minpoly = 1+z+z2+z3+z4')
''
sage: singular('r3').sage_global_ring()
Multivariate Polynomial Ring in a, b, c over Finite Field in z of size 3^4
Real and complex fields in both Singular and Sage are defined with a precision.
The precision in Singular is given in terms of digits, but in Sage it is given
in terms of bits. So, the digit precision is internally converted to a reasonable
bit precision::
sage: singular.eval('ring r4 = (real,20),(a,b,c),dp')
''
sage: singular('r4').sage_global_ring()
Multivariate Polynomial Ring in a, b, c over Real Field with 70 bits of precision
The case of complex coefficients is not fully supported, yet, since
the generator of a complex field in Sage is always called "I"::
sage: singular.eval('ring r5 = (complex,15,j),(a,b,c),dp')
''
sage: R = singular('r5').sage_global_ring(); R
Multivariate Polynomial Ring in a, b, c over Complex Field with 54 bits of precision
sage: R.base_ring()('k')
Traceback (most recent call last):
...
ValueError: given string 'k' is not a complex number
sage: R.base_ring()('I')
1.00000000000000*I
An example where the base ring is a polynomial ring over an extension of the rational field::
sage: singular.eval('ring r7 = (0,a), (x,y), dp')
''
sage: singular.eval('minpoly = a2 + 1')
''
sage: singular('r7').sage_global_ring()
Multivariate Polynomial Ring in x, y over Number Field in a with defining polynomial a^2 + 1
In our last example, the base ring is a quotient ring::
sage: singular.eval('ring r6 = (9,a), (x,y,z),lp')
''
sage: Q = singular('std(ideal(x^2,x+y^2+z^3))', type='qring')
sage: Q.sage_global_ring()
Quotient of Multivariate Polynomial Ring in x, y, z over Finite Field in a of size 3^2 by the ideal (y^4 - y^2*z^3 + z^6, x + y^2 + z^3)
AUTHOR:
- Simon King (2011-06-06)
"""
# extract the ring of coefficients
singular = self.parent()
charstr = singular.eval('charstr(basering)').split(',',1)
from sage.rings.integer_ring import ZZ
is_extension = len(charstr)==2
if charstr[0] in ['integer', 'ZZ']:
br = ZZ
is_extension = False
elif charstr[0] in ['0', 'QQ']:
from sage.all import QQ
br = QQ
elif charstr[0].startswith('Float'):
from sage.all import RealField, ceil, log
prec = singular.eval('ringlist(basering)[1][2][1]')
br = RealField(ceil((ZZ(prec)+1)/log(2,10)))
is_extension = False
elif charstr[0]=='complex':
from sage.all import ComplexField, ceil, log
prec = singular.eval('ringlist(basering)[1][2][1]')
br = ComplexField(ceil((ZZ(prec)+1)/log(2,10)))
is_extension = False
else:
# it ought to be a finite field
q = ZZ(charstr[0].lstrip('ZZ/'))
from sage.rings.finite_rings.finite_field_constructor import GF
if q.is_prime():
br = GF(q)
else:
br = GF(q,charstr[1])
# Singular has no extension of a non-prime field
is_extension = False
# We have the base ring of the base ring. But is it
# an extension?
if is_extension:
minpoly = singular.eval('minpoly')
if minpoly == '0':
from sage.all import Frac
BR = Frac(br[charstr[1]])
else:
is_short = singular.eval('short')
if is_short != '0':
singular.eval('short=0')
minpoly = ZZ[charstr[1]](singular.eval('minpoly'))
singular.eval('short=%s'%is_short)
else:
minpoly = ZZ[charstr[1]](minpoly)
BR = br.extension(minpoly,names=charstr[1])
else:
BR = br
# Now, we form the polynomial ring over BR with the given variables,
# using Singular's term order
from sage.rings.polynomial.term_order import termorder_from_singular
from sage.all import PolynomialRing
# Meanwhile Singular's quotient rings are also of 'ring' type, not 'qring' as in the past.
# To find out whether a Singular ring is a quotient ring, checking the ring type does not help;
# instead we check whether the quotient ideal is zero:
if (singular.eval('ideal(basering)==0')=='1'):
return PolynomialRing(BR, names=singular.eval('varstr(basering)'), order=termorder_from_singular(singular))
P = PolynomialRing(BR, names=singular.eval('varstr(basering)'), order=termorder_from_singular(singular))
return P.quotient(singular('ringlist(basering)[4]')._sage_(P), names=singular.eval('varstr(basering)'))
def sage_poly(self, R=None, kcache=None):
"""
Return a Sage polynomial in the ring ``R`` matching this
Singular polynomial.
INPUT:
- ``R`` - (default: None); an optional polynomial ring.
If it is provided, then you have to make sure that it
matches the current singular ring as, e.g., returned by
singular.current_ring(). By default, the output of
:meth:`sage_global_ring` is used.
- ``kcache`` - (default: None); an optional dictionary
for faster finite field lookups, this is mainly useful for finite
extension fields
OUTPUT: MPolynomial
EXAMPLES::
sage: R = PolynomialRing(GF(2^8,'a'), 'x,y')
sage: f = R('a^20*x^2*y+a^10+x')
sage: f._singular_().sage_poly(R) == f
True
sage: R = PolynomialRing(GF(2^8,'a'), 'x', implementation="singular")
sage: f = R('a^20*x^3+x^2+a^10')
sage: f._singular_().sage_poly(R) == f
True
::
sage: P.<x,y> = PolynomialRing(QQ, 2)
sage: f = x*y**3 - 1/9 * x + 1; f
x*y^3 - 1/9*x + 1
sage: singular(f)
x*y^3-1/9*x+1
sage: P(singular(f))
x*y^3 - 1/9*x + 1
TESTS::
sage: singular.eval('ring r = (3,z),(a,b,c),dp')
''
sage: singular.eval('minpoly = 1+z+z2+z3+z4')
''
sage: p = singular('z^4*a^3+z^2*a*b*c')
sage: p.sage_poly()
(-z^3 - z^2 - z - 1)*a^3 + (z^2)*a*b*c
sage: singular('z^4')
(-z3-z2-z-1)
Test that :trac:`25297` is fixed::
sage: R.<x,y> = QQ[]
sage: SE.<xbar,ybar> = R.quotient(x^2 + y^2 - 1)
sage: P = ideal(xbar,ybar)
sage: P2 = P._singular_().sage()
sage: P2.0.lift().parent()
Multivariate Polynomial Ring in x, y over Rational Field
Test that :trac:`29396` is fixed::
sage: Rxz.<x,z> = RR[]
sage: f = x**3 + x*z + 1
sage: f.discriminant(x)
-4.00000000000000*z^3 - 27.0000000000000
sage: Rx.<x> = RR[]
sage: Rx("x + 7.5")._singular_().sage_poly()
x + 7.50000
sage: Rx("x + 7.5")._singular_().sage_poly(Rx)
x + 7.50000000000000
AUTHORS:
- Martin Albrecht (2006-05-18)
- Simon King (2011-06-06): Deal with Singular's short polynomial representation,
automatic construction of a polynomial ring, if it is not explicitly given.
.. note::
For very simple polynomials
``eval(SingularElement.sage_polystring())`` is faster than
SingularElement.sage_poly(R); maybe we should detect the
crossover point (depending on the string length) and
choose an appropriate conversion strategy.
"""
# TODO: Refactor imports to move this to the top
from sage.rings.polynomial.multi_polynomial_ring import MPolynomialRing_polydict
from sage.rings.polynomial.multi_polynomial_libsingular import MPolynomialRing_libsingular
from sage.rings.polynomial.polynomial_ring import is_PolynomialRing
from sage.rings.polynomial.polydict import ETuple
from sage.rings.polynomial.polynomial_singular_interface import can_convert_to_singular
from sage.rings.quotient_ring import QuotientRing_generic
ring_is_fine = False
if R is None:
ring_is_fine = True
R = self.sage_global_ring()
if isinstance(R, QuotientRing_generic) and (ring_is_fine or can_convert_to_singular(R)):
p = self.sage_poly(R.ambient(), kcache)
return R(p)
sage_repr = {}
k = R.base_ring()
variable_str = "*".join(R.variable_names())
# This returns a string which looks like a list where the first
# half of the list is filled with monomials occurring in the
# Singular polynomial and the second half filled with the matching
# coefficients.
#
# Our strategy is to split the monomials at "*" to get the powers
# in the single variables and then to split the result to get
# actual exponent.
#
# So e.g. ['x^3*y^3','a'] gets split to
# [[['x','3'],['y','3']],'a']. We may do this quickly,
# as we know what to expect.
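# Illustrative sketch of that splitting on a made-up coef() string (not real
# Singular output): "x^3*y^3,x,1,a,2,3".split(",") yields six entries, the
# first half being the monomials ['x^3*y^3','x','1'] and the second half the
# coefficients ['a','2','3']; each monomial is then split at "*" and "^" as
# described above.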
is_short = self.parent().eval('short')
if is_short!='0':
self.parent().eval('short=0')
if isinstance(R, MPolynomialRing_libsingular):
out = R(self)
self.parent().eval('short=%s'%is_short)
return out
singular_poly_list = self.parent().eval("string(coef(%s,%s))" % (\
self.name(),variable_str)).split(",")
self.parent().eval('short=%s'%is_short)
else:
if isinstance(R, MPolynomialRing_libsingular):
return R(self)
singular_poly_list = self.parent().eval("string(coef(%s,%s))" % (\
self.name(),variable_str)).split(",")
# Directly treat constants
if singular_poly_list[0] in ['1', '(1.000e+00)']:
return R(singular_poly_list[1])
coeff_start = len(singular_poly_list) // 2
# Singular 4 puts parentheses around floats and sign outside them
charstr = self.parent().eval('charstr(basering)').split(',', 1)
if charstr[0].startswith('Float') or charstr[0] == 'complex':
for i in range(coeff_start, 2 * coeff_start):
singular_poly_list[i] = singular_poly_list[i].replace('(', '').replace(')', '')
if isinstance(R, MPolynomialRing_polydict) and (ring_is_fine or can_convert_to_singular(R)):
# we need to lookup the index of a given variable represented
# through a string
var_dict = dict(zip(R.variable_names(), range(R.ngens())))
ngens = R.ngens()
for i in range(coeff_start):
exp = dict()
monomial = singular_poly_list[i]
if monomial not in ['1', '(1.000e+00)']:
variables = [var.split("^") for var in monomial.split("*") ]
for e in variables:
var = e[0]
if len(e)==int(2):
power = int(e[1])
else:
power=1
exp[var_dict[var]]=power
if kcache is None:
sage_repr[ETuple(exp,ngens)]=k(singular_poly_list[coeff_start+i])
else:
elem = singular_poly_list[coeff_start+i]
if elem not in kcache:
kcache[elem] = k( elem )
sage_repr[ETuple(exp,ngens)]= kcache[elem]
return R(sage_repr)
elif is_PolynomialRing(R) and (ring_is_fine or can_convert_to_singular(R)):
sage_repr = [0]*int(self.deg()+1)
for i in range(coeff_start):
monomial = singular_poly_list[i]
exp = int(0)
if monomial not in ['1', '(1.000e+00)']:
term = monomial.split("^")
if len(term)==int(2):
exp = int(term[1])
else:
exp = int(1)
if kcache is None:
sage_repr[exp] = k(singular_poly_list[coeff_start+i])
else:
elem = singular_poly_list[coeff_start+i]
if elem not in kcache:
kcache[elem] = k( elem )
sage_repr[ exp ]= kcache[elem]
return R(sage_repr)
else:
raise TypeError("Cannot coerce %s into %s" % (self, R))
def sage_matrix(self, R, sparse=True):
"""
Return a Sage matrix corresponding to ``self``.
INPUT:
- ``R`` - (default: None); an optional ring, over which
the resulting matrix is going to be defined.
By default, the output of :meth:`sage_global_ring` is used.
- ``sparse`` - (default: True); determines whether the
resulting matrix is sparse or not.
EXAMPLES::
sage: R = singular.ring(0, '(x,y,z)', 'dp')
sage: A = singular.matrix(2,2)
sage: A.sage_matrix(ZZ)
[0 0]
[0 0]
sage: A.sage_matrix(RDF)
[0.0 0.0]
[0.0 0.0]
"""
from sage.matrix.constructor import Matrix
nrows, ncols = int(self.nrows()),int(self.ncols())
if R is None:
R = self.sage_global_ring()
A = Matrix(R, nrows, ncols, sparse=sparse)
#this is slow
for x in range(nrows):
for y in range(ncols):
A[x,y]=self[x+1,y+1].sage_poly(R)
return A
A = Matrix(R, nrows, ncols, sparse=sparse)
#this is slow
for x in range(nrows):
for y in range(ncols):
A[x,y]=R(self[x+1,y+1])
return A
def _sage_(self, R=None):
r"""
Convert self to Sage.
EXAMPLES::
sage: R = singular.ring(0, '(x,y,z)', 'dp')
sage: A = singular.matrix(2,2)
sage: A.sage(ZZ) # indirect doctest
[0 0]
[0 0]
sage: A = random_matrix(ZZ,3,3); A # random
[ -8 2 0]
[ 0 1 -1]
[ 2 1 -95]
sage: As = singular(A); As # random
-8 2 0
0 1 -1
2 1 -95
sage: As.sage() == A
True
::
sage: singular.eval('ring R = integer, (x,y,z),lp')
'// ** redefining R (ring R = integer, (x,y,z),lp;)'
sage: I = singular.ideal(['x^2','y*z','z+x'])
sage: I.sage()
Ideal (x^2, y*z, x + z) of Multivariate Polynomial Ring in x, y, z over Integer Ring
::
sage: singular('ringlist(basering)').sage()
[['integer'], ['x', 'y', 'z'], [['lp', (1, 1, 1)], ['C', (0)]], Ideal (0) of Multivariate Polynomial Ring in x, y, z over Integer Ring]
::
sage: singular.eval('ring r10 = (9,a), (x,y,z),lp')
''
sage: singular.eval('setring R')
''
sage: singular('r10').sage()
Multivariate Polynomial Ring in x, y, z over Finite Field in a of size 3^2
Note that the current base ring has not been changed by asking for another ring::
sage: singular('basering')
polynomial ring, over a domain, global ordering
// coefficients: ZZ
// number of vars : 3
// block 1 : ordering lp
// : names x y z
// block 2 : ordering C
::
sage: singular.eval('setring r10')
''
sage: Q = singular('std(ideal(x^2,x+y^2+z^3))', type='qring')
sage: Q.sage()
Quotient of Multivariate Polynomial Ring in x, y, z over Finite Field in a of size 3^2 by the ideal (y^4 - y^2*z^3 + z^6, x + y^2 + z^3)
sage: singular('x^2+y').sage()
y
sage: singular('x^2+y').sage().parent()
Quotient of Multivariate Polynomial Ring in x, y, z over Finite Field in a of size 3^2 by the ideal (y^4 - y^2*z^3 + z^6, x + y^2 + z^3)
Test that :trac:`18848` is fixed::
sage: singular(5).sage()
5
sage: type(singular(int(5)).sage())
<class 'sage.rings.integer.Integer'>
"""
typ = self.type()
if typ=='poly':
return self.sage_poly(R)
elif typ=='int':
return sage.rings.integer.Integer(repr(self))
elif typ == 'module':
return self.sage_matrix(R,sparse=True)
elif typ == 'matrix':
return self.sage_matrix(R,sparse=False)
elif typ == 'list':
return [ f._sage_(R) for f in self ]
elif typ == 'intvec':
from sage.modules.free_module_element import vector
return vector([sage.rings.integer.Integer(str(e)) for e in self])
elif typ == 'intmat':
from sage.matrix.constructor import matrix
from sage.rings.integer_ring import ZZ
A = matrix(ZZ, int(self.nrows()), int(self.ncols()))
for i in range(A.nrows()):
for j in range(A.ncols()):
A[i,j] = sage.rings.integer.Integer(str(self[i+1,j+1]))
return A
elif typ == 'string':
return repr(self)
elif typ == 'ideal':
R = R or self.sage_global_ring()
return R.ideal([p.sage_poly(R) for p in self])
elif typ in ['ring', 'qring']:
br = singular('basering')
self.set_ring()
R = self.sage_global_ring()
br.set_ring()
return R
raise NotImplementedError("Coercion of this datatype not implemented yet")
def is_string(self):
"""
Tell whether this element is a string.
EXAMPLES::
sage: singular('"abc"').is_string()
True
sage: singular('1').is_string()
False
"""
return self.type() == 'string'
def set_ring(self):
"""
Sets the current ring in Singular to be self.
EXAMPLES::
sage: R = singular.ring(7, '(a,b)', 'ds')
sage: S = singular.ring('real', '(a,b)', 'lp')
sage: singular.current_ring()
polynomial ring, over a field, global ordering
// coefficients: Float()
// number of vars : 2
// block 1 : ordering lp
// : names a b
// block 2 : ordering C
sage: R.set_ring()
sage: singular.current_ring()
polynomial ring, over a field, local ordering
// coefficients: ZZ/7
// number of vars : 2
// block 1 : ordering ds
// : names a b
// block 2 : ordering C
"""
self.parent().set_ring(self)
def sage_flattened_str_list(self):
"""
EXAMPLES::
sage: R=singular.ring(0,'(x,y)','dp')
sage: RL = R.ringlist()
sage: RL.sage_flattened_str_list()
['0', 'x', 'y', 'dp', '1,1', 'C', '0', '_[1]=0']
"""
s = str(self)
c = r'\[[0-9]*\]:'
r = re.compile(c)
s = r.sub('',s).strip()
return s.split()
def sage_structured_str_list(self):
r"""
If self is a Singular list of lists of Singular elements, returns
corresponding Sage list of lists of strings.
EXAMPLES::
sage: R=singular.ring(0,'(x,y)','dp')
sage: RL=R.ringlist()
sage: RL
[1]:
0
[2]:
[1]:
x
[2]:
y
[3]:
[1]:
[1]:
dp
[2]:
1,1
[2]:
[1]:
C
[2]:
0
[4]:
_[1]=0
sage: RL.sage_structured_str_list()
['0', ['x', 'y'], [['dp', '1,\n1'], ['C', '0']], '0']
"""
if not (self.type()=='list'):
return str(self)
return [X.sage_structured_str_list() for X in self]
def _tab_completion(self):
"""
Returns the possible tab-completions for self. In this case, we
just return all the tab completions for the Singular object.
EXAMPLES::
sage: R = singular.ring(0,'(x,y)','dp')
sage: R._tab_completion()
['exteriorPower',
...
'crossprod']
"""
return self.parent()._tab_completion()
def type(self):
"""
Returns the internal type of this element.
EXAMPLES::
sage: R = PolynomialRing(GF(2^8,'a'),2,'x')
sage: R._singular_().type()
'ring'
sage: fs = singular('x0^2','poly')
sage: fs.type()
'poly'
"""
# singular reports // $varname $type $stuff
p = re.compile(r"// [\w]+ (\w+) [\w]*")
m = p.match(self.parent().eval("type(%s)" % self.name()))
return m.group(1)
def __iter__(self):
"""
EXAMPLES::
sage: R = singular.ring(0, '(x,y,z)', 'dp')
sage: A = singular.matrix(2,2)
sage: list(iter(A))
[[0], [0]]
sage: A[1,1] = 1; A[1,2] = 2
sage: A[2,1] = 3; A[2,2] = 4
sage: list(iter(A))
[[1,3], [2,4]]
"""
if self.type() == 'matrix':
l = self.ncols()
else:
l = len(self)
for i in range(1, int(l + 1)):
yield self[i]
def _singular_(self):
"""
EXAMPLES::
sage: R = singular.ring(0, '(x,y,z)', 'dp')
sage: A = singular.matrix(2,2)
sage: A._singular_() is A
True
"""
return self
def attrib(self, name, value=None):
"""
Get and set attributes for self.
INPUT:
- ``name`` - string to choose the attribute
- ``value`` - boolean value or None for reading,
(default:None)
VALUES:
- ``isSB`` - the standard basis property; set by all commands
  computing a standard basis like groebner, std, stdhilb etc.;
  used by lift, dim, degree, mult, hilb, vdim, kbase
- ``isHomog`` - the weight vector for homogeneous or
  quasihomogeneous ideals/modules
- ``isCI`` - complete intersection property
- ``isCM`` - Cohen-Macaulay property
- ``rank`` - set the rank of a module (see nrows)
- ``withSB`` - value of type ideal, resp. module, is std
- ``withHilb`` - value of type intvec is hilb(_,1) (see hilb)
- ``withRes`` - value of type list is a free resolution
- ``withDim`` - value of type int is the dimension (see dim)
- ``withMult`` - value of type int is the multiplicity (see mult)
EXAMPLES::
sage: P.<x,y,z> = PolynomialRing(QQ)
sage: I = Ideal([z^2, y*z, y^2, x*z, x*y, x^2])
sage: Ibar = I._singular_()
sage: Ibar.attrib('isSB')
0
sage: singular.eval('vdim(%s)'%Ibar.name()) # sage7 name is random
// ** sage7 is no standard basis
4
sage: Ibar.attrib('isSB',1)
sage: singular.eval('vdim(%s)'%Ibar.name())
'4'
"""
if value is None:
return int(self.parent().eval('attrib(%s,"%s")' % (self.name(), name)))
else:
self.parent().eval('attrib(%s,"%s",%d)' % (self.name(), name,value))
@instancedoc
class SingularFunction(ExpectFunction):
def _instancedoc_(self):
"""
EXAMPLES::
sage: 'groebner' in singular.groebner.__doc__
True
"""
if not nodes:
generate_docstring_dictionary()
prefix = \
"""
This function is an automatically generated pexpect wrapper around the Singular
function '%s'.
EXAMPLES::
sage: groebner = singular.groebner
sage: P.<x, y> = PolynomialRing(QQ)
sage: I = P.ideal(x^2-y, y+x)
sage: groebner(singular(I))
x+y,
y^2-y
""" % (self._name,)
prefix2 = \
"""
The Singular documentation for '%s' is given below.
""" % (self._name,)
try:
return prefix + prefix2 + nodes[node_names[self._name]]
except KeyError:
return prefix
@instancedoc
class SingularFunctionElement(FunctionElement):
def _instancedoc_(self):
r"""
EXAMPLES::
sage: R = singular.ring(0, '(x,y,z)', 'dp')
sage: A = singular.matrix(2,2)
sage: 'matrix_expression' in A.nrows.__doc__
True
"""
if not nodes:
generate_docstring_dictionary()
try:
return nodes[node_names[self._name]]
except KeyError:
return ""
def is_SingularElement(x):
r"""
Return True if x is of type ``SingularElement``.
EXAMPLES::
sage: from sage.interfaces.singular import is_SingularElement
sage: is_SingularElement(singular(2))
True
sage: is_SingularElement(2)
False
"""
return isinstance(x, SingularElement)
nodes = {}
node_names = {}
def generate_docstring_dictionary():
"""
Generate global dictionaries which hold the docstrings for
Singular functions.
EXAMPLES::
sage: from sage.interfaces.singular import generate_docstring_dictionary
sage: generate_docstring_dictionary()
"""
global nodes
global node_names
nodes.clear()
node_names.clear()
new_node = re.compile(r"File: singular\.[a-z]*, Node: ([^,]*),.*")
new_lookup = re.compile(r"\* ([^:]*):*([^.]*)\..*")
L, in_node, curr_node = [], False, None
from sage.libs.singular.singular import get_resource
singular_info_file = get_resource('i')
# singular.hlp contains a few iso-8859-1 encoded special characters
with io.open(singular_info_file,
encoding='latin-1') as f:
for line in f:
m = re.match(new_node, line)
if m:
# a new node starts
in_node = True
nodes[curr_node] = "".join(L)
L = []
curr_node, = m.groups()
elif in_node: # we are in a node
L.append(line)
else:
m = re.match(new_lookup, line)
if m:
a, b = m.groups()
node_names[a] = b.strip()
if line == "6 Index\n":
in_node = False
nodes[curr_node] = "".join(L) # last node
def get_docstring(name):
"""
Return the docstring for the function ``name``.
INPUT:
- ``name`` - a Singular function name
EXAMPLES::
sage: from sage.interfaces.singular import get_docstring
sage: 'groebner' in get_docstring('groebner')
True
sage: 'standard.lib' in get_docstring('groebner')
True
"""
if not nodes:
generate_docstring_dictionary()
try:
return nodes[node_names[name]]
except KeyError:
return ""
##################################
singular = Singular()
def reduce_load_Singular():
"""
EXAMPLES::
sage: from sage.interfaces.singular import reduce_load_Singular
sage: reduce_load_Singular()
Singular
"""
return singular
def singular_console():
r"""
Spawn a new Singular command-line session.
EXAMPLES::
sage: singular_console() #not tested
SINGULAR / Development
A Computer Algebra System for Polynomial Computations / version 3-0-4
0<
by: G.-M. Greuel, G. Pfister, H. Schoenemann \ Nov 2007
FB Mathematik der Universitaet, D-67653 Kaiserslautern \
"""
from sage.repl.rich_output.display_manager import get_display_manager
if not get_display_manager().is_in_terminal():
raise RuntimeError('Can use the console only in the terminal. Try %%singular magics instead.')
os.system('Singular')
def singular_version():
"""
Return the version of Singular being used.
EXAMPLES::
sage: singular.version()
"Singular ... version 4...
"""
return singular.eval('system("--version");')
class SingularGBLogPrettyPrinter:
"""
A device which prints Singular Groebner basis computation logs
in a more human-readable form.
"""
rng_chng = re.compile(r"\[\d+:\d+\]")# [m:n] internal ring change to
# poly representation with
# exponent bound m and n words in
# exponent vector
new_elem = re.compile("s") # found a new element of the standard basis
red_zero = re.compile("-") # reduced a pair/S-polynomial to 0
red_post = re.compile(r"\.") # postponed a reduction of a pair/S-polynomial
cri_hilb = re.compile("h") # used Hilbert series criterion
hig_corn = re.compile(r"H\(\d+\)") # found a 'highest corner' of degree d, no need to consider higher degrees
num_crit = re.compile(r"\(\d+\)") # n critical pairs are still to be reduced
red_num = re.compile(r"\(S:\d+\)") # doing complete reduction of n elements
deg_lead = re.compile(r"\d+") # the degree of the leading terms is currently d
# SlimGB
red_para = re.compile(r"M\[(\d+),(\d+)\]") # parallel reduction of n elements with m non-zero output elements
red_betr = re.compile("b") # exchange of a reductor by a 'better' one
non_mini = re.compile("e") # a new reductor with non-minimal leading term
crt_lne1 = re.compile(r"product criterion:(\d+) chain criterion:(\d+)")
crt_lne2 = re.compile(r"NF:(\d+) product criterion:(\d+), ext_product criterion:(\d+)")
pat_sync = re.compile(r"1\+(\d+);")
global_pattern = re.compile(r"(\[\d+:\d+\]|s|-|\.|h|H\(\d+\)|\(\d+\)|\(S:\d+\)|\d+|M\[\d+,[b,e]*\d+\]|b|e).*")
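# Illustrative only (hypothetical log chunk, not captured Singular output):
# feeding "[65535:2]4(3)s5ss6-" to write() would be tokenized by
# global_pattern into [65535:2], 4, (3), s, 5, s, s, 6, - and reported as
# degree changes, critical-pair counts, new basis elements and a reduction
# to zero, depending on the verbosity level.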
def __init__(self, verbosity=1):
"""
Construct a new Singular Groebner Basis log pretty printer.
INPUT:
- ``verbosity`` - how much information should be printed
(between 0 and 3)
EXAMPLES::
sage: from sage.interfaces.singular import SingularGBLogPrettyPrinter
sage: s0 = SingularGBLogPrettyPrinter(verbosity=0)
sage: s1 = SingularGBLogPrettyPrinter(verbosity=1)
sage: s0.write("[1:2]12")
sage: s1.write("[1:2]12")
Leading term degree: 12.
"""
self.verbosity = verbosity
self.curr_deg = 0 # current degree
self.max_deg = 0 # maximal degree in total
self.nf = 0 # number of normal forms computed (SlimGB only)
self.prod = 0 # number of S-polynomials discarded using product criterion
self.ext_prod = 0 # number of S-polynomials discarded using extended product criterion
self.chain = 0 # number of S-polynomials discarded using chain criterion
self.storage = "" # stores incomplete strings
self.sync = None # should we expect a sync integer?
def write(self, s):
"""
EXAMPLES::
sage: from sage.interfaces.singular import SingularGBLogPrettyPrinter
sage: s3 = SingularGBLogPrettyPrinter(verbosity=3)
sage: s3.write("(S:1337)")
Performing complete reduction of 1337 elements.
sage: s3.write("M[389,12]")
Parallel reduction of 389 elements with 12 non-zero output elements.
"""
verbosity = self.verbosity
if self.storage:
s = self.storage + s
self.storage = ""
for line in s.splitlines():
# deal with the Sage <-> Singular syncing code
match = re.match(SingularGBLogPrettyPrinter.pat_sync,line)
if match:
self.sync = int(match.groups()[0])
continue
if self.sync and line == "%d" % (self.sync + 1):
self.sync = None
continue
if line.endswith(";"):
continue
if line.startswith(">"):
continue
if line.startswith("std") or line.startswith("slimgb"):
continue
# collect stats returned about avoided reductions to zero
match = re.match(SingularGBLogPrettyPrinter.crt_lne1,line)
if match:
self.prod,self.chain = map(int,re.match(SingularGBLogPrettyPrinter.crt_lne1,line).groups())
self.storage = ""
continue
match = re.match(SingularGBLogPrettyPrinter.crt_lne2,line)
if match:
self.nf,self.prod,self.ext_prod = map(int,re.match(SingularGBLogPrettyPrinter.crt_lne2,line).groups())
self.storage = ""
continue
while line:
match = re.match(SingularGBLogPrettyPrinter.global_pattern, line)
if not match:
self.storage = line
line = None
continue
token, = match.groups()
line = line[len(token):]
if re.match(SingularGBLogPrettyPrinter.rng_chng,token):
continue
elif re.match(SingularGBLogPrettyPrinter.new_elem,token) and verbosity >= 3:
print("New element found.")
elif re.match(SingularGBLogPrettyPrinter.red_zero,token) and verbosity >= 2:
print("Reduction to zero.")
elif re.match(SingularGBLogPrettyPrinter.red_post, token) and verbosity >= 2:
print("Reduction postponed.")
elif re.match(SingularGBLogPrettyPrinter.cri_hilb, token) and verbosity >= 2:
print("Hilber series criterion applied.")
elif re.match(SingularGBLogPrettyPrinter.hig_corn, token) and verbosity >= 1:
print("Maximal degree found: %s" % token)
elif re.match(SingularGBLogPrettyPrinter.num_crit, token) and verbosity >= 1:
print("Leading term degree: %2d. Critical pairs: %s." % (self.curr_deg,token[1:-1]))
elif re.match(SingularGBLogPrettyPrinter.red_num, token) and verbosity >= 3:
print("Performing complete reduction of %s elements." % token[3:-1])
elif re.match(SingularGBLogPrettyPrinter.deg_lead, token):
if verbosity >= 1:
print("Leading term degree: %2d." % int(token))
self.curr_deg = int(token)
if self.max_deg < self.curr_deg:
self.max_deg = self.curr_deg
elif re.match(SingularGBLogPrettyPrinter.red_para, token) and verbosity >= 3:
m,n = re.match(SingularGBLogPrettyPrinter.red_para,token).groups()
print("Parallel reduction of %s elements with %s non-zero output elements." % (m, n))
elif re.match(SingularGBLogPrettyPrinter.red_betr, token) and verbosity >= 3:
print("Replaced reductor by 'better' one.")
elif re.match(SingularGBLogPrettyPrinter.non_mini, token) and verbosity >= 2:
print("New reductor with non-minimal leading term found.")
def flush(self):
"""
EXAMPLES::
sage: from sage.interfaces.singular import SingularGBLogPrettyPrinter
sage: s3 = SingularGBLogPrettyPrinter(verbosity=3)
sage: s3.flush()
"""
sys.stdout.flush()
class SingularGBDefaultContext:
"""
Within this context all Singular Groebner basis calculations are
reduced automatically.
AUTHORS:
- Martin Albrecht
- Simon King
"""
def __init__(self, singular=None):
"""
Within this context all Singular Groebner basis calculations
are reduced automatically.
INPUT:
- ``singular`` - Singular instance (default: default instance)
EXAMPLES::
sage: from sage.interfaces.singular import SingularGBDefaultContext
sage: P.<a,b,c> = PolynomialRing(QQ,3, order='lex')
sage: I = sage.rings.ideal.Katsura(P,3)
sage: singular.option('noredTail')
sage: singular.option('noredThrough')
sage: Is = I._singular_()
sage: gb = Is.groebner()
sage: gb
84*c^4-40*c^3+c^2+c,
7*b+210*c^3-79*c^2+3*c,
a+2*b+2*c-1
::
sage: with SingularGBDefaultContext(): rgb = Is.groebner()
sage: rgb
84*c^4-40*c^3+c^2+c,
7*b+210*c^3-79*c^2+3*c,
7*a-420*c^3+158*c^2+8*c-7
Note that both bases are Groebner bases because they have
pairwise prime leading monomials but that the monic version of
the last element in ``rgb`` is smaller than the last element
of ``gb`` with respect to the lexicographical term ordering. ::
sage: (7*a-420*c^3+158*c^2+8*c-7)/7 < (a+2*b+2*c-1)
True
.. note::
This context is used automatically internally whenever a
Groebner basis is computed so the user does not need to use
it manually.
"""
if singular is None:
from sage.interfaces.singular import singular as singular_default
singular = singular_default
self.singular = singular
def __enter__(self):
"""
EXAMPLES::
sage: from sage.interfaces.singular import SingularGBDefaultContext
sage: P.<a,b,c> = PolynomialRing(QQ,3, order='lex')
sage: I = sage.rings.ideal.Katsura(P,3)
sage: singular.option('noredTail')
sage: singular.option('noredThrough')
sage: Is = I._singular_()
sage: with SingularGBDefaultContext(): rgb = Is.groebner()
sage: rgb
84*c^4-40*c^3+c^2+c,
7*b+210*c^3-79*c^2+3*c,
7*a-420*c^3+158*c^2+8*c-7
"""
try:
self.bck_degBound = int(self.singular.eval('degBound'))
except SingularError:
self.bck_degBound = int(0)
try:
self.bck_multBound = int(self.singular.eval('multBound'))
except SingularError:
self.bck_multBound = int(0)
self.o = self.singular.option("get")
self.singular.option('set',self.singular._saved_options)
self.singular.option("redSB")
self.singular.option("redTail")
try:
self.singular.eval('degBound=0')
except SingularError:
pass
try:
self.singular.eval('multBound=0')
except SingularError:
pass
def __exit__(self, typ, value, tb):
"""
EXAMPLES::
sage: from sage.interfaces.singular import SingularGBDefaultContext
sage: P.<a,b,c> = PolynomialRing(QQ,3, order='lex')
sage: I = sage.rings.ideal.Katsura(P,3)
sage: singular.option('noredTail')
sage: singular.option('noredThrough')
sage: Is = I._singular_()
sage: with SingularGBDefaultContext(): rgb = Is.groebner()
sage: rgb
84*c^4-40*c^3+c^2+c,
7*b+210*c^3-79*c^2+3*c,
7*a-420*c^3+158*c^2+8*c-7
"""
self.singular.option("set", self.o)
try:
self.singular.eval('degBound=%d' % self.bck_degBound)
except SingularError:
pass
try:
self.singular.eval('multBound=%d' % self.bck_multBound)
except SingularError:
pass
def singular_gb_standard_options(func):
r"""
Decorator to force a reduced Singular groebner basis.
TESTS::
sage: P.<a,b,c,d,e> = PolynomialRing(GF(127))
sage: J = sage.rings.ideal.Cyclic(P).homogenize()
sage: from sage.misc.sageinspect import sage_getsource
sage: "basis" in sage_getsource(J.interreduced_basis) #indirect doctest
True
The following tests against a bug that was fixed in :trac:`11298`::
sage: from sage.misc.sageinspect import sage_getsourcelines, sage_getargspec
sage: P.<x,y> = QQ[]
sage: I = P*[x,y]
sage: sage_getargspec(I.interreduced_basis)
ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None)
sage: sage_getsourcelines(I.interreduced_basis)
([' @handle_AA_and_QQbar\n',
' @singular_gb_standard_options\n',
' @libsingular_gb_standard_options\n',
' def interreduced_basis(self):\n', '
...
' return self.basis.reduced()\n'], ...)
.. note::
This decorator is used automatically internally so the user
does not need to use it manually.
"""
from sage.misc.decorators import sage_wraps
@sage_wraps(func)
def wrapper(*args, **kwds):
with SingularGBDefaultContext():
return func(*args, **kwds)
return wrapper
|
the-stack_0_19697 | # engine/result.py
# Copyright (C) 2005-2020 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Define result set constructs including :class:`.Result`"""
import collections
import functools
import operator
from .row import _baserow_usecext
from .row import BaseRow # noqa
from .row import LegacyRow # noqa
from .row import Row # noqa
from .row import RowMapping # noqa
from .row import RowProxy # noqa
from .row import rowproxy_reconstructor # noqa
from .. import exc
from .. import util
from ..sql import expression
from ..sql import sqltypes
from ..sql import util as sql_util
from ..sql.compiler import RM_NAME
from ..sql.compiler import RM_OBJECTS
from ..sql.compiler import RM_RENDERED_NAME
from ..sql.compiler import RM_TYPE
if _baserow_usecext:
from sqlalchemy.cresultproxy import tuplegetter as _tuplegetter
_UNPICKLED = util.symbol("unpickled")
# cyclical import for sqlalchemy.future
_future_Result = None
# metadata entry tuple indexes.
# using raw tuple is faster than namedtuple.
MD_INDEX = 0 # integer index in cursor.description
MD_OBJECTS = 1 # other string keys and ColumnElement obj that can match
MD_LOOKUP_KEY = 2 # string key we usually expect for key-based lookup
MD_RENDERED_NAME = 3 # name that is usually in cursor.description
MD_PROCESSOR = 4 # callable to process a result value into a row
MD_UNTRANSLATED = 5 # raw name from cursor.description
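# Illustrative sketch of one metadata entry in that layout (values hypothetical):
#   (0, (user_table.c.id, "id"), "id", "id", int_processor, None)
# i.e. (MD_INDEX, MD_OBJECTS, MD_LOOKUP_KEY, MD_RENDERED_NAME, MD_PROCESSOR,
# MD_UNTRANSLATED).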
class ResultMetaData(object):
__slots__ = ()
def _has_key(self, key):
return key in self._keymap
def _key_fallback(self, key):
if isinstance(key, int):
raise IndexError(key)
else:
raise KeyError(key)
class SimpleResultMetaData(ResultMetaData):
__slots__ = "keys", "_keymap", "_processors"
def __init__(self, keys, extra=None):
self.keys = list(keys)
len_keys = len(keys)
self._keymap = {
name: (index, name) for index, name in enumerate(self.keys)
}
if not _baserow_usecext:
self._keymap.update(
{
index: (index, None, self.keys[index])
for index in range(len_keys)
}
)
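# Illustrative: for keys ["a", "b"] the keymap built above is roughly
# {"a": (0, "a"), "b": (1, "b")}, plus {0: (0, None, "a"), 1: (1, None, "b")}
# when the C extension is not in use.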
if extra:
for key, ex in zip(keys, extra):
rec = self._keymap[key]
self._keymap.update({e: rec for e in ex})
self._processors = [None] * len(keys)
def __getstate__(self):
return {"keys": self.keys}
def __setstate__(self, state):
self.__init__(state["keys"])
def _has_key(self, key):
return key in self._keymap
def _contains(self, value, row):
return value in row._data
def result_tuple(fields, extra=None):
parent = SimpleResultMetaData(fields, extra)
return functools.partial(Row, parent, parent._processors, parent._keymap)
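# Illustrative sketch of how result_tuple might be used (names hypothetical):
#   make_row = result_tuple(["id", "name"])
#   row = make_row((1, "sandy"))
# every row produced this way shares a single SimpleResultMetaData instance.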
class CursorResultMetaData(ResultMetaData):
"""Handle cursor.description, applying additional info from an execution
context."""
__slots__ = (
"_keymap",
"case_sensitive",
"matched_on_name",
"_processors",
"keys",
)
def __init__(self, parent, cursor_description):
context = parent.context
dialect = context.dialect
self.case_sensitive = dialect.case_sensitive
self.matched_on_name = False
if context.result_column_struct:
(
result_columns,
cols_are_ordered,
textual_ordered,
loose_column_name_matching,
) = context.result_column_struct
num_ctx_cols = len(result_columns)
else:
result_columns = (
cols_are_ordered
) = (
num_ctx_cols
) = loose_column_name_matching = textual_ordered = False
# merge cursor.description with the column info
# present in the compiled structure, if any
raw = self._merge_cursor_description(
context,
cursor_description,
result_columns,
num_ctx_cols,
cols_are_ordered,
textual_ordered,
loose_column_name_matching,
)
self._keymap = {}
if not _baserow_usecext:
# keymap indexes by integer index: this is only used
# in the pure Python BaseRow.__getitem__
# implementation to avoid an expensive
# isinstance(key, util.int_types) in the most common
# case path
len_raw = len(raw)
self._keymap.update(
[
(metadata_entry[MD_INDEX], metadata_entry)
for metadata_entry in raw
]
+ [
(metadata_entry[MD_INDEX] - len_raw, metadata_entry)
for metadata_entry in raw
]
)
# processors in key order for certain per-row
# views like __iter__ and slices
self._processors = [
metadata_entry[MD_PROCESSOR] for metadata_entry in raw
]
# keymap by primary string...
by_key = dict(
[
(metadata_entry[MD_LOOKUP_KEY], metadata_entry)
for metadata_entry in raw
]
)
# for compiled SQL constructs, copy additional lookup keys into
# the key lookup map, such as Column objects, labels,
# column keys and other names
if num_ctx_cols:
# if by-primary-string dictionary smaller (or bigger?!) than
# number of columns, assume we have dupes, rewrite
# dupe records with "None" for index which results in
# ambiguous column exception when accessed.
if len(by_key) != num_ctx_cols:
# new in 1.4: get the complete set of all possible keys,
# strings, objects, whatever, that are dupes across two
# different records, first.
index_by_key = {}
dupes = set()
for metadata_entry in raw:
for key in (metadata_entry[MD_RENDERED_NAME],) + (
metadata_entry[MD_OBJECTS] or ()
):
if not self.case_sensitive and isinstance(
key, util.string_types
):
key = key.lower()
idx = metadata_entry[MD_INDEX]
# if this key has been associated with more than one
# positional index, it's a dupe
if index_by_key.setdefault(key, idx) != idx:
dupes.add(key)
# then put everything we have into the keymap excluding only
# those keys that are dupes.
self._keymap.update(
[
(obj_elem, metadata_entry)
for metadata_entry in raw
if metadata_entry[MD_OBJECTS]
for obj_elem in metadata_entry[MD_OBJECTS]
if obj_elem not in dupes
]
)
# then for the dupe keys, put the "ambiguous column"
# record into by_key.
by_key.update({key: (None, (), key) for key in dupes})
else:
# no dupes - copy secondary elements from compiled
# columns into self._keymap
self._keymap.update(
[
(obj_elem, metadata_entry)
for metadata_entry in raw
if metadata_entry[MD_OBJECTS]
for obj_elem in metadata_entry[MD_OBJECTS]
]
)
# update keymap with primary string names taking
# precedence
self._keymap.update(by_key)
# update keymap with "translated" names (sqlite-only thing)
if not num_ctx_cols and context._translate_colname:
self._keymap.update(
[
(
metadata_entry[MD_UNTRANSLATED],
self._keymap[metadata_entry[MD_LOOKUP_KEY]],
)
for metadata_entry in raw
if metadata_entry[MD_UNTRANSLATED]
]
)
def _merge_cursor_description(
self,
context,
cursor_description,
result_columns,
num_ctx_cols,
cols_are_ordered,
textual_ordered,
loose_column_name_matching,
):
"""Merge a cursor.description with compiled result column information.
There are at least four separate strategies used here, selected
depending on the type of SQL construct used to start with.
The most common case is that of the compiled SQL expression construct,
which generated the column names present in the raw SQL string and
which has the identical number of columns as were reported by
cursor.description. In this case, we assume a 1-1 positional mapping
between the entries in cursor.description and the compiled object.
This is also the most performant case as we disregard extracting /
decoding the column names present in cursor.description since we
already have the desired name we generated in the compiled SQL
construct.
The next common case is that of the completely raw string SQL,
such as passed to connection.execute(). In this case we have no
compiled construct to work with, so we extract and decode the
names from cursor.description and index those as the primary
result row target keys.
The remaining fairly common case is that of the textual SQL
that includes at least partial column information; this is when
we use a :class:`.TextualSelect` construct. This construct may have
unordered or ordered column information. In the ordered case, we
merge the cursor.description and the compiled construct's information
positionally, and warn if there are additional description names
present, however we still decode the names in cursor.description
as we don't have a guarantee that the names in the columns match
on these. In the unordered case, we match names in cursor.description
to that of the compiled construct based on name matching.
In both of these cases, the cursor.description names and the column
expression objects and names are indexed as result row target keys.
The final case is much less common, where we have a compiled
non-textual SQL expression construct, but the number of columns
in cursor.description doesn't match what's in the compiled
construct. We make the guess here that there might be textual
column expressions in the compiled construct that themselves include
a comma in them causing them to split. We do the same name-matching
as with textual non-ordered columns.
The name-matched system of merging is the same as that used by
    SQLAlchemy for all cases up through the 0.9 series. Positional
matching for compiled SQL expressions was introduced in 1.0 as a
major performance feature, and positional matching for textual
:class:`.TextualSelect` objects in 1.1. As name matching is no longer
a common case, it was acceptable to factor it into smaller generator-
oriented methods that are easier to understand, but incur slightly
more performance overhead.
"""
case_sensitive = context.dialect.case_sensitive
if (
num_ctx_cols
and cols_are_ordered
and not textual_ordered
and num_ctx_cols == len(cursor_description)
):
self.keys = [elem[0] for elem in result_columns]
# pure positional 1-1 case; doesn't need to read
# the names from cursor.description
return [
(
idx,
rmap_entry[RM_OBJECTS],
rmap_entry[RM_NAME].lower()
if not case_sensitive
else rmap_entry[RM_NAME],
rmap_entry[RM_RENDERED_NAME],
context.get_result_processor(
rmap_entry[RM_TYPE],
rmap_entry[RM_RENDERED_NAME],
cursor_description[idx][1],
),
None,
)
for idx, rmap_entry in enumerate(result_columns)
]
else:
# name-based or text-positional cases, where we need
# to read cursor.description names
if textual_ordered:
# textual positional case
raw_iterator = self._merge_textual_cols_by_position(
context, cursor_description, result_columns
)
elif num_ctx_cols:
# compiled SQL with a mismatch of description cols
# vs. compiled cols, or textual w/ unordered columns
raw_iterator = self._merge_cols_by_name(
context,
cursor_description,
result_columns,
loose_column_name_matching,
)
else:
# no compiled SQL, just a raw string
raw_iterator = self._merge_cols_by_none(
context, cursor_description
)
return [
(
idx,
obj,
cursor_colname,
cursor_colname,
context.get_result_processor(
mapped_type, cursor_colname, coltype
),
untranslated,
)
for (
idx,
cursor_colname,
mapped_type,
coltype,
obj,
untranslated,
) in raw_iterator
]
def _colnames_from_description(self, context, cursor_description):
"""Extract column names and data types from a cursor.description.
Applies unicode decoding, column translation, "normalization",
and case sensitivity rules to the names based on the dialect.
"""
dialect = context.dialect
case_sensitive = dialect.case_sensitive
translate_colname = context._translate_colname
description_decoder = (
dialect._description_decoder
if dialect.description_encoding
else None
)
normalize_name = (
dialect.normalize_name if dialect.requires_name_normalize else None
)
untranslated = None
self.keys = []
for idx, rec in enumerate(cursor_description):
colname = rec[0]
coltype = rec[1]
if description_decoder:
colname = description_decoder(colname)
if translate_colname:
colname, untranslated = translate_colname(colname)
if normalize_name:
colname = normalize_name(colname)
self.keys.append(colname)
if not case_sensitive:
colname = colname.lower()
yield idx, colname, untranslated, coltype
def _merge_textual_cols_by_position(
self, context, cursor_description, result_columns
):
num_ctx_cols = len(result_columns) if result_columns else None
if num_ctx_cols > len(cursor_description):
util.warn(
"Number of columns in textual SQL (%d) is "
"smaller than number of columns requested (%d)"
% (num_ctx_cols, len(cursor_description))
)
seen = set()
for (
idx,
colname,
untranslated,
coltype,
) in self._colnames_from_description(context, cursor_description):
if idx < num_ctx_cols:
ctx_rec = result_columns[idx]
obj = ctx_rec[RM_OBJECTS]
mapped_type = ctx_rec[RM_TYPE]
if obj[0] in seen:
raise exc.InvalidRequestError(
"Duplicate column expression requested "
"in textual SQL: %r" % obj[0]
)
seen.add(obj[0])
else:
mapped_type = sqltypes.NULLTYPE
obj = None
yield idx, colname, mapped_type, coltype, obj, untranslated
def _merge_cols_by_name(
self,
context,
cursor_description,
result_columns,
loose_column_name_matching,
):
dialect = context.dialect
case_sensitive = dialect.case_sensitive
match_map = self._create_description_match_map(
result_columns, case_sensitive, loose_column_name_matching
)
self.matched_on_name = True
for (
idx,
colname,
untranslated,
coltype,
) in self._colnames_from_description(context, cursor_description):
try:
ctx_rec = match_map[colname]
except KeyError:
mapped_type = sqltypes.NULLTYPE
obj = None
else:
obj = ctx_rec[1]
mapped_type = ctx_rec[2]
yield idx, colname, mapped_type, coltype, obj, untranslated
@classmethod
def _create_description_match_map(
cls,
result_columns,
case_sensitive=True,
loose_column_name_matching=False,
):
"""when matching cursor.description to a set of names that are present
in a Compiled object, as is the case with TextualSelect, get all the
names we expect might match those in cursor.description.
"""
d = {}
for elem in result_columns:
key = elem[RM_RENDERED_NAME]
if not case_sensitive:
key = key.lower()
if key in d:
# conflicting keyname - just add the column-linked objects
# to the existing record. if there is a duplicate column
# name in the cursor description, this will allow all of those
# objects to raise an ambiguous column error
e_name, e_obj, e_type = d[key]
d[key] = e_name, e_obj + elem[RM_OBJECTS], e_type
else:
d[key] = (elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE])
if loose_column_name_matching:
# when using a textual statement with an unordered set
# of columns that line up, we are expecting the user
# to be using label names in the SQL that match to the column
# expressions. Enable more liberal matching for this case;
# duplicate keys that are ambiguous will be fixed later.
for r_key in elem[RM_OBJECTS]:
d.setdefault(
r_key, (elem[RM_NAME], elem[RM_OBJECTS], elem[RM_TYPE])
)
return d
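    # Editorial sketch (hypothetical columns): for a TextualSelect declaring
    # columns "a" and "b", the map returned above roughly looks like
    #     {"a": ("a", (<Column a>,), <type>), "b": ("b", (<Column b>,), <type>)}
    # where records for duplicate rendered names have their RM_OBJECTS merged
    # so that later lookups can raise an ambiguous-column error.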
def _merge_cols_by_none(self, context, cursor_description):
for (
idx,
colname,
untranslated,
coltype,
) in self._colnames_from_description(context, cursor_description):
yield idx, colname, sqltypes.NULLTYPE, coltype, None, untranslated
def _key_fallback(self, key, raiseerr=True):
if raiseerr:
raise exc.NoSuchColumnError(
"Could not locate column in row for column '%s'"
% util.string_or_unprintable(key)
)
else:
return None
def _raise_for_ambiguous_column_name(self, rec):
raise exc.InvalidRequestError(
"Ambiguous column name '%s' in "
"result set column descriptions" % rec[MD_LOOKUP_KEY]
)
def _warn_for_nonint(self, key):
raise TypeError(
"TypeError: tuple indices must be integers or slices, not %s"
% type(key).__name__
)
def _getter(self, key, raiseerr=True):
try:
rec = self._keymap[key]
except KeyError:
rec = self._key_fallback(key, raiseerr)
if rec is None:
return None
index, obj = rec[0:2]
if index is None:
self._raise_for_ambiguous_column_name(rec)
return operator.methodcaller("_get_by_key_impl_mapping", index)
def _tuple_getter(self, keys, raiseerr=True):
"""Given a list of keys, return a callable that will deliver a tuple.
This is strictly used by the ORM and the keys are Column objects.
However, this might be some nice-ish feature if we could find a very
clean way of presenting it.
note that in the new world of "row._mapping", this is a mapping-getter.
maybe the name should indicate that somehow.
"""
indexes = []
for key in keys:
try:
rec = self._keymap[key]
except KeyError:
rec = self._key_fallback(key, raiseerr)
if rec is None:
return None
index, obj = rec[0:2]
if index is None:
self._raise_for_ambiguous_column_name(obj)
indexes.append(index)
if _baserow_usecext:
return _tuplegetter(*indexes)
else:
return self._pure_py_tuplegetter(*indexes)
def _pure_py_tuplegetter(self, *indexes):
getters = [
operator.methodcaller("_get_by_key_impl_mapping", index)
for index in indexes
]
return lambda rec: tuple(getter(rec) for getter in getters)
def __getstate__(self):
return {
"_keymap": {
key: (rec[MD_INDEX], _UNPICKLED, key)
for key, rec in self._keymap.items()
if isinstance(key, util.string_types + util.int_types)
},
"keys": self.keys,
"case_sensitive": self.case_sensitive,
"matched_on_name": self.matched_on_name,
}
def __setstate__(self, state):
self._processors = [None for _ in range(len(state["keys"]))]
self._keymap = state["_keymap"]
self.keys = state["keys"]
self.case_sensitive = state["case_sensitive"]
self.matched_on_name = state["matched_on_name"]
class LegacyCursorResultMetaData(CursorResultMetaData):
def _contains(self, value, row):
key = value
if key in self._keymap:
util.warn_deprecated(
"Using the 'in' operator to test for string or column "
"keys, or integer indexes, in a :class:`.Row` object is "
"deprecated and will "
"be removed in a future release. "
"Use the `Row._fields` or `Row._mapping` attribute, i.e. "
"'key in row._fields'"
)
return True
else:
return self._key_fallback(key, False) is not None
def _key_fallback(self, key, raiseerr=True):
map_ = self._keymap
result = None
if isinstance(key, util.string_types):
result = map_.get(key if self.case_sensitive else key.lower())
elif isinstance(key, expression.ColumnElement):
if (
key._label
and (key._label if self.case_sensitive else key._label.lower())
in map_
):
result = map_[
key._label if self.case_sensitive else key._label.lower()
]
elif (
hasattr(key, "name")
and (key.name if self.case_sensitive else key.name.lower())
in map_
):
# match is only on name.
result = map_[
key.name if self.case_sensitive else key.name.lower()
]
# search extra hard to make sure this
# isn't a column/label name overlap.
# this check isn't currently available if the row
# was unpickled.
if result is not None and result[MD_OBJECTS] not in (
None,
_UNPICKLED,
):
for obj in result[MD_OBJECTS]:
if key._compare_name_for_result(obj):
break
else:
result = None
if result is not None:
if result[MD_OBJECTS] is _UNPICKLED:
util.warn_deprecated(
"Retreiving row values using Column objects from a "
"row that was unpickled is deprecated; adequate "
"state cannot be pickled for this to be efficient. "
"This usage will raise KeyError in a future release."
)
else:
util.warn_deprecated(
"Retreiving row values using Column objects with only "
"matching names as keys is deprecated, and will raise "
"KeyError in a future release; only Column "
"objects that are explicitly part of the statement "
"object should be used."
)
if result is None:
if raiseerr:
raise exc.NoSuchColumnError(
"Could not locate column in row for column '%s'"
% util.string_or_unprintable(key)
)
else:
return None
else:
map_[key] = result
return result
def _warn_for_nonint(self, key):
util.warn_deprecated_20(
"Using non-integer/slice indices on Row is deprecated and will "
"be removed in version 2.0; please use row._mapping[<key>], or "
"the mappings() accessor on the sqlalchemy.future result object.",
stacklevel=4,
)
def _has_key(self, key):
if key in self._keymap:
return True
else:
return self._key_fallback(key, False) is not None
class CursorFetchStrategy(object):
"""Define a cursor strategy for a result object.
Subclasses define different ways of fetching rows, typically but
not necessarily using a DBAPI cursor object.
.. versionadded:: 1.4
"""
__slots__ = ("dbapi_cursor", "cursor_description")
def __init__(self, dbapi_cursor, cursor_description):
self.dbapi_cursor = dbapi_cursor
self.cursor_description = cursor_description
@classmethod
def create(cls, result):
raise NotImplementedError()
def soft_close(self, result):
raise NotImplementedError()
def hard_close(self, result):
raise NotImplementedError()
def fetchone(self):
raise NotImplementedError()
def fetchmany(self, size=None):
raise NotImplementedError()
def fetchall(self):
raise NotImplementedError()
class NoCursorDQLFetchStrategy(CursorFetchStrategy):
"""Cursor strategy for a DQL result that has no open cursor.
This is a result set that can return rows, i.e. for a SELECT, or for an
INSERT, UPDATE, DELETE that includes RETURNING. However it is in the state
where the cursor is closed and no rows remain available. The owning result
object may or may not be "hard closed", which determines if the fetch
methods send empty results or raise for closed result.
"""
__slots__ = ("closed",)
def __init__(self, closed):
self.closed = closed
self.cursor_description = None
def soft_close(self, result):
pass
def hard_close(self, result):
self.closed = True
def fetchone(self):
return self._non_result(None)
def fetchmany(self, size=None):
return self._non_result([])
def fetchall(self):
return self._non_result([])
def _non_result(self, default):
if self.closed:
raise exc.ResourceClosedError("This result object is closed.")
else:
return default
class NoCursorDMLFetchStrategy(CursorFetchStrategy):
"""Cursor strategy for a DML result that has no open cursor.
This is a result set that does not return rows, i.e. for an INSERT,
UPDATE, DELETE that does not include RETURNING.
"""
__slots__ = ("closed",)
def __init__(self, closed):
self.closed = closed
self.cursor_description = None
def soft_close(self, result):
pass
def hard_close(self, result):
self.closed = True
def fetchone(self):
return self._non_result(None)
def fetchmany(self, size=None):
return self._non_result([])
def fetchall(self):
return self._non_result([])
def _non_result(self, default):
raise exc.ResourceClosedError(
"This result object does not return rows. "
"It has been closed automatically."
)
class DefaultCursorFetchStrategy(CursorFetchStrategy):
"""Call fetch methods from a DBAPI cursor.
Alternate versions of this class may instead buffer the rows from
cursors or not use cursors at all.
"""
@classmethod
def create(cls, result):
dbapi_cursor = result.cursor
description = dbapi_cursor.description
if description is None:
return NoCursorDMLFetchStrategy(False)
else:
return cls(dbapi_cursor, description)
def soft_close(self, result):
result.cursor_strategy = NoCursorDQLFetchStrategy(False)
def hard_close(self, result):
result.cursor_strategy = NoCursorDQLFetchStrategy(True)
def fetchone(self):
return self.dbapi_cursor.fetchone()
def fetchmany(self, size=None):
if size is None:
return self.dbapi_cursor.fetchmany()
else:
return self.dbapi_cursor.fetchmany(size)
def fetchall(self):
return self.dbapi_cursor.fetchall()
class BufferedRowCursorFetchStrategy(DefaultCursorFetchStrategy):
"""A cursor fetch strategy with row buffering behavior.
This strategy buffers the contents of a selection of rows
before ``fetchone()`` is called. This is to allow the results of
``cursor.description`` to be available immediately, when
interfacing with a DB-API that requires rows to be consumed before
this information is available (currently psycopg2, when used with
server-side cursors).
The pre-fetching behavior fetches only one row initially, and then
grows its buffer size by a fixed amount with each successive need
for additional rows up the ``max_row_buffer`` size, which defaults
to 1000::
with psycopg2_engine.connect() as conn:
result = conn.execution_options(
stream_results=True, max_row_buffer=50
).execute("select * from table")
.. versionadded:: 1.4 ``max_row_buffer`` may now exceed 1000 rows.
.. seealso::
:ref:`psycopg2_execution_options`
"""
__slots__ = ("_max_row_buffer", "_rowbuffer", "_bufsize")
def __init__(
self, max_row_buffer, dbapi_cursor, description, initial_buffer
):
super(BufferedRowCursorFetchStrategy, self).__init__(
dbapi_cursor, description
)
self._max_row_buffer = max_row_buffer
self._growth_factor = 5
self._rowbuffer = initial_buffer
self._bufsize = min(self._max_row_buffer, self._growth_factor)
@classmethod
def create(cls, result):
"""Buffered row strategy has to buffer the first rows *before*
cursor.description is fetched so that it works with named cursors
correctly
"""
dbapi_cursor = result.cursor
initial_buffer = collections.deque(dbapi_cursor.fetchmany(1))
description = dbapi_cursor.description
if description is None:
return NoCursorDMLFetchStrategy(False)
else:
max_row_buffer = result.context.execution_options.get(
"max_row_buffer", 1000
)
return cls(
max_row_buffer, dbapi_cursor, description, initial_buffer
)
def __buffer_rows(self):
size = self._bufsize
self._rowbuffer = collections.deque(self.dbapi_cursor.fetchmany(size))
if size < self._max_row_buffer:
self._bufsize = min(
self._max_row_buffer, size * self._growth_factor
)
def soft_close(self, result):
self._rowbuffer.clear()
super(BufferedRowCursorFetchStrategy, self).soft_close(result)
def hard_close(self, result):
self._rowbuffer.clear()
super(BufferedRowCursorFetchStrategy, self).hard_close(result)
def fetchone(self):
if not self._rowbuffer:
self.__buffer_rows()
if not self._rowbuffer:
return None
return self._rowbuffer.popleft()
def fetchmany(self, size=None):
if size is None:
return self.fetchall()
result = []
for x in range(0, size):
row = self.fetchone()
if row is None:
break
result.append(row)
return result
def fetchall(self):
self._rowbuffer.extend(self.dbapi_cursor.fetchall())
ret = self._rowbuffer
self._rowbuffer = collections.deque()
return ret
class FullyBufferedCursorFetchStrategy(DefaultCursorFetchStrategy):
"""A cursor strategy that buffers rows fully upon creation.
Used for operations where a result is to be delivered
after the database conversation can not be continued,
such as MSSQL INSERT...OUTPUT after an autocommit.
"""
__slots__ = ("_rowbuffer",)
def __init__(self, dbapi_cursor, description, initial_buffer=None):
super(FullyBufferedCursorFetchStrategy, self).__init__(
dbapi_cursor, description
)
if initial_buffer is not None:
self._rowbuffer = collections.deque(initial_buffer)
else:
self._rowbuffer = self._buffer_rows()
@classmethod
def create_from_buffer(cls, dbapi_cursor, description, buffer):
return cls(dbapi_cursor, description, buffer)
def _buffer_rows(self):
return collections.deque(self.dbapi_cursor.fetchall())
def soft_close(self, result):
self._rowbuffer.clear()
super(FullyBufferedCursorFetchStrategy, self).soft_close(result)
def hard_close(self, result):
self._rowbuffer.clear()
super(FullyBufferedCursorFetchStrategy, self).hard_close(result)
def fetchone(self):
if self._rowbuffer:
return self._rowbuffer.popleft()
else:
return None
def fetchmany(self, size=None):
if size is None:
return self.fetchall()
result = []
for x in range(0, size):
row = self.fetchone()
if row is None:
break
result.append(row)
return result
def fetchall(self):
ret = self._rowbuffer
self._rowbuffer = collections.deque()
return ret
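    # Editorial usage sketch (hypothetical cursor/rows): a dialect that must
    # detach results from the DBAPI conversation can pre-fetch and hand over
    # the rows, e.g.
    #     rows = dbapi_cursor.fetchall()
    #     strategy = FullyBufferedCursorFetchStrategy.create_from_buffer(
    #         dbapi_cursor, dbapi_cursor.description, rows)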
class BaseResult(object):
"""Base class for database result objects.
:class:`.BaseResult` is the base class for the 1.x style
:class:`.ResultProxy` class as well as the 2.x style
:class:`.future.Result` class.
"""
out_parameters = None
_metadata = None
_soft_closed = False
closed = False
@classmethod
def _create_for_context(cls, context):
if context._is_future_result:
obj = object.__new__(_future_Result)
else:
obj = object.__new__(ResultProxy)
obj.__init__(context)
return obj
def __init__(self, context):
self.context = context
self.dialect = context.dialect
self.cursor = context.cursor
self.connection = context.root_connection
self._echo = (
self.connection._echo and context.engine._should_log_debug()
)
self._init_metadata()
def _init_metadata(self):
self.cursor_strategy = strat = self.context.get_result_cursor_strategy(
self
)
if strat.cursor_description is not None:
if self.context.compiled:
if self.context.compiled._cached_metadata:
self._metadata = self.context.compiled._cached_metadata
else:
self._metadata = (
self.context.compiled._cached_metadata
) = self._cursor_metadata(self, strat.cursor_description)
else:
self._metadata = self._cursor_metadata(
self, strat.cursor_description
)
if self._echo:
self.context.engine.logger.debug(
"Col %r", tuple(x[0] for x in strat.cursor_description)
)
# leave cursor open so that execution context can continue
# setting up things like rowcount
def keys(self):
"""Return the list of string keys that would represented by each
:class:`.Row`."""
if self._metadata:
return self._metadata.keys
else:
return []
def _getter(self, key, raiseerr=True):
try:
getter = self._metadata._getter
except AttributeError:
return self.cursor_strategy._non_result(None)
else:
return getter(key, raiseerr)
def _tuple_getter(self, key, raiseerr=True):
try:
getter = self._metadata._tuple_getter
except AttributeError:
return self.cursor_strategy._non_result(None)
else:
return getter(key, raiseerr)
def _has_key(self, key):
try:
has_key = self._metadata._has_key
except AttributeError:
return self.cursor_strategy._non_result(None)
else:
return has_key(key)
def _soft_close(self, hard=False):
"""Soft close this :class:`.ResultProxy`.
This releases all DBAPI cursor resources, but leaves the
ResultProxy "open" from a semantic perspective, meaning the
fetchXXX() methods will continue to return empty results.
This method is called automatically when:
* all result rows are exhausted using the fetchXXX() methods.
* cursor.description is None.
This method is **not public**, but is documented in order to clarify
the "autoclose" process used.
.. versionadded:: 1.0.0
.. seealso::
:meth:`.ResultProxy.close`
"""
if (not hard and self._soft_closed) or (hard and self.closed):
return
if hard:
self.closed = True
self.cursor_strategy.hard_close(self)
else:
self.cursor_strategy.soft_close(self)
if not self._soft_closed:
cursor = self.cursor
self.cursor = None
self.connection._safe_close_cursor(cursor)
self._soft_closed = True
@util.memoized_property
def inserted_primary_key(self):
"""Return the primary key for the row just inserted.
The return value is a list of scalar values
corresponding to the list of primary key columns
in the target table.
This only applies to single row :func:`.insert`
constructs which did not explicitly specify
:meth:`.Insert.returning`.
Note that primary key columns which specify a
server_default clause,
or otherwise do not qualify as "autoincrement"
columns (see the notes at :class:`.Column`), and were
generated using the database-side default, will
appear in this list as ``None`` unless the backend
supports "returning" and the insert statement executed
with the "implicit returning" enabled.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isinsert:
raise exc.InvalidRequestError(
"Statement is not an insert() " "expression construct."
)
elif self.context._is_explicit_returning:
raise exc.InvalidRequestError(
"Can't call inserted_primary_key "
"when returning() "
"is used."
)
return self.context.inserted_primary_key
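    # Editorial usage sketch (hypothetical table with an autoincrement
    # primary key):
    #     result = connection.execute(my_table.insert().values(name="x"))
    #     result.inserted_primary_key   # e.g. [1]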
def last_updated_params(self):
"""Return the collection of updated parameters from this
execution.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an update() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isupdate:
raise exc.InvalidRequestError(
"Statement is not an update() " "expression construct."
)
elif self.context.executemany:
return self.context.compiled_parameters
else:
return self.context.compiled_parameters[0]
def last_inserted_params(self):
"""Return the collection of inserted parameters from this
execution.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isinsert:
raise exc.InvalidRequestError(
"Statement is not an insert() " "expression construct."
)
elif self.context.executemany:
return self.context.compiled_parameters
else:
return self.context.compiled_parameters[0]
@property
def returned_defaults(self):
"""Return the values of default columns that were fetched using
the :meth:`.ValuesBase.return_defaults` feature.
The value is an instance of :class:`.Row`, or ``None``
if :meth:`.ValuesBase.return_defaults` was not used or if the
backend does not support RETURNING.
.. versionadded:: 0.9.0
.. seealso::
:meth:`.ValuesBase.return_defaults`
"""
return self.context.returned_defaults
def lastrow_has_defaults(self):
"""Return ``lastrow_has_defaults()`` from the underlying
:class:`.ExecutionContext`.
See :class:`.ExecutionContext` for details.
"""
return self.context.lastrow_has_defaults()
def postfetch_cols(self):
"""Return ``postfetch_cols()`` from the underlying
:class:`.ExecutionContext`.
See :class:`.ExecutionContext` for details.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() or update() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isinsert and not self.context.isupdate:
raise exc.InvalidRequestError(
"Statement is not an insert() or update() "
"expression construct."
)
return self.context.postfetch_cols
def prefetch_cols(self):
"""Return ``prefetch_cols()`` from the underlying
:class:`.ExecutionContext`.
See :class:`.ExecutionContext` for details.
Raises :class:`~sqlalchemy.exc.InvalidRequestError` if the executed
statement is not a compiled expression construct
or is not an insert() or update() construct.
"""
if not self.context.compiled:
raise exc.InvalidRequestError(
"Statement is not a compiled " "expression construct."
)
elif not self.context.isinsert and not self.context.isupdate:
raise exc.InvalidRequestError(
"Statement is not an insert() or update() "
"expression construct."
)
return self.context.prefetch_cols
def supports_sane_rowcount(self):
"""Return ``supports_sane_rowcount`` from the dialect.
See :attr:`.ResultProxy.rowcount` for background.
"""
return self.dialect.supports_sane_rowcount
def supports_sane_multi_rowcount(self):
"""Return ``supports_sane_multi_rowcount`` from the dialect.
See :attr:`.ResultProxy.rowcount` for background.
"""
return self.dialect.supports_sane_multi_rowcount
@util.memoized_property
def rowcount(self):
"""Return the 'rowcount' for this result.
The 'rowcount' reports the number of rows *matched*
by the WHERE criterion of an UPDATE or DELETE statement.
.. note::
Notes regarding :attr:`.ResultProxy.rowcount`:
* This attribute returns the number of rows *matched*,
which is not necessarily the same as the number of rows
that were actually *modified* - an UPDATE statement, for example,
may have no net change on a given row if the SET values
given are the same as those present in the row already.
Such a row would be matched but not modified.
On backends that feature both styles, such as MySQL,
rowcount is configured by default to return the match
count in all cases.
* :attr:`.ResultProxy.rowcount` is *only* useful in conjunction
with an UPDATE or DELETE statement. Contrary to what the Python
DBAPI says, it does *not* return the
number of rows available from the results of a SELECT statement
as DBAPIs cannot support this functionality when rows are
unbuffered.
* :attr:`.ResultProxy.rowcount` may not be fully implemented by
all dialects. In particular, most DBAPIs do not support an
aggregate rowcount result from an executemany call.
The :meth:`.ResultProxy.supports_sane_rowcount` and
:meth:`.ResultProxy.supports_sane_multi_rowcount` methods
will report from the dialect if each usage is known to be
supported.
* Statements that use RETURNING may not return a correct
rowcount.
"""
try:
return self.context.rowcount
except BaseException as e:
self.connection._handle_dbapi_exception(
e, None, None, self.cursor, self.context
)
@property
def lastrowid(self):
"""return the 'lastrowid' accessor on the DBAPI cursor.
This is a DBAPI specific method and is only functional
for those backends which support it, for statements
        where it is appropriate. Its behavior is not
consistent across backends.
Usage of this method is normally unnecessary when
using insert() expression constructs; the
:attr:`~ResultProxy.inserted_primary_key` attribute provides a
tuple of primary key values for a newly inserted row,
regardless of database backend.
"""
try:
return self.context.get_lastrowid()
except BaseException as e:
self.connection._handle_dbapi_exception(
e, None, None, self.cursor, self.context
)
@property
def returns_rows(self):
"""True if this :class:`.ResultProxy` returns rows.
I.e. if it is legal to call the methods
:meth:`~.ResultProxy.fetchone`,
:meth:`~.ResultProxy.fetchmany`
:meth:`~.ResultProxy.fetchall`.
"""
return self._metadata is not None
@property
def is_insert(self):
"""True if this :class:`.ResultProxy` is the result
        of executing an expression language compiled
:func:`.expression.insert` construct.
When True, this implies that the
:attr:`inserted_primary_key` attribute is accessible,
assuming the statement did not include
a user defined "returning" construct.
"""
return self.context.isinsert
class ResultProxy(BaseResult):
"""A facade around a DBAPI cursor object.
Returns database rows via the :class:`.Row` class, which provides
additional API features and behaviors on top of the raw data returned
by the DBAPI.
Within the scope of the 1.x series of SQLAlchemy, the :class:`.ResultProxy`
will in fact return instances of the :class:`.LegacyRow` class, which
maintains Python mapping (i.e. dictionary) like behaviors upon the object
itself. Going forward, the :attr:`.Row._mapping` attribute should be used
for dictionary behaviors.
.. seealso::
:ref:`coretutorial_selecting` - introductory material for accessing
:class:`.ResultProxy` and :class:`.Row` objects.
"""
_autoclose_connection = False
_process_row = LegacyRow
_cursor_metadata = LegacyCursorResultMetaData
_cursor_strategy_cls = DefaultCursorFetchStrategy
def __iter__(self):
"""Implement iteration protocol."""
while True:
row = self.fetchone()
if row is None:
return
else:
yield row
def close(self):
"""Close this ResultProxy.
This closes out the underlying DBAPI cursor corresponding
to the statement execution, if one is still present. Note that the
DBAPI cursor is automatically released when the :class:`.ResultProxy`
exhausts all available rows. :meth:`.ResultProxy.close` is generally
an optional method except in the case when discarding a
:class:`.ResultProxy` that still has additional rows pending for fetch.
In the case of a result that is the product of
:ref:`connectionless execution <dbengine_implicit>`,
the underlying :class:`.Connection` object is also closed, which
:term:`releases` DBAPI connection resources.
.. deprecated:: 2.0 "connectionless" execution is deprecated and will
be removed in version 2.0. Version 2.0 will feature the
:class:`.Result` object that will no longer affect the status
of the originating connection in any case.
After this method is called, it is no longer valid to call upon
the fetch methods, which will raise a :class:`.ResourceClosedError`
on subsequent use.
.. seealso::
:ref:`connections_toplevel`
"""
self._soft_close(hard=True)
def _soft_close(self, hard=False):
soft_closed = self._soft_closed
super(ResultProxy, self)._soft_close(hard=hard)
if (
not soft_closed
and self._soft_closed
and self._autoclose_connection
):
self.connection.close()
def __next__(self):
"""Implement the Python next() protocol.
This method, mirrored as both ``.next()`` and ``.__next__()``, is part
of Python's API for producing iterator-like behavior.
.. versionadded:: 1.2
"""
row = self.fetchone()
if row is None:
raise StopIteration()
else:
return row
next = __next__
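    # Editorial usage sketch (hypothetical statement): the iterator protocol
    # above allows direct iteration over a result, e.g.
    #     for row in connection.execute("select * from my_table"):
    #         print(row)
    # iteration ends automatically once fetchone() returns None.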
def process_rows(self, rows):
process_row = self._process_row
metadata = self._metadata
keymap = metadata._keymap
processors = metadata._processors
if self._echo:
log = self.context.engine.logger.debug
l = []
for row in rows:
log("Row %r", sql_util._repr_row(row))
l.append(process_row(metadata, processors, keymap, row))
return l
else:
return [
process_row(metadata, processors, keymap, row) for row in rows
]
def fetchall(self):
"""Fetch all rows, just like DB-API ``cursor.fetchall()``.
After all rows have been exhausted, the underlying DBAPI
cursor resource is released, and the object may be safely
discarded.
Subsequent calls to :meth:`.ResultProxy.fetchall` will return
an empty list. After the :meth:`.ResultProxy.close` method is
called, the method will raise :class:`.ResourceClosedError`.
:return: a list of :class:`.Row` objects
"""
try:
l = self.process_rows(self.cursor_strategy.fetchall())
self._soft_close()
return l
except BaseException as e:
self.connection._handle_dbapi_exception(
e, None, None, self.cursor, self.context
)
def fetchmany(self, size=None):
"""Fetch many rows, just like DB-API
``cursor.fetchmany(size=cursor.arraysize)``.
After all rows have been exhausted, the underlying DBAPI
cursor resource is released, and the object may be safely
discarded.
Calls to :meth:`.ResultProxy.fetchmany` after all rows have been
exhausted will return
an empty list. After the :meth:`.ResultProxy.close` method is
called, the method will raise :class:`.ResourceClosedError`.
:return: a list of :class:`.Row` objects
"""
try:
l = self.process_rows(self.cursor_strategy.fetchmany(size))
if len(l) == 0:
self._soft_close()
return l
except BaseException as e:
self.connection._handle_dbapi_exception(
e, None, None, self.cursor, self.context
)
def _onerow(self):
return self.fetchone()
def fetchone(self):
"""Fetch one row, just like DB-API ``cursor.fetchone()``.
After all rows have been exhausted, the underlying DBAPI
cursor resource is released, and the object may be safely
discarded.
Calls to :meth:`.ResultProxy.fetchone` after all rows have
been exhausted will return ``None``.
After the :meth:`.ResultProxy.close` method is
called, the method will raise :class:`.ResourceClosedError`.
:return: a :class:`.Row` object, or None if no rows remain
"""
try:
row = self.cursor_strategy.fetchone()
if row is not None:
return self.process_rows([row])[0]
else:
self._soft_close()
return None
except BaseException as e:
self.connection._handle_dbapi_exception(
e, None, None, self.cursor, self.context
)
def first(self):
"""Fetch the first row and then close the result set unconditionally.
After calling this method, the object is fully closed,
e.g. the :meth:`.ResultProxy.close` method will have been called.
:return: a :class:`.Row` object, or None if no rows remain
"""
try:
row = self.cursor_strategy.fetchone()
except BaseException as e:
self.connection._handle_dbapi_exception(
e, None, None, self.cursor, self.context
)
try:
if row is not None:
return self.process_rows([row])[0]
else:
return None
finally:
self.close()
def scalar(self):
"""Fetch the first column of the first row, and close the result set.
After calling this method, the object is fully closed,
e.g. the :meth:`.ResultProxy.close` method will have been called.
:return: a Python scalar value , or None if no rows remain
"""
row = self.first()
if row is not None:
return row[0]
else:
return None
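    # Editorial usage sketch (hypothetical statement): scalar() is the
    # idiomatic way to read a single value and release the cursor in one step,
    # e.g.
    #     count = connection.execute("select count(*) from my_table").scalar()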
class BufferedRowResultProxy(ResultProxy):
"""A ResultProxy with row buffering behavior.
.. deprecated:: 1.4 this class is now supplied using a strategy object.
See :class:`.BufferedRowCursorFetchStrategy`.
"""
_cursor_strategy_cls = BufferedRowCursorFetchStrategy
class FullyBufferedResultProxy(ResultProxy):
"""A result proxy that buffers rows fully upon creation.
.. deprecated:: 1.4 this class is now supplied using a strategy object.
See :class:`.FullyBufferedCursorFetchStrategy`.
"""
_cursor_strategy_cls = FullyBufferedCursorFetchStrategy
class BufferedColumnRow(LegacyRow):
"""Row is now BufferedColumn in all cases"""
class BufferedColumnResultProxy(ResultProxy):
"""A ResultProxy with column buffering behavior.
.. versionchanged:: 1.4 This is now the default behavior of the Row
and this class does not change behavior in any way.
"""
_process_row = BufferedColumnRow
the-stack_0_19699 | """Mix-in for spatially extended state-space models."""
from typing import Tuple, Sequence, Optional, Union
import numpy as np
class SpatiallyExtendedModelMixIn:
"""Mix-in class for spatially extended state-space models."""
def __init__(
self,
mesh_shape: Tuple[int, ...],
domain_extents: Tuple[float, ...],
domain_is_periodic: bool,
observation_coords: Optional[np.ndarray] = None,
observation_node_indices: Optional[Union[slice, Sequence[int]]] = None,
**kwargs,
):
"""
Args:
mesh_shape: Tuple of integers specifying dimensions (number of nodes along
each axis) of rectilinear mesh used to discretize spatial domain. For
example `mesh_shape=(64,)` would represent a 1D spatial domain with
64 (equispaced) mesh nodes along the extents of the domain while
`mesh_shape=(32, 64)` would represent a 2D spatial domain with 32
equispaced mesh nodes along the first spatial axis and 64 equispaced
                mesh nodes along the second spatial axis, with there being in total
`2048 = 32 * 64` mesh nodes in this case.
domain_extents: Tuple of (positive) floats specifying spatial extent (size)
                of domain along each spatial axis, for example `domain_extents=(1, 1)`
would specify a 2D spatial domain of unit length along both axes.
domain_is_periodic: Whether the spatial domain should be assumed to have
periodic boundary conditions or equivalently to be a D-torus where D is
the spatial dimension.
observation_coords: Two-dimensional array of shape
`(dim_observation, spatial_dimension)` specifying coordinates of
observation points in order corresponding to values in observation
                vectors. Either this or `observation_node_indices` should be specified but
not both.
observation_node_indices: Sequence of integers or slice specifying indices
of mesh nodes corresponding to observation points. Either this or
`observation_coords` should be specified but not both.
"""
self._mesh_shape = mesh_shape
        self._mesh_size = np.prod(mesh_shape)
self._domain_extents = domain_extents
self._domain_is_periodic = domain_is_periodic
self._mesh_node_coords = np.stack(
np.meshgrid(
*(
np.linspace(
                        0, domain_extents[d], mesh_shape[d], endpoint=not domain_is_periodic
)
for d in range(self.spatial_dimension)
),
indexing='ij'
),
axis=-1,
).reshape((self.mesh_size, self.spatial_dimension))
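        # The resulting coordinate array has shape (mesh_size, spatial_dimension);
        # e.g. mesh_shape=(32, 64) on a 2D domain yields a (2048, 2) array.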
if observation_coords is None and observation_node_indices is None:
raise ValueError(
"One of observation_coords or observation_node_indices must be "
"specified"
)
elif observation_coords is not None and observation_node_indices is not None:
raise ValueError(
"Only one of observation_coords or observation_node_indices must be "
"specified"
)
elif observation_node_indices is not None:
self._observation_coords = self._mesh_node_coords[observation_node_indices]
else:
self._observation_coords = observation_coords
super().__init__(**kwargs)
@property
def mesh_shape(self) -> Tuple[int, ...]:
"""Number of nodes along each axis of spatial mesh."""
return self._mesh_shape
@property
def mesh_size(self) -> int:
"""Total number of nodes in spatial mesh."""
return self._mesh_size
@property
def spatial_dimension(self) -> int:
"""Number of dimensions of spatial domain."""
return len(self._mesh_shape)
@property
def domain_extents(self) -> Tuple[float, ...]:
"""Spatial extents of domain along each spatial axis."""
return self._domain_extents
@property
def domain_is_periodic(self) -> bool:
"""Whether domain has periodic boundary conditions or not."""
return self._domain_is_periodic
@property
def mesh_node_coords(self) -> np.ndarray:
"""Two-dimensional array containing coordinates of spatial mesh nodes.
Of shape `(mesh_size, spatial_dimension)` with each row representing the
coordinates for one mesh node, with the row ordering following the ordering of
the mesh nodes in the state vectors.
"""
return self._mesh_node_coords
@property
def observation_coords(self) -> np.ndarray:
"""Two-dimensional array containing coordinates of observation points.
        Of shape `(dim_observation, spatial_dimension)` with each row representing the
coordinates for one observation point, with the row ordering following the
ordering of the observation points in the observation vectors.
"""
return self._observation_coords
def distances_from_mesh_node_to_observation_points(
self, mesh_node_index: int
) -> np.ndarray:
"""Compute distance between mesh node and observation points.
Args:
mesh_node_index: Integer index of mesh node in order represented in state
vector.
Returns:
One-dimensional array of spatial distances from specified mesh node to each
of observation points, in order in which observation points are represented
in the observation vectors.
"""
return self.distances_from_mesh_node_to_points(
mesh_node_index, self.observation_coords)
def distances_from_mesh_node_to_points(
self, mesh_node_index: int, coords: np.ndarray
) -> np.ndarray:
"""Compute distance between mesh node and points in spatial domain.
Args:
mesh_node_index: Integer index of mesh node in order represented in state
vector.
coords: Two-dimensional array of spatial coordinates of points to compute
distances for.
Returns:
One-dimensional array of spatial distances from specified mesh node to each
of points with coordinates specified in `coords`.
"""
mesh_node_coord = self.mesh_node_coords[mesh_node_index]
if self.domain_is_periodic:
deltas = np.abs(mesh_node_coord - coords)
return (np.minimum(deltas, self.domain_extents - deltas) ** 2).sum(
-1
) ** 0.5
else:
return ((mesh_node_coord - coords) ** 2).sum(-1) ** 0.5
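# Editorial usage sketch (not part of the original module): the mix-in is meant
# to be combined with a concrete state-space model class; `MyModel` below is
# hypothetical.
#     class SpatialModel(SpatiallyExtendedModelMixIn, MyModel):
#         pass
#     model = SpatialModel(
#         mesh_shape=(32, 64),
#         domain_extents=(1.0, 2.0),
#         domain_is_periodic=True,
#         observation_node_indices=slice(0, 2048, 64),
#     )
#     model.distances_from_mesh_node_to_observation_points(0)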
the-stack_0_19701
#!/usr/bin/env python
# encoding: utf-8
from django.urls import path
from django.views.decorators.cache import cache_page
from . import views
app_name = "oauth"
urlpatterns = [
path(
r'oauth/authorize',
views.authorize),
path(
r'oauth/requireemail/<int:oauthid>.html',
views.RequireEmailView.as_view(),
name='require_email'),
path(
r'oauth/emailconfirm/<int:id>/<sign>.html',
views.emailconfirm,
name='email_confirm'),
path(
r'oauth/bindsuccess/<int:oauthid>.html',
views.bindsuccess,
name='bindsuccess'),
path(
r'oauth/oauthlogin',
views.oauthlogin,
name='oauthlogin')]
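# Editorial usage sketch (assuming this URLconf is included at the project
# root): the namespaced route names above can be reversed as usual, e.g.
#     from django.urls import reverse
#     reverse('oauth:require_email', args=[3])  # -> '/oauth/requireemail/3.html'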
the-stack_0_19702
# -*- coding: utf-8 -*-
r"""
These pre and postprocessor preserve highlights when converting the notebook to
html or LaTeX.
Preprocessor:
- for html conversion: the preprocessor replaces html tags by a "neutral" text
versions, which enables markdown conversion of text included in the data
field e.g. ::
<span class="mark"> *text* </span>
is translated into::
!oph!span class="mark"!clh! *text* !oph!/span!clh!
- for LaTeX conversion: the preprocessor replaces html tags by a "neutral" text
version of some associated LaTeX commands or environments. This, again
enables markdown conversion of text included in the command or environment.
e.g. ::
<span class="mark"> *text* </span>
is translated into::
!sl!highlighta!op! *text* !cl!
Postprocessor:
- replaces the "neutral" text versions by the destination language tags
e.g. the [html example] becomes ::
<span class="mark"> <em>text</em> </span>
(the data text have been correctly emphasized)
e.g. [LaTeX example] becomes ::
\highlighta{\emph{text}}
(the data text have been correctly emphasized)
The LaTeX commands and environments are defined in the LaTeX template
highlighter.tplx
"""
from __future__ import print_function
import re
from nbconvert.postprocessors.base import PostProcessorBase
from nbconvert.preprocessors import Preprocessor
class HighlighterPreprocessor(Preprocessor):
"""
:mod:`nbconvert` Preprocessor for the ``highlighter`` nbextension.
The preprocessor replaces highlighter html tags in markdown with a
"neutral" text version, which enables markdown conversion of text included
in the data field, command or environment. Then the neutral text is
translated into LaTeX/html output by the corresponding
:class:`HighlighterPostProcessor`.
For example the highlighter-created markdown ::
<span class="mark"> *text* </span>
is translated for html conversion into::
!oph!span class="mark"!clh! *text* !oph!/span!clh!
or for LaTeX conversion is translated into::
!sl!highlighta!op! *text* !cl!
"""
def latex_scheme_cell(self, match):
schemes = {
"mark": "highlightA",
"burk": "highlightB",
"girk": "highlightC"
}
return ("!sl!begin!op!" + schemes[match.group(1)] + '!cl!\n' +
match.group(2) + "\n!sl!end!op!" + schemes[match.group(1)] +
'!cl!\n')
def latex_scheme(self, match):
schemes = {
"mark": r"!sl!highlighta",
"burk": r"!sl!highlightb",
"girk": r"!sl!highlightc"
}
return schemes[match.group(1)] + '!op!' + match.group(2) + '!cl!'
def html_replacements(self, match):
return match.group(0).replace("<", "!oph!").replace(">", "!clh!")
def replace_highlights_with_latex(self, cell_text):
cell_text = re.sub(
"^<div class=\"(mark|burk|girk)\">([\S\s]*?)<\/div>" +
"<i class=\"fa fa-lightbulb-o \"></i>",
self.latex_scheme_cell, cell_text)
cell_text = re.sub(
"<span class=\"(mark|burk|girk)\">([\S\s]*?)<\/span>",
self.latex_scheme, cell_text)
return cell_text
def replace_highlights_in_html(self, cell_text):
cell_text = re.sub(
"^<div class=\"(mark|burk|girk)\">([\S\s]*?)<\/div>" +
"<i class=\"fa fa-lightbulb-o \"></i>",
self.html_replacements, cell_text)
cell_text = re.sub(
"<span class=\"(mark|burk|girk)\">([\S\s]*?)<\/span>",
self.html_replacements, cell_text)
return cell_text
def preprocess_cell(self, cell, resources, index):
"""
Preprocess cell
Parameters
----------
cell : NotebookNode cell
Notebook cell being processed
resources : dictionary
Additional resources used in the conversion process. Allows
preprocessors to pass variables into the Jinja engine.
cell_index : int
Index of the cell being processed (see base.py)
"""
# print("config", self.config)
if cell.cell_type == "markdown":
if self.config.NbConvertApp.export_format == "latex":
cell.source = self.replace_highlights_with_latex(cell.source)
elif self.config.NbConvertApp.export_format == "html":
cell.source = self.replace_highlights_in_html(cell.source)
return cell, resources
class HighlighterPostProcessor(PostProcessorBase):
r"""
:mod:`nbconvert` PostProcessor for the ``highlighter`` nbextension.
Replaces the "neutral" text versions created by the
:class:`HighlighterPreprocessor` by the destination language tags.
e.g. the html example becomes ::
<span class="mark"> <em>text</em> </span>
(the data text have been correctly emphasized in html markup)
e.g. the LaTeX example becomes ::
\highlighta{\emph{text}}
(the data text have been correctly emphasized using LaTeX tags)
"""
def postprocess(self, input):
print("Postprocessing...")
"""if self.config.NbConvertApp.export_format == "latex":
with open(input,'rt') as f:
nb_text=f.read()
nb_text=nb_text.replace('!op!','{')
nb_text=nb_text.replace('!cl!','}')
nb_text=nb_text.replace('!sl!','\\')
with open(input,'wt') as f:
f.write(nb_text)
elif self.config.NbConvertApp.export_format == "html":
with open(input,'rt') as f:
nb_text=f.read()
nb_text=nb_text.replace('!oph!','<')
nb_text=nb_text.replace('!clh!','>')
with open(input,'wt') as f:
f.write(nb_text)
"""
if self.config.NbConvertApp.export_format == "latex" or "html":
with open(input, 'rt') as f:
nb_text = f.read()
if self.config.NbConvertApp.export_format == "latex":
nb_text = nb_text.replace('!op!', '{')
nb_text = nb_text.replace('!cl!', '}')
nb_text = nb_text.replace('!sl!', '\\')
elif self.config.NbConvertApp.export_format == "html":
nb_text = nb_text.replace('!oph!', '<')
nb_text = nb_text.replace('!clh!', '>')
with open(input, 'wt') as f:
f.write(nb_text)
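# Editorial usage sketch: the two processors can be wired into nbconvert via a
# config file; the import path "pp_highlighter" below is an assumption and
# depends on how this module is installed.
#     # jupyter_nbconvert_config.py
#     c.Exporter.preprocessors = ["pp_highlighter.HighlighterPreprocessor"]
#     c.NbConvertApp.postprocessor_class = "pp_highlighter.HighlighterPostProcessor"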
the-stack_0_19703
# -*- coding: utf-8 -*-
""" S3 User Roles Management
@copyright: 2018 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3RoleManager",
)
import uuid
import json
#import sys
from gluon import current, URL, DIV, SQLFORM, INPUT, A, LI, UL
from s3dal import Field
from s3crud import S3CRUD
from s3rest import S3Method
from s3query import FS
from s3utils import s3_str, s3_mark_required
from s3validators import JSONERRORS
from s3widgets import s3_comments_widget
from s3xml import SEPARATORS
# =============================================================================
class S3RoleManager(S3Method):
""" REST Method to manage user roles and permission rules """
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Entry point for REST interface.
@param r: the S3Request instance
@param attr: controller attributes
"""
method = self.method
tablename = self.tablename
auth = current.auth
sr = auth.get_system_roles()
output = {}
if tablename == "auth_group": # through admin/role controller
# Only ADMIN can manipulate roles
if not auth.s3_has_role(sr.ADMIN):
r.unauthorised()
if method == "list":
output = self.role_list(r, **attr)
elif method in ("read", "create", "update"):
output = self.role_form(r, **attr)
elif method == "copy":
output = self.copy_role(r, **attr)
elif method == "delete":
output = self.delete_role(r, **attr)
elif method == "users":
output = self.assign_users(r, **attr)
elif method == "import":
output = self.import_roles(r, **attr)
else:
r.error(405, current.ERROR.BAD_METHOD)
elif tablename == "auth_user": # through admin/user controller
# Must have read-permission for the user record
# (user accounts are filtered to OU by controller)
if not self._permitted():
r.unauthorised()
if method == "roles":
output = self.assign_roles(r, **attr)
else:
r.error(405, current.ERROR.BAD_METHOD)
# TODO implement per-target perspective
#elif tablename == "s3_permission": # through admin/permissions controller
#
# # View permissions for a target (page or table)
# r.error(501, current.ERROR.NOT_IMPLEMENTED)
else:
r.error(401, current.ERROR.BAD_REQUEST)
return output
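    # Editorial sketch: with this handler mounted on the admin controllers, the
    # branches above correspond to requests such as
    #     admin/role              -> role_list()
    #     admin/role/create       -> role_form()
    #     admin/role/<id>/users   -> assign_users()
    #     admin/user/<id>/roles   -> assign_roles()
    # (the exact URLs depend on how the controller registers this S3Method).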
# -------------------------------------------------------------------------
def role_list(self, r, **attr):
"""
List or export roles
@param r: the S3Request instance
@param attr: controller attributes
NB this function must be restricted to ADMINs (in apply_method)
"""
# Check permission to read in this table
authorised = self._permitted()
if not authorised:
r.unauthorised()
# Validate requested format
representation = r.representation
if representation == "csv":
return self.export_roles(r, **attr)
T = current.T
response = current.response
s3 = response.s3
get_vars = self.request.get_vars
# List Config
list_id = "roles"
list_fields = ["id",
"role",
(T("UID"), "uuid"),
"description",
]
default_orderby = "auth_group.role"
s3.no_formats = True
# Exclude hidden roles
resource = self.resource
resource.add_filter(FS("hidden") == False)
if r.interactive:
# Formkey for Ajax-actions
formkey = str(uuid.uuid4())
current.session["_formkey[admin/rolelist]"] = formkey
# Pagination
display_length = s3.dataTable_pageLength or 25
start = None
if s3.no_sspag:
dt_pagination = "false"
limit = None
else:
dt_pagination = "true"
limit = 2 * display_length
# Generate Data Table
dt, totalrows = resource.datatable(fields = list_fields,
start = start,
limit = limit,
left = [],
orderby = default_orderby,
)
# Render the Data Table
datatable = dt.html(totalrows,
totalrows,
id = list_id,
dt_pagination = dt_pagination,
dt_pageLength = display_length,
dt_base_url = r.url(method="", vars={}),
dt_permalink = r.url(),
dt_formkey = formkey,
)
# Configure action buttons
self.role_list_actions(r)
# View
response.view = "admin/roles.html"
# Page actions
crud_button = S3CRUD.crud_button
page_actions = DIV(crud_button(T("Create Role"),
_href = r.url(method="create"),
),
# TODO activate when implemented
#crud_button(T("Import Roles"),
# _href = r.url(method="import"),
# ),
crud_button(T("Export Roles"),
_href = r.url(representation="csv"),
),
)
# Output
output = {"title": T("User Roles"),
"items": datatable,
"page_actions": page_actions,
}
elif representation == "aadata":
# Page limits
start, limit = S3CRUD._limits(get_vars)
# Data Table Filter and Sorting
searchq, orderby, left = resource.datatable_filter(list_fields,
get_vars,
)
if searchq is not None:
totalrows = resource.count()
resource.add_filter(searchq)
else:
totalrows = None
if orderby is None:
orderby = default_orderby
# Data Table
if totalrows != 0:
dt, displayrows = resource.datatable(fields = list_fields,
start = start,
limit = limit,
left = left,
orderby = orderby,
)
else:
dt, displayrows = None, 0
if totalrows is None:
totalrows = displayrows
# Echo
draw = int(get_vars.get("draw", 0))
# Representation
if dt is not None:
output = dt.json(totalrows, displayrows, list_id, draw)
else:
output = '{"recordsTotal":%s,' \
'"recordsFiltered":0,' \
'"dataTable_id":"%s",' \
'"draw":%s,' \
'"data":[]}' % (totalrows, list_id, draw)
else:
r.error(415, current.ERROR.BAD_FORMAT)
return output
# -------------------------------------------------------------------------
def role_list_actions(self, r):
"""
Configure action buttons for role list
@param r: the S3Request
"""
T = current.T
s3 = current.response.s3
sr = current.auth.get_system_roles()
table = self.table
# Standard actions
s3.actions = None
s3.crud_labels.UPDATE = T("Edit")
S3CRUD.action_buttons(r, deletable=False)
action_button = S3CRUD.action_button
# Users
label = T("Users")
excluded = [str(sr.AUTHENTICATED), str(sr.ANONYMOUS)]
action_button(label, URL(args=["[id]", "users"]),
exclude = excluded,
_title = s3_str(T("Assign this role to users")),
)
action_button(label, None,
restrict = excluded,
_disabled = "disabled",
_title = s3_str(T("This role is assigned automatically")),
)
# Copy-button Ajax
label = T("Copy")
excluded = [str(sr.ADMIN)]
action_button(label, None,
_ajaxurl = URL(args=["[id]", "copy.json"]),
exclude = excluded,
_title = s3_str(T("Copy this role to create a new role")),
_class = "action-btn copy-role-btn",
)
action_button(label, None,
restrict = excluded,
_disabled = "disabled",
_title = s3_str(T("This role cannot be copied")),
)
question = T("Create a copy of this role?")
script = '''var dt=$('#roles');dt.on('click','.copy-role-btn',dt.dataTableS3('ajaxAction','%s'));''' % question
s3.jquery_ready.append(script)
# Delete-button Ajax
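        # System and protected roles must not be deleted, so disable the
        # delete button for them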
label = T("Delete")
query = (table.deleted == False) & \
((table.system == True) | (table.protected == True))
protected_roles = current.db(query).select(table.id)
excluded = [str(role.id) for role in protected_roles]
action_button(label, None,
_ajaxurl = URL(args=["[id]", "delete.json"]),
_class = "delete-btn-ajax action-btn dt-ajax-delete",
exclude = excluded,
)
action_button(label, None,
restrict = excluded,
_disabled = "disabled",
_title = s3_str(T("This role cannot be deleted")),
)
# -------------------------------------------------------------------------
def role_form(self, r, **attr):
"""
Create, read, update a role
NB this function must be restricted to ADMINs (in apply_method)
"""
T = current.T
s3 = current.response.s3
settings = current.deployment_settings
output = {}
method = r.method
record = r.record
# Read-only?
readonly = False
if r.record:
if r.interactive:
readonly = method == "read"
elif r.representation == "csv":
return self.export_roles(r, **attr)
else:
r.error(415, current.ERROR.BAD_FORMAT)
# Form fields
table = r.table
# UID
uid = table.uuid
uid.label = T("UID")
uid.readable = True
uid.writable = False if record and record.system else True
# Role name
role = table.role
role.label = T("Name")
# Role description
description = table.description
description.label = T("Description")
description.widget = s3_comments_widget
# Permissions
PERMISSIONS = T("Permissions")
permissions = Field("permissions",
label = PERMISSIONS,
widget = S3PermissionWidget(r.id),
)
if not current.auth.permission.use_cacls:
# Security policy with fixed access rules
permissions.readable = permissions.writable = False
elif record:
if record.uuid == "ADMIN":
# Administrator permissions cannot be edited
permissions.readable = permissions.writable = False
else:
# Populate the field with current permissions
record.permissions = self.get_permissions(record)
# Mark required
if not readonly:
labels, s3.has_required = s3_mark_required(table, [])
labels["permissions"] = "%s:" % s3_str(PERMISSIONS)
else:
labels = None
# Form buttons
if not readonly:
submit_button = INPUT(_class = "small primary button",
_type = "submit",
_value = T("Save"),
)
cancel_button = A(T("Cancel"),
_class="cancel-form-btn action-lnk",
_href = r.url(id=""),
)
buttons = [submit_button, cancel_button]
else:
buttons = ["submit"]
# Form style
crudopts = s3.crud
formstyle = crudopts.formstyle_read if readonly else crudopts.formstyle
# Render form
tablename = "auth_group"
form = SQLFORM.factory(uid,
role,
description,
permissions,
record = record,
showid = False,
labels = labels,
formstyle = formstyle,
table_name = tablename,
upload = s3.download_url,
readonly = readonly,
separator = "",
submit_button = settings.submit_button,
buttons = buttons,
)
form.add_class("rm-form")
output["form"] = form
# Navigate-away confirmation
if crudopts.navigate_away_confirm:
s3.jquery_ready.append("S3EnableNavigateAwayConfirm()")
# Process form
response = current.response
formname = "%s/%s" % (tablename, record.id if record else None)
if form.accepts(current.request.post_vars,
current.session,
#onvalidation = self.validate,
formname = formname,
keepvalues = False,
hideerror = False,
):
role_id, message = self.update_role(record, form)
if role_id:
response.confirmation = message
self.next = r.url(id="", method="")
else:
response.error = message
elif form.errors:
response.error = T("There are errors in the form, please check your input")
# Title
if record:
if readonly:
output["title"] = record.role
else:
output["title"] = T("Edit Role: %(role)s") % {"role": record.role}
else:
output["title"] = T("Create Role")
# View
response.view = "admin/role_form.html"
return output
# -------------------------------------------------------------------------
def get_permissions(self, role):
"""
Extract the permission rules for a role
@param role: the role (Row)
@returns: the permission rules as JSON string
"""
permissions = current.auth.permission
rules = []
table = permissions.table
if table:
query = (table.group_id == role.id) & \
(table.deleted == False)
if not permissions.use_facls:
query &= (table.function == None)
if not permissions.use_tacls:
query &= (table.tablename == None)
rows = current.db(query).select(table.id,
table.controller,
table.function,
table.tablename,
table.uacl,
table.oacl,
table.entity,
table.unrestricted,
)
for row in rows:
if row.unrestricted:
entity = "any"
else:
entity = row.entity
rules.append([row.id,
row.controller,
row.function,
row.tablename,
row.uacl,
row.oacl,
entity,
False, # delete-flag
])
return json.dumps(rules, separators=SEPARATORS)
# -------------------------------------------------------------------------
def update_role(self, role, form):
"""
Create or update a role from a role form
@param role: the role (Row)
@param form: the form
@returns: tuple (role ID, confirmation message)
"""
T = current.T
auth = current.auth
formvars = form.vars
rolename = formvars.role
uid = formvars.uuid
if role:
role_id = role.id
data = {"role": rolename,
"description": formvars.description,
}
if uid is not None:
data["uuid"] = uid
role.update_record(**data)
else:
data = {"role": rolename}
role_id = auth.s3_create_role(rolename,
description = formvars.description,
uid = uid,
)
if role_id:
# Update permissions
permissions = formvars.permissions
if permissions:
self.update_permissions(role_id, permissions)
if not role:
message = T("Role %(role)s created") % data
else:
message = T("Role %(role)s updated") % data
else:
if not role:
message = T("Failed to create role %(role)s") % data
else:
message = T("Failed to update role %(role)s") % data
return role_id, message
# -------------------------------------------------------------------------
def update_permissions(self, role_id, rules):
"""
Update the permission rules for a role
@param role_id: the role record ID (auth_group.id)
@param rules: the rules as JSON string
"""
table = current.auth.permission.table
if table:
db = current.db
rules = json.loads(rules)
for rule in rules:
rule_id = rule[0]
deleted = rule[7]
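                # Skip entries without a rule ID, and rules that do not
                # specify any target (controller, function or table)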
if rule_id is None:
continue
if not any(rule[i] for i in (1, 2, 3)):
continue
if rule_id and deleted:
db(table.id == rule_id).update(deleted=True)
else:
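                    # Entity "any" means unrestricted (site-wide) access,
                    # otherwise the entity is the pe_id of the realm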
entity = rule[6]
if entity == "any":
unrestricted = True
entity = None
else:
unrestricted = False
try:
entity = long(entity) if entity else None
except (ValueError, TypeError):
entity = None
data = {"group_id": role_id,
"controller": rule[1],
"function": rule[2],
"tablename": rule[3],
"uacl": rule[4],
"oacl": rule[5],
"entity": entity,
"unrestricted": unrestricted,
}
if rule_id:
# Update the rule
db(table.id == rule_id).update(**data)
else:
# Add the rule
table.insert(**data)
return ""
# -------------------------------------------------------------------------
def copy_role(self, r, **attr):
"""
Duplicate an existing role
NB this function must be restricted to ADMINs (in apply_method)
"""
# CSRF Protection
key = current.session["_formkey[admin/rolelist]"]
if not key or r.post_vars.get("_formkey") != key:
r.error(403, current.ERROR.NOT_PERMITTED)
if r.http == "POST":
db = current.db
role = r.record
if not role:
r.error(400, current.ERROR.BAD_RECORD)
# Find a suitable uuid and name
table = r.table
query = ((table.uuid.like("%s%%" % role.uuid)) | \
(table.role.like("%s%%" % role.role)))
rows = db(query).select(table.uuid,
table.role,
)
uids = set(row.uuid for row in rows)
names = set(row.role for row in rows)
uid = name = None
for i in range(2, 1000):
if not uid:
uid = "%s%s" % (role.uuid, i)
if uid in uids:
uid = None
if not name:
name = "%s-%s" % (role.role, i)
if name in names:
name = None
if uid and name:
break
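            # Fall back to random UUIDs if no free numeric suffix was found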
if not uid:
uid = str(uuid.uuid4())
if not name:
name = str(uuid.uuid4())
# Create the new role
role_id = table.insert(uuid = uid,
role = name,
)
# Copy permissions
ptable = current.auth.permission.table
if ptable:
query = (ptable.group_id == role.id) & \
(ptable.deleted == False)
rules = db(query).select(ptable.controller,
ptable.function,
ptable.tablename,
ptable.record,
ptable.oacl,
ptable.uacl,
ptable.entity,
ptable.unrestricted,
)
for rule in rules:
ptable.insert(group_id = role_id,
controller = rule.controller,
function = rule.function,
tablename = rule.tablename,
record = rule.record,
oacl = rule.oacl,
uacl = rule.uacl,
entity = rule.entity,
unrestricted = rule.unrestricted,
)
message = current.T("New Role %(role)s created") % {"role": name}
return current.xml.json_message(message=message)
else:
r.error(405, current.ERROR.BAD_METHOD)
# -------------------------------------------------------------------------
def delete_role(self, r, **attr):
"""
Delete a role
NB this function must be restricted to ADMINs (in apply_method)
"""
# CSRF Protection
key = current.session["_formkey[admin/rolelist]"]
if not key or r.post_vars.get("_formkey") != key:
r.error(403, current.ERROR.NOT_PERMITTED)
if r.http in ("POST", "DELETE"):
role = r.record
if not role:
r.error(400, current.ERROR.BAD_RECORD)
if role.protected or role.system:
r.error(403, current.ERROR.NOT_PERMITTED)
auth = current.auth
auth.s3_delete_role(role.id)
auth.s3_set_roles()
message = current.T("Role %(role)s deleted") % {"role": role.role}
return current.xml.json_message(message=message)
else:
r.error(405, current.ERROR.BAD_METHOD)
# -------------------------------------------------------------------------
def assign_roles(self, r, **attr):
"""
Assign/unassign roles to a user
NB this function is accessible for non-ADMINs (e.g. ORG_ADMIN)
"""
auth = current.auth
# Require a primary record
if not r.record:
            r.error(400, current.ERROR.BAD_RECORD)
# Require permission to create or delete group memberships
mtable = auth.settings.table_membership
permitted = auth.s3_has_permission
if not permitted("create", mtable) and not permitted("delete", mtable):
r.unauthorised()
# Require that the target user record belongs to a managed organisation
pe_ids = auth.get_managed_orgs()
if not pe_ids:
r.unauthorised()
elif pe_ids is not True:
otable = current.s3db.org_organisation
utable = auth.settings.table_user
query = (utable.id == r.id) & \
(otable.id == utable.organisation_id) & \
(otable.pe_id.belongs(pe_ids))
row = current.db(query).select(utable.id, limitby=(0, 1)).first()
if not row:
r.unauthorised()
s3 = current.response.s3
# Which roles can the current user manage for this user?
managed_roles = self.get_managed_roles(r.id)
output = {}
if r.http == "GET":
T = current.T
# Page Title
userfield = auth.settings.login_userfield
user_name = r.record[userfield]
output["title"] = "%s: %s" % (T("Roles of User"), user_name)
# Should we use realms?
use_realms = auth.permission.entity_realm
if use_realms:
realm_types, realms = self.get_managed_realms()
else:
realm_types, realms = None, None
# The Ajax URL for role updates
ajax_url = r.url(id="[id]", representation="json")
# The form field
field = mtable.user_id
field.readable = field.writable = True
field.widget = S3RolesWidget(mode="roles",
items = managed_roles,
use_realms = use_realms,
realm_types = realm_types,
realms = realms,
ajax_url = ajax_url,
)
# Render form
tablename = str(mtable)
form = SQLFORM.factory(field,
record = {"id": None, "user_id": r.id},
showid = False,
labels = {field.name: ""},
formstyle = s3.crud.formstyle,
table_name = tablename,
upload = s3.download_url,
#readonly = readonly,
separator = "",
submit_button = False,
buttons = [],
)
form.add_class("rm-form")
output["form"] = form
# Show a back-button since OrgAdmins have no other obvious
# way to return to the list (no left menu)
crud_button = S3CRUD.crud_button
output["list_btn"] = crud_button(T("Back to User List"),
icon = "return",
_href = r.url(id="", method=""),
)
# View
response = current.response
response.view = "admin/role_form.html"
elif r.http == "POST":
if r.representation == "json":
# Read+parse body JSON
s = r.body
s.seek(0)
try:
options = json.load(s)
except JSONERRORS:
options = None
if not isinstance(options, dict):
r.error(400, "Invalid request options")
user_id = r.record.id
added = options.get("add")
removed = options.get("remove")
# Validate
if added:
for group_id, pe_id in added:
role = managed_roles.get(group_id)
if not role or role.get("a") is False:
r.error(403, current.ERROR.NOT_PERMITTED)
if removed:
for group_id, pe_id in removed:
role = managed_roles.get(group_id)
if not role or role.get("r") is False:
r.error(403, current.ERROR.NOT_PERMITTED)
# Update role assignments
if added:
add_role = auth.s3_assign_role
for group_id, pe_id in added:
add_role(user_id, group_id, for_pe=pe_id)
if removed:
remove_role = auth.s3_withdraw_role
for group_id, pe_id in removed:
remove_role(user_id, group_id, for_pe=pe_id)
output = current.xml.json_message(options=options)
else:
r.error(415, current.ERROR.BAD_FORMAT)
else:
r.error(405, current.ERROR.BAD_METHOD)
return output
# -------------------------------------------------------------------------
def assign_users(self, r, **attr):
"""
Assign/unassign users to a role
NB this function could be accessible for non-ADMINs (e.g. ORG_ADMIN)
"""
auth = current.auth
# Require a primary record
role = r.record
if not role:
            r.error(400, current.ERROR.BAD_RECORD)
# Require permission to create or delete group memberships
mtable = auth.settings.table_membership
permitted = auth.s3_has_permission
if not permitted("create", mtable) and not permitted("delete", mtable):
r.unauthorised()
# Require that the target role belongs to managed roles
managed_roles = self.get_managed_roles(None)
if role.id not in managed_roles:
r.unauthorised()
s3 = current.response.s3
# Which users can the current user manage?
managed_users = self.get_managed_users(role.id)
# Special rules for system roles
sr = auth.get_system_roles()
unrestrictable = (sr.ADMIN, sr.AUTHENTICATED, sr.ANONYMOUS)
unassignable = (sr.AUTHENTICATED, sr.ANONYMOUS)
output = {}
if r.http == "GET":
T = current.T
# Page Title
output["title"] = "%s: %s" % (T("Users with Role"), role.role)
# Should we use realms?
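            # Unrestrictable roles always apply site-wide, so no realm
            # can be selected for their assignments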
use_realms = auth.permission.entity_realm and \
role.id not in unrestrictable
if use_realms:
realm_types, realms = self.get_managed_realms()
else:
realm_types, realms = None, None
# The Ajax URL for role updates
ajax_url = r.url(id="[id]", representation="json")
# The form field
field = mtable.group_id
field.readable = field.writable = True
field.widget = S3RolesWidget(mode="users",
items = managed_users,
use_realms = use_realms,
realm_types = realm_types,
realms = realms,
ajax_url = ajax_url,
)
# Render form
tablename = str(mtable)
form = SQLFORM.factory(field,
record = {"id": None, "group_id": role.id},
showid = False,
labels = {field.name: ""},
formstyle = s3.crud.formstyle,
table_name = tablename,
upload = s3.download_url,
#readonly = readonly,
separator = "",
submit_button = False,
buttons = [],
)
form.add_class("rm-form")
output["form"] = form
# Default RHeader and View
if "rheader" not in attr:
return_btn = S3CRUD.crud_button("Back to Roles List",
icon = "return",
_href=r.url(id="", method=""),
)
output["rheader"] = DIV(return_btn,
_class="rheader",
)
response = current.response
response.view = "admin/role_form.html"
elif r.http == "POST":
if r.representation == "json":
# Process Ajax-request from S3RolesWidget
# Read+parse body JSON
s = r.body
s.seek(0)
try:
options = json.load(s)
except JSONERRORS:
options = None
if not isinstance(options, dict):
r.error(400, "Invalid request options")
added = options.get("add")
removed = options.get("remove")
# Validate
group_id = role.id
if group_id in unassignable:
r.error(403, current.ERROR.NOT_PERMITTED)
if added:
for user_id, pe_id in added:
user = managed_users.get(user_id)
if not user or user.get("a") is False:
r.error(403, current.ERROR.NOT_PERMITTED)
if removed:
for user_id, pe_id in removed:
user = managed_users.get(user_id)
if not user or user.get("r") is False:
r.error(403, current.ERROR.NOT_PERMITTED)
# Update role assignments
if added:
add_role = auth.s3_assign_role
for user_id, pe_id in added:
add_role(user_id, group_id, for_pe=pe_id)
if removed:
remove_role = auth.s3_withdraw_role
for user_id, pe_id in removed:
remove_role(user_id, group_id, for_pe=pe_id)
output = current.xml.json_message(options=options)
else:
r.error(415, current.ERROR.BAD_FORMAT)
else:
r.error(405, current.ERROR.BAD_METHOD)
return output
# -------------------------------------------------------------------------
@staticmethod
def get_managed_users(role_id):
"""
Get a dict of users the current user can assign to roles
@param role_id: the target role ID
@returns: a dict {user_id: {l:label,
t:title,
a:assignable,
r:removable,
u:unrestrictable,
}, ...}
NB a, r and u attributes only added if non-default
"""
auth = current.auth
auth_settings = auth.settings
sr = auth.get_system_roles()
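        # Special handling of system roles (see below):
        # - ADMINs cannot remove their own ADMIN role
        # - AUTHENTICATED/ANONYMOUS cannot be assigned or removed manually
        # - ADMIN/AUTHENTICATED/ANONYMOUS assignments are unrestrictable
        #   (i.e. not realm-specific)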
admin_role = role_id == sr.ADMIN
unassignable = role_id in (sr.AUTHENTICATED, sr.ANONYMOUS)
unrestrictable = role_id in (sr.ADMIN, sr.AUTHENTICATED, sr.ANONYMOUS)
current_user = auth.user.id if auth.user else None
users = {}
pe_ids = auth.get_managed_orgs()
if pe_ids:
utable = auth_settings.table_user
query = (utable.deleted == False)
if pe_ids is not True:
otable = current.s3db.org_organisation
query &= (otable.id == utable.organisation_id) & \
(otable.pe_id.belongs(pe_ids))
userfield = auth_settings.login_userfield
rows = current.db(query).select(utable.id,
utable.first_name,
utable.last_name,
utable[userfield],
)
for row in rows:
user_id = row.id
user = {"l": row[userfield],
"t": "%s %s" % (row.first_name,
row.last_name,
),
}
if unrestrictable:
user["u"] = True
if admin_role and user_id == current_user:
# ADMINs cannot remove their own ADMIN role
user["r"] = False
if unassignable:
user["a"] = user["r"] = False
users[user_id] = user
return users
# -------------------------------------------------------------------------
@staticmethod
def get_managed_roles(user_id):
"""
Get a dict of roles the current user can manage
@returns: a dict {role_id: {l:label,
a:assignable,
r:removable,
u:unrestrictable,
}, ...},
NB a, r and u attributes only added if non-default
"""
auth = current.auth
sr = auth.get_system_roles()
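        # AUTO roles are assigned automatically and can never be assigned
        # or removed manually; administrative roles can only be managed by
        # users who hold the respective role themselves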
AUTO = (sr.AUTHENTICATED, sr.ANONYMOUS)
ADMINS = (sr.ADMIN, sr.ORG_ADMIN, sr.ORG_GROUP_ADMIN)
UNRESTRICTABLE = (sr.ADMIN, sr.AUTHENTICATED, sr.ANONYMOUS)
table = auth.settings.table_group
query = (table.hidden == False) & \
(table.deleted == False)
rows = current.db(query).select(table.id,
table.uuid,
table.role,
)
has_role = auth.s3_has_role
roles = {}
for row in rows:
role = {"l": row.role or row.uuid}
role_id = row.id
if role_id in ADMINS:
assignable = has_role(role_id)
else:
assignable = role_id not in AUTO
if role_id == sr.ADMIN and auth.user.id == user_id:
removable = False
else:
removable = assignable
if not assignable:
role["a"] = False
if not removable:
role["r"] = False
if role_id in UNRESTRICTABLE:
role["u"] = True
roles[role_id] = role
return roles
# -------------------------------------------------------------------------
@staticmethod
def get_managed_realms():
"""
Get a dict of realms managed by the current user
@returns: tuple (realm_types, realms):
- realm_types = [(instance_type, label), ...]
- realms = {pe_id: {l:label, t:type}, ...}
"""
T = current.T
t_ = lambda v: s3_str(T(v))
realm_types = [(None, t_("Multiple"))]
realms = {None: {"l": t_("Default Realm"), "t": None},
}
# Look up the realms managed by the current user
pe_ids = []
auth = current.auth
sr = auth.get_system_roles()
has_role = auth.s3_has_role
is_admin = has_role(sr.ADMIN)
if is_admin:
# Only ADMIN can assign roles site-wide
realms[0] = {"l": t_("All Entities"), "t": None}
else:
if has_role(sr.ORG_GROUP_ADMIN):
role_realms = auth.user.realms[sr.ORG_GROUP_ADMIN]
if role_realms:
pe_ids.extend(role_realms)
if has_role(sr.ORG_ADMIN):
role_realms = auth.user.realms[sr.ORG_ADMIN]
if role_realms:
pe_ids.extend(role_realms)
# Get entities and types
s3db = current.s3db
types = current.deployment_settings.get_auth_realm_entity_types()
entities = s3db.pr_get_entities(pe_ids = pe_ids,
types = types,
group = True,
show_instance_type = False,
)
# Add representations for entities and types
instance_type_nice = s3db.pr_pentity.instance_type.represent
for instance_type in types:
entity_group = entities.get(instance_type)
if not entity_group:
continue
realm_types.append((instance_type,
s3_str(instance_type_nice(instance_type)),
))
for pe_id, name in entity_group.items():
realms[pe_id] = {"l": s3_str(name), "t": instance_type}
return realm_types, realms
# -------------------------------------------------------------------------
def import_roles(self, r, **attr):
"""
Interactive import of roles (auth_roles.csv format)
NB this function must be restricted to ADMINs (in apply_method)
"""
# TODO implement roles importer
T = current.T
output = {}
# Title
output["title"] = T("Import Roles")
# View
response = current.response
response.view = "admin/import_roles.html"
return output
# if GET:
# show an import form
# elif POST:
# import the submitted file using Bulk-importer
# -------------------------------------------------------------------------
def export_roles(self, r, **attr):
"""
Export of roles (auth_roles.csv format)
NB this function must be restricted to ADMINs (in apply_method)
"""
output = S3RolesExport(r.resource).as_csv()
# Response headers
from gluon.contenttype import contenttype
filename = "auth_roles.csv"
disposition = "attachment; filename=\"%s\"" % filename
response = current.response
response.headers["Content-Type"] = contenttype(".csv")
response.headers["Content-disposition"] = disposition
return output.read()
# =============================================================================
class S3PermissionWidget(object):
"""
Form widget to modify permissions of a role
"""
def __init__(self, role_id=None):
"""
Constructor
"""
sr = current.auth.get_system_roles()
if role_id == sr.ANONYMOUS:
default_roles = ()
elif role_id == sr.AUTHENTICATED:
default_roles = (sr.ANONYMOUS,)
else:
default_roles = (sr.ANONYMOUS, sr.AUTHENTICATED)
self.default_roles = default_roles
# -------------------------------------------------------------------------
def __call__(self, field, value, **attributes):
"""
Form builder entry point
@param field: the Field
@param value: the current (or default) value of the field
@param attributes: HTML attributes for the widget
"""
T = current.T
# Widget ID
widget_id = attributes.get("_id") or str(field).replace(".", "_")
# Field name
name = attributes.get("_name") or field.name
# Page access rules tab+pane
prules_id = "%s-prules" % widget_id
prules_tab = LI(A(T("Page Access"),
_href = "#" + prules_id,
)
)
prules_pane = DIV(_id = prules_id,
_class = "rm-page-rules",
)
# Table access rules tab+page
rules = current.auth.permission
use_tacls = rules.use_tacls
if use_tacls:
trules_id = "%s-trules" % widget_id
trules_tab = LI(A(T("Table Access"),
_href = "#" + trules_id,
),
)
trules_pane = DIV(_id = trules_id,
_class = "rm-table-rules",
)
else:
trules_pane = ""
trules_tab = ""
# Construct the widget
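        # The hidden INPUT carries the JSON-encoded permission rules; the
        # tab/pane structure is controlled by the client-side permissionEdit
        # widget (see inject_script)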
widget = DIV(INPUT(_type = "hidden",
_name = name,
_value = value,
_id = widget_id + "-input",
),
DIV(UL(trules_tab,
prules_tab,
),
trules_pane,
prules_pane,
_class = "rm-rules hide"
),
_id = widget_id,
)
# Module header icons
rtl = current.response.s3.rtl
icons = {"expanded": "fa fa-caret-down",
"collapsed": "fa fa-caret-left" if rtl else "fa fa-caret-right",
}
# Client-side widget options
widget_opts = {"fRules": rules.use_facls,
"tRules": use_tacls,
"useRealms": rules.entity_realm,
"permissions": self.get_permissions(),
"defaultPermissions": self.get_default_permissions(),
"modules": self.get_active_modules(),
"icons": icons,
}
if use_tacls:
widget_opts["models"] = self.get_active_models()
# Localized strings for client-side widget
i18n = {"rm_Add": T("Add"),
"rm_AddRule": T("Add Rule"),
"rm_AllEntities": T("All Entities"),
"rm_AllRecords": T("All Records"),
"rm_AssignedEntities": T("Assigned Entities"),
"rm_Cancel": T("Cancel"),
"rm_CollapseAll": T("Collapse All"),
"rm_ConfirmDeleteRule": T("Do you want to delete this rule?"),
"rm_Default": T("default"),
"rm_DeleteRule": T("Delete"),
"rm_ExpandAll": T("Expand All"),
"rm_NoAccess": T("No access"),
"rm_NoRestrictions": T("No restrictions"),
"rm_Others": T("Others"),
"rm_OwnedRecords": T("Owned Records"),
"rm_Page": T("Page"),
"rm_RestrictedTables": T("Restricted Tables"),
"rm_Scope": T("Scope"),
"rm_SystemTables": T("System Tables"),
"rm_Table": T("Table"),
"rm_UnrestrictedTables": T("Unrestricted Tables"),
}
# Inject the client-side script
self.inject_script(widget_id, widget_opts, i18n)
return widget
# -------------------------------------------------------------------------
def get_active_modules(self):
"""
Get a JSON-serializable dict of active modules
@returns: a dict {prefix: (name_nice, restricted)}
"""
# Modules where access rules do not apply (or are hard-coded)
exclude = ("appadmin", "errors")
# Active modules
modules = current.deployment_settings.modules
        active = {k: (s3_str(modules[k].name_nice), modules[k].restricted)
for k in modules if k not in exclude
}
# Special controllers for dynamic models
if current.auth.permission.use_facls:
active["default/dt"] = (s3_str(current.T("Dynamic Models")), True)
return active
# -------------------------------------------------------------------------
def get_active_models(self):
"""
Get a JSON-serializable dict of active data models
@returns: a dict {prefix: {tablename: restricted}}
"""
# Get all table names
db_tables = current.cache.ram("permission_widget_all_tables",
self.get_db_tables,
time_expire = 14400,
)
# Count the number of restricting roles per table
# @see: S3Permission.table_restricted()
rtable = current.auth.permission.table
query = (rtable.tablename != None) & \
(rtable.controller == None) & \
(rtable.function == None) & \
(rtable.deleted == False)
numroles = rtable.group_id.count()
tablename = rtable.tablename
rows = current.db(query).select(tablename,
numroles,
groupby = tablename,
)
restrictions = {row[tablename]: row[numroles] for row in rows}
# Sort tablenames after module and mark number of restrictions
models = {}
for tablename in db_tables:
prefix = tablename.split("_", 1)[0]
if prefix in ("auth", "sync", "s3", "scheduler"):
prefix = "_system"
if prefix not in models:
models[prefix] = {}
models[prefix][tablename] = restrictions.get(tablename, 0)
return models
# -------------------------------------------------------------------------
def get_db_tables(self):
"""
            Return all table names in the database; kept in a separate
            function to allow caching, because it requires loading all
            models once
@returns: db.tables
"""
db = current.db
s3db = current.s3db
# Load all static models
s3db.load_all_models()
# Load all dynamic tables (TODO: how does this make sense?)
#ttable = s3db.s3_table
#rows = db(ttable.deleted != True).select(ttable.name)
#for row in rows:
# s3db.table(row.name)
return db.tables
# -------------------------------------------------------------------------
def get_permissions(self):
"""
Get a JSON-serializable list of permissions
@returns: an ordered list of dicts:
[{l: label,
b: bit,
o: relevant for owned records,
},
...
]
"""
permission = current.auth.permission
opts = permission.PERMISSION_OPTS
skip = 0x0000
# Hide approval-related permissions if record approval is disabled
if not current.deployment_settings.get_auth_record_approval():
skip |= permission.REVIEW | permission.APPROVE
output = []
for bit, label in opts.items():
if bit & skip:
continue
output.append({"l": s3_str(label),
"b": bit,
"o": bit != permission.CREATE,
})
return output
# -------------------------------------------------------------------------
def get_default_permissions(self):
"""
Get default permissions, i.e. those granted by roles the user
has by default
@returns: a dict {tablename: (uACL, oACL)}
"""
permissions = current.auth.permission
table = permissions.table
default_roles = self.default_roles
default_permissions = {}
if table and default_roles:
query = (table.group_id.belongs(default_roles))
if not permissions.use_facls:
query &= (table.function == None)
if not permissions.use_tacls:
query &= (table.tablename == None)
query &= (table.deleted == False)
rows = current.db(query).select(table.controller,
table.function,
table.tablename,
table.uacl,
table.oacl,
)
for row in rows:
target = row.tablename
if not target:
c = row.controller
if c:
target = "%s/%s" % (c, row.function or "*")
else:
continue
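                # Merge (bitwise OR) the permissions when multiple default
                # roles grant access to the same target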
rules = default_permissions.get(target)
if rules:
default_permissions[target] = (rules[0] | row.uacl,
rules[1] | row.oacl,
)
else:
default_permissions[target] = (row.uacl, row.oacl)
return default_permissions
# -------------------------------------------------------------------------
def inject_script(self, widget_id, options, i18n):
"""
Inject the necessary JavaScript for the widget
@param widget_id: the widget ID
(=element ID of the person_id field)
@param options: JSON-serializable dict of widget options
@param i18n: translations of screen messages rendered by
the client-side script,
a dict {messageKey: translation}
"""
s3 = current.response.s3
# Static script
if s3.debug:
script = "/%s/static/scripts/S3/s3.ui.permissions.js" % \
current.request.application
else:
script = "/%s/static/scripts/S3/s3.ui.permissions.min.js" % \
current.request.application
scripts = s3.scripts
if script not in scripts:
scripts.append(script)
self.inject_i18n(i18n)
# Widget options
opts = {}
if options:
opts.update(options)
# Widget instantiation
script = '''$('#%(widget_id)s').permissionEdit(%(options)s)''' % \
{"widget_id": widget_id,
"options": json.dumps(opts, separators=SEPARATORS),
}
jquery_ready = s3.jquery_ready
if script not in jquery_ready:
jquery_ready.append(script)
# -------------------------------------------------------------------------
def inject_i18n(self, labels):
"""
Inject translations for screen messages rendered by the
client-side script
@param labels: dict of translations {messageKey: translation}
"""
strings = ['''i18n.%s="%s"''' % (k, s3_str(v))
for k, v in labels.items()]
current.response.s3.js_global.append("\n".join(strings))
# =============================================================================
class S3RolesWidget(object):
"""
Form widget to assign roles to users
"""
def __init__(self,
mode="roles",
items=None,
use_realms=False,
realm_types=None,
realms=None,
ajax_url=None,
):
"""
Constructor
@param mode: what to assign ("roles"|"users")
@param items: the assignable items (roles or users), dict,
structure see get_managed_roles/get_managed_users
@param use_realms: boolean, whether to use realms
@param realm_types: the realm types and their labels, tuple,
format see get_managed_realms
@param realms: the realms, dict, structure see get_managed_realms
@param ajax_url: the URL for Ajax modification of assignments
"""
self.mode = mode
self.items = items
self.use_realms = use_realms
self.realm_types = realm_types
self.realms = realms
self.ajax_url = ajax_url
# -------------------------------------------------------------------------
def __call__(self, field, value, **attributes):
"""
Form builder entry point
@param field: the Field
@param value: the current (or default) value of the field
@param attributes: HTML attributes for the widget
"""
T = current.T
# Widget ID
widget_id = attributes.get("_id") or str(field).replace(".", "_")
# Field name
name = attributes.get("_name") or field.name
# Extract the current assignments
if value:
assignments = self.get_current_assignments(value)
else:
assignments = []
# Construct the widget
widget = DIV(INPUT(_type = "hidden",
_name = name,
_value = value,
_id = widget_id + "-id",
),
INPUT(_type = "hidden",
_name = "assigned",
_value = json.dumps(assignments, separators=SEPARATORS),
_id = widget_id + "-data",
),
_id = widget_id,
_class = "rm-assign-widget",
)
# Client-side widget options
widget_opts = {"mode": self.mode,
"ajaxURL": self.ajax_url,
"items": self.items,
"useRealms": self.use_realms,
"realms": self.realms,
"realmTypes": self.realm_types,
}
# Localized strings for client-side widget
if self.mode == "roles":
CONFIRM = T("Do you want to remove the %(role)s role?")
else:
CONFIRM = T("Do you want to remove %(user)s from this role?")
i18n = {"rm_Add": T("Add"),
"rm_Cancel": T("Cancel"),
"rm_ConfirmDeleteAssignment": CONFIRM,
"rm_Delete": T("Delete"),
"rm_DeletionFailed": T("Deletion Failed"),
"rm_ForEntity": T("For Entity"),
"rm_Roles": T("Roles"),
"rm_SubmissionFailed": T("Submission Failed"),
"rm_Users": T("Users"),
}
# Inject the client-side script
self.inject_script(widget_id, widget_opts, i18n)
return widget
# -------------------------------------------------------------------------
def get_current_assignments(self, record_id):
"""
Get the current assignments for the user/role
@param record_id: the user or role ID
@returns: a list of tuples (roleID|userID, realmID)
"""
auth = current.auth
table = auth.settings.table_membership
if self.mode == "roles":
query = (table.user_id == record_id) & \
(table.group_id.belongs(self.items.keys()))
field = table.group_id
else:
query = (table.group_id == record_id) & \
(table.user_id.belongs(self.items.keys()))
field = table.user_id
use_realms = self.use_realms
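        # Non-ADMIN users only see assignments for the realms they manage
        # (including unrestricted assignments if the default realm is
        # among the managed realms)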
if use_realms and \
not auth.s3_has_role(auth.get_system_roles().ADMIN):
managed_realms = set(self.realms.keys())
none = None in managed_realms
managed_realms.discard(None)
q = (table.pe_id.belongs(managed_realms)) if managed_realms else None
if none:
n = (table.pe_id == None)
q = q | n if q else n
if q:
query &= q
query &= (table.deleted == False)
rows = current.db(query).select(field, table.pe_id)
assignments = set()
for row in rows:
pe_id = row.pe_id if use_realms else None
assignments.add((row[field], pe_id))
return list(assignments)
# -------------------------------------------------------------------------
def inject_script(self, widget_id, options, i18n):
"""
Inject the necessary JavaScript for the widget
@param widget_id: the widget ID
(=element ID of the person_id field)
@param options: JSON-serializable dict of widget options
@param i18n: translations of screen messages rendered by
the client-side script,
a dict {messageKey: translation}
"""
s3 = current.response.s3
# Static script
if s3.debug:
script = "/%s/static/scripts/S3/s3.ui.roles.js" % \
current.request.application
else:
script = "/%s/static/scripts/S3/s3.ui.roles.min.js" % \
current.request.application
scripts = s3.scripts
if script not in scripts:
scripts.append(script)
self.inject_i18n(i18n)
# Widget options
opts = {}
if options:
opts.update(options)
# Widget instantiation
script = '''$('#%(widget_id)s').roleManager(%(options)s)''' % \
{"widget_id": widget_id,
"options": json.dumps(opts, separators=SEPARATORS),
}
jquery_ready = s3.jquery_ready
if script not in jquery_ready:
jquery_ready.append(script)
# -------------------------------------------------------------------------
def inject_i18n(self, labels):
"""
Inject translations for screen messages rendered by the
client-side script
@param labels: dict of translations {messageKey: translation}
"""
strings = ['''i18n.%s="%s"''' % (k, s3_str(v))
for k, v in labels.items()]
current.response.s3.js_global.append("\n".join(strings))
# =============================================================================
class S3RolesExport(object):
"""
Roles Exporter
"""
def __init__(self, resource):
"""
Constructor
@param resource: the role resource (auth_group) with REST
filters; or None to export all groups
"""
db = current.db
auth = current.auth
# Optional columns
self.col_hidden = False
self.col_protected = False
self.col_entity = False
# Look up the roles
gtable = auth.settings.table_group
fields = ("id",
"uuid",
"role",
"description",
"hidden",
"protected",
"system",
)
if resource and resource.tablename == str(gtable):
roles = resource.select(fields, as_rows=True)
else:
query = (gtable.deleted == False)
roles = db(query).select(*fields)
# Generate roles dict
role_dicts = {}
for role in roles:
role_dict = {"uid": role.uuid,
"role": role.role,
"description": role.description,
}
if role.hidden:
self.col_hidden = True
role_dict["hidden"] = "true"
if role.protected and not role.system:
self.col_protected = True
role_dict["protected"] = "true"
role_dicts[role.id] = role_dict
self.roles = role_dicts
        # Look up all rules for the exported roles (sorted by UID,
        # controller, function and table during export, see as_csv)
rtable = auth.permission.table
query = (rtable.group_id.belongs(role_dicts.keys())) & \
(rtable.deleted == False)
rules = db(query).select(rtable.id,
rtable.group_id,
rtable.controller,
rtable.function,
rtable.tablename,
rtable.uacl,
rtable.oacl,
rtable.entity,
)
self.rules = rules
# Look up all org entities
entities = set()
for rule in rules:
entity = rule.entity
if entity is not None:
self.col_entity = True
entities.add(entity)
otable = current.s3db.org_organisation
query = (otable.pe_id.belongs(entities)) & \
(otable.deleted == False)
self.orgs = db(query).select(otable.pe_id,
otable.name,
).as_dict(key="pe_id")
# -------------------------------------------------------------------------
def as_csv(self):
"""
Export the current roles and permissions as CSV,
suitable for prepop (see S3BulkImporter.import_role)
@returns: a StringIO containing the CSV
"""
import csv
try:
from cStringIO import StringIO # Faster, where available
except ImportError:
from StringIO import StringIO
# Optional columns
col_protected = self.col_protected
col_hidden = self.col_hidden
col_entity = self.col_entity
# Role fields
fieldnames = ["uid", "role", "description"]
if col_hidden:
fieldnames.append("hidden")
if col_protected:
fieldnames.append("protected")
# Rule fields
fieldnames.extend(["controller", "function", "table", "uacl", "oacl"])
if col_entity:
fieldnames.extend("entity")
# Helper to get the role UID for a rule
role_dicts = self.roles
def get_uid(group_id):
role_dict = role_dicts.get(group_id)
return role_dict.get("uid") if role_dict else None
# Sort the rules
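        # (rules without a controller, i.e. table rules, sort last within
        # their role)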
rules = sorted(self.rules,
key = lambda rule: (get_uid(rule.group_id),
rule.controller or "zzzzzz",
rule.function,
rule.tablename,
))
# Create the CSV
f = StringIO()
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
# Write the rules to the CSV
orgs = self.orgs
encode_permissions = self.encode_permissions
for rule in rules:
role_dict = role_dicts.get(rule.group_id)
if not role_dict:
continue
rule_dict = {}
# The entity column (optional)
if col_entity:
entity = rule.entity
if entity is not None:
if entity == 0:
rule_dict["entity"] = "any"
else:
org = orgs.get(entity)
if org:
rule_dict["entity"] = org
else:
continue
# The target columns (controller, function, table)
if rule.tablename:
rule_dict["table"] = rule.tablename
else:
if rule.controller:
rule_dict["controller"] = rule.controller
if rule.function:
rule_dict["function"] = rule.function
# The permission columns (uacl, oacl)
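            # oacl only needs to include the permissions that go beyond uacl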
uacl = encode_permissions(rule.uacl, explicit_none=True)
if uacl:
rule_dict["uacl"] = uacl
oacl = encode_permissions(rule.oacl & ~(rule.uacl))
if oacl:
rule_dict["oacl"] = oacl
# Add role columns
rule_dict.update(role_dict)
# Write the rule
writer.writerow(rule_dict)
f.seek(0)
return f
# -------------------------------------------------------------------------
def encode_permissions(self, permissions, explicit_none=False):
"""
Encodes a permission bitmap as string, using the permission
labels from S3Permission.PERMISSION_OPTS
@param permissions: the permission bitmap
@param explicit_none: return "NONE" if no permission bit set
(otherwise returns None)
"""
if not permissions:
if explicit_none:
return "NONE"
else:
return None
opts = current.auth.permission.PERMISSION_OPTS
labels = []
for bit in opts:
if permissions & bit:
labels.append(opts[bit])
return "|".join(labels)
# END =========================================================================
|
the-stack_0_19704 | #
# Copyright 2014-2016 CloudVelox Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This module contains the implementation of the 'aki' command
"""
import getopt
import common
from common import DisplayOptions
from common import CommandOutput
from common import ResourceSelector
class AKICommand(common.BaseCommand):
@staticmethod
def __aki_display(aki, disp, pg):
"""Display AMI info
"""
if disp.display == DisplayOptions.LONG:
pg.prt("%-14s %-10s %-5s",
aki.id, aki.architecture, aki.virtualization_type)
elif disp.display == DisplayOptions.EXTENDED:
pg.prt("%s:", aki.id)
pg.prt("%15s : %-12s", "State", aki.state)
pg.prt("%15s : %s", "Location", aki.location)
pg.prt("%15s : %s", "Public", aki.is_public)
pg.prt("%15s : %s", "Owner", aki.owner_id)
if aki.description:
pg.prt("%15s : %s", "Description", aki.description)
pg.prt("%15s : %s %s %s", "Hardware",
aki.architecture,
aki.virtualization_type,
aki.hypervisor,
)
if disp.display_tags:
common.display_tags(aki.tags, pg)
else:
pg.prt("%s", aki.id)
if disp.display_tags:
common.display_tags(aki.tags)
@staticmethod
def __aki_filter(selector, aki_list):
"""Create a new list of AKIs from aki_list by keeping only the
AKIs that match the filters in disp.
"""
FILTER_MAP = {
'arch' : 'architecture',
}
filter_dict = selector.get_filter_dict()
if not filter_dict:
return aki_list
new_aki_list = []
for aki in aki_list:
match = True
for filt in filter_dict:
if filter_dict[filt] != getattr(aki, FILTER_MAP[filt]):
match = False
break
if match:
new_aki_list.append(aki)
return new_aki_list
def __aki_list_cmd(self, region, selector, disp):
"""Implements the list function of the snap command
"""
if not selector.has_selection():
return
ec2_conn = self.get_ec2_conn(region)
aki_list = ec2_conn.get_all_kernels(
kernel_ids=selector.resource_id_list)
self.cache_insert(region, [aki.id for aki in aki_list])
aki_list = self.__aki_filter(selector, aki_list)
with CommandOutput() as pg:
for aki in aki_list:
self.__aki_display(aki, disp, pg)
def __aki_cmd(self, argv):
"""Implements the aki command
"""
disp = DisplayOptions()
selector = ResourceSelector()
region = None
opt_list, args = getopt.getopt(argv, "af:lr:tx")
if opt_list:
for opt in opt_list:
if opt[0] == '-a':
selector.select_all = True
elif opt[0] == '-f':
selector.add_filter_spec(opt[1])
elif opt[0] == '-l':
disp.display = DisplayOptions.LONG
elif opt[0] == '-r':
region = opt[1]
elif opt[0] == '-t':
disp.display_tags = True
elif opt[0] == '-x':
disp.display = DisplayOptions.EXTENDED
if False:
# modifying command
pass
else:
selector.resource_id_list = args
self.__aki_list_cmd(region, selector, disp)
def do_aki(self, ln):
"""
aki [std-options] [-a] [-f filtspec] [-l] [-t] [-x] [aki-id] ...
Options:
-a : list all kernels
-f filtspec : filter the list of kernels; filtspec has the form key=value;
the only valid key is 'arch'
-l : long output
-t : list tags
-x : extended output
"""
self.dispatch(self.__aki_cmd, ln)
|
the-stack_0_19705 | from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from iprestrict import models
from iprestrict.restrictor import IPRestrictor
from iprestrict.decorators import superuser_required
from django.shortcuts import render_to_response
import json
@superuser_required
def move_rule_up(request, rule_id):
rule = models.Rule.objects.get(pk=rule_id)
rule.move_up()
return HttpResponseRedirect(reverse('admin:iprestrict_rule_changelist'))
@superuser_required
def move_rule_down(request, rule_id):
rule = models.Rule.objects.get(pk=rule_id)
rule.move_down()
return HttpResponseRedirect(reverse('admin:iprestrict_rule_changelist'))
@superuser_required
def reload_rules(request):
models.ReloadRulesRequest.request_reload()
return HttpResponse('ok')
@superuser_required
def test_rules_page(request):
return render_to_response('iprestrict/test_rules.html')
@superuser_required
def test_match(request):
url = request.REQUEST['url']
ip = request.REQUEST['ip']
matching_rule_id, action = find_matching_rule(url, ip)
rules = list_rules(matching_rule_id, url, ip)
if matching_rule_id is None:
result = {
'action': 'Allowed',
'msg': 'No rules matched.',
}
else:
result = {
'action': action,
'msg': 'URL matched Rule highlighted below.'
}
result['rules'] = rules
return HttpResponse(json.dumps(result))
def find_matching_rule(url, ip):
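    # Rules are checked in order; the first rule that matches both the URL
    # and the IP determines the action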
for r in models.Rule.objects.all():
if r.matches_url(url) and r.matches_ip(ip):
return (r.pk, r.action_str())
return (None, None)
def list_rules(matching_rule_id, url, ip):
return [map_rule(r, matching_rule_id, url, ip) for r in models.Rule.objects.all()]
def map_rule(r, matching_rule_id, url, ip):
rule = {
'url_pattern': {
'value': r.url_pattern,
'matchStatus': 'match' if r.matches_url(url) else 'noMatch'
},
'ip_group': {
'name': r.ip_group.name,
'ranges': r.ip_group.ranges_str(),
'matchStatus': 'match' if r.matches_ip(ip) else 'noMatch'
},
'action': r.action_str(),
}
if r.pk == matching_rule_id:
rule['matched'] = True
return rule
|
the-stack_0_19706 | from flask import Flask
from flask_graphql import GraphQLView
from bookqlub_api import utils
from bookqlub_api.schema import schema
def create_app(session) -> Flask:
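    # This context dict is passed to every GraphQL resolver via the
    # view's get_context callback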
graphql_context = {
"session": session,
"secret": utils.config["app"]["secret"],
"demo_user_ids": frozenset(utils.config["app"]["demo_user_ids"]),
}
app = Flask(__name__)
app.add_url_rule(
"/graphql",
view_func=GraphQLView.as_view(
"graphql",
schema=schema.schema,
graphiql=utils.config["app"]["graphiql"],
get_context=lambda: graphql_context,
),
)
return app
def get_app(*args, **kwargs):
return create_app(utils.get_db_session())
if __name__ == "__main__":
app = get_app()
app.run(port=utils.config["app"]["port"])
|
the-stack_0_19707 | import tkinter as tk
from tkinter.scrolledtext import ScrolledText
from tkinter.constants import HORIZONTAL
import SendMailWindow as smw
'''parameters'''
'''sender:string of mail from who'''
'''topic:string of mail's topic'''
'''text:string of mail's text'''
class ReadMailWindow:
def __init__(self, receiver='[email protected]', sender='[email protected]', topic='DefaultTopic', text='Dear:\n\nDefaultLine\nDefaultLine2\nDefaultLine3\n\nSincerely,\nDefaultUser'):
'''
create the ReadMailWindow.
'''
self.root = tk.Tk()
self.root.title('Mail from '+sender)
self.root.geometry('300x200')
        self.receiver = receiver
        self.sender = sender
        self.topic = topic
        self.text = text
self.pane_for_sender = tk.PanedWindow(self.root,orient=tk.HORIZONTAL, borderwidth=5)
self.pane_for_sender.pack(fill=tk.BOTH)
self.lable_for_sender = tk.Label(self.pane_for_sender, text='From:', width=5, justify=tk.LEFT, anchor=tk.W)
self.entry_for_sender = tk.Entry(self.pane_for_sender, width=10)
self.entry_for_sender.insert(0, self.sender)
self.entry_for_sender.config(state=tk.DISABLED)
self.pane_for_sender.add(self.lable_for_sender)
self.pane_for_sender.add(self.entry_for_sender)
self.pane_for_topic = tk.PanedWindow(self.root, orient=tk.HORIZONTAL, borderwidth=5)
self.pane_for_topic.pack(fill=tk.BOTH)
self.lable_for_topic = tk.Label(self.pane_for_topic, text='Topic:', width=5, justify=tk.LEFT, anchor=tk.W)
self.entry_for_topic = tk.Entry(self.pane_for_topic, width=10)
self.entry_for_topic.insert(0, self.topic)
self.entry_for_topic.config(state=tk.DISABLED)
self.pane_for_topic.add(self.lable_for_topic)
self.pane_for_topic.add(self.entry_for_topic)
self.pane_for_content = tk.PanedWindow(self.root, orient=HORIZONTAL, borderwidth=7)
self.pane_for_content.pack(fill=tk.BOTH, expand=1)
self.lable_for_content = tk.Label(self.pane_for_content, text='Text:', justify=tk.LEFT, anchor=tk.W)
self.text_for_content = ScrolledText(self.pane_for_content, width=10, height=4)
self.text_for_content.insert(1.0, self.text)
self.text_for_content.config(state=tk.DISABLED)
self.pane_for_content.add(self.lable_for_content)
self.pane_for_content.add(self.text_for_content)
self.pane_for_button = tk.PanedWindow(self.root, orient=tk.HORIZONTAL)
self.pane_for_button.pack(fill=tk.BOTH)
self.button_for_reply = tk.Button(self.pane_for_button, text="Reply", command=self.Reply)
self.button_for_close = tk.Button(self.pane_for_button, text="Exit", command=self.Destroy, width=5)
self.pane_for_button.add(self.button_for_close)
self.pane_for_button.add(self.button_for_reply)
def Reply(self):
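        # acpwd.txt is assumed to hold credentials in "account:password"
        # format; the password of the last line is used for the reply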
with open('acpwd.txt') as file:
for line in file:
acpwd = line.split(':')
self.SMW = smw.SendMailWindow(self.receiver, self.sender, acpwd[1])
self.SMW.text_for_content.insert(1.0, '\n\n---------------\n'+self.text)
#self.root.destroy()
def Destroy(self):
self.root.destroy()
if __name__=='__main__':
myRMW = ReadMailWindow()
myRMW.root.mainloop() |
the-stack_0_19709 | import unittest
from unittest.mock import Mock
from pynonymizer.fake import UnsupportedFakeTypeError
from pynonymizer.strategy import database
from pynonymizer.strategy.exceptions import UnknownTableStrategyError, UnknownColumnStrategyError, ConfigSyntaxError
from pynonymizer.strategy.table import TableStrategyTypes
from pynonymizer.strategy.update_column import UpdateColumnStrategyTypes
import copy
import pytest
@pytest.fixture
def simple_config():
return {
"tables": {
"accounts": {
"columns": {
"current_sign_in_ip": "ipv4_public",
"username": "unique_login",
"email": "unique_email",
"name": "empty",
"raw": "(NOW())"
}
},
"secrets" : "delete",
"transactions": "truncate"
}
}
@pytest.fixture
def config_unsupported_fake_type():
return {
"tables": {
"accounts": {
"columns": {
"current_sign_in_ip": "ipv4_public",
"username": "unique_login",
"email": "NOT A VALID FAKE TYPE",
"name": "empty",
}
},
"transactions": "truncate"
}
}
@pytest.fixture
def fake_column_generator():
from pynonymizer.fake import FakeColumnGenerator
return FakeColumnGenerator()
@pytest.fixture
def strategy_parser(fake_column_generator):
from pynonymizer.strategy import parser
return parser.StrategyParser(fake_column_generator)
def test_valid_parse_no_mutate(simple_config, strategy_parser):
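    # parse_config must not modify the given config dict in place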
old_valid_config = copy.deepcopy(simple_config)
strategy_parser.parse_config(simple_config)
assert simple_config == old_valid_config
def test_simple_parse_creates_databasestrategy(simple_config, strategy_parser):
strategy = strategy_parser.parse_config(simple_config)
assert isinstance(strategy, database.DatabaseStrategy)
def test_simple_parse_update_columns(simple_config, strategy_parser):
strategy = strategy_parser.parse_config(simple_config)
table = strategy.table_strategies[0]
assert table.table_name == "accounts"
assert table.strategy_type == TableStrategyTypes.UPDATE_COLUMNS
def test_simple_parse_truncate(simple_config, strategy_parser):
strategy = strategy_parser.parse_config(simple_config)
table = strategy.table_strategies[2]
assert table.table_name == "transactions"
assert table.strategy_type == TableStrategyTypes.TRUNCATE
def test_simple_parse_delete(simple_config, strategy_parser):
strategy = strategy_parser.parse_config(simple_config)
table = strategy.table_strategies[1]
assert table.table_name == "secrets"
assert table.strategy_type == TableStrategyTypes.DELETE
def test_simple_parse_columns(simple_config, strategy_parser):
strategy = strategy_parser.parse_config(simple_config)
table = strategy.table_strategies[0]
# this is pretty rigid - it will also test strat order as well. not ideal!
assert table.column_strategies[0].strategy_type == UpdateColumnStrategyTypes.FAKE_UPDATE
assert table.column_strategies[1].strategy_type == UpdateColumnStrategyTypes.UNIQUE_LOGIN
assert table.column_strategies[2].strategy_type == UpdateColumnStrategyTypes.UNIQUE_EMAIL
assert table.column_strategies[3].strategy_type == UpdateColumnStrategyTypes.EMPTY
assert table.column_strategies[4].strategy_type == UpdateColumnStrategyTypes.LITERAL
assert table.column_strategies[4].value == "(NOW())"
def test_unsupported_fake_column_type(strategy_parser, config_unsupported_fake_type):
"""
get_fake_column's UnsupportedFakeType should kill a parse attempt
"""
with pytest.raises(UnsupportedFakeTypeError):
strategy_parser.parse_config(config_unsupported_fake_type)
def test_invalid_table_strategy_parse(strategy_parser):
with pytest.raises(UnknownTableStrategyError):
strategy_parser.parse_config({
"tables": {
"accounts": "cheesecake"
}
})
def test_unknown_column_strategy(strategy_parser):
with pytest.raises(UnknownColumnStrategyError):
strategy_parser.parse_config({
"tables": {
"accounts": {
"columns": {
"current_sign_in_ip": {
"type": "cheese" # Not a valid strategy
}
}
},
"transactions": "truncate"
}
})
def test_unknown_table_strategy_bad_dict(strategy_parser):
with pytest.raises(UnknownTableStrategyError):
strategy_parser.parse_config({
"tables": {
"accounts": {
"not_columns": {
"current_sign_in_ip": "ipv4_public",
"username": "unique_login",
"email": "unique_email",
"name": "empty",
}
},
}
})
def test_valid_parse_before_after_script(strategy_parser):
parse_result = strategy_parser.parse_config({
"scripts": {
"before": [
"SELECT `before` from `students`;"
],
"after": [
"SELECT `after` from `students`;",
"SELECT `after_2` from `example`;"
]
},
"tables": {
"accounts": "truncate"
},
})
assert isinstance(parse_result, database.DatabaseStrategy)
assert len(parse_result.table_strategies) == 1
assert parse_result.table_strategies[0].strategy_type == TableStrategyTypes.TRUNCATE
assert parse_result.scripts["before"] == [
"SELECT `before` from `students`;"
]
assert parse_result.scripts["after"] == [
"SELECT `after` from `students`;",
"SELECT `after_2` from `example`;"
]
def test_verbose_table_truncate(strategy_parser):
with pytest.raises(ConfigSyntaxError):
strategy = strategy_parser.parse_config({
"tables": {
"table1": {
"type": "truncate",
# parser should raise error when keys from other types when type is specified
"columns": {}
}
}
})
def test_verbose_table_update_columns(strategy_parser):
strategy = strategy_parser.parse_config({
"tables": {
"table1": {
"type": "update_columns",
"columns": {
}
}
}
})
assert len(strategy.table_strategies) == 1
assert strategy.table_strategies[0].table_name == "table1"
assert strategy.table_strategies[0].strategy_type == TableStrategyTypes.UPDATE_COLUMNS
def test_verbose_table_list_duplicate(strategy_parser):
"""parser should allow multiple tables of the same name in list-parse-mode"""
strategy = strategy_parser.parse_config({
"tables": [
{
"table_name": "table1",
"type": "truncate",
},
{
"table_name": "table1",
"type": "truncate",
}
]
})
assert len(strategy.table_strategies) == 2
assert strategy.table_strategies[0].table_name == "table1"
assert strategy.table_strategies[0].strategy_type == TableStrategyTypes.TRUNCATE
assert strategy.table_strategies[1].table_name == "table1"
assert strategy.table_strategies[1].strategy_type == TableStrategyTypes.TRUNCATE
def test_table_raises_when_given_unrelated_key(strategy_parser):
with pytest.raises(ConfigSyntaxError):
strategy_parser.parse_config({
"tables": {
"table1": {
"type": "update_columns",
"columns": {
"column1": {
"type": "empty",
"where": "condition = 'value1'",
"fake_type": "email"
},
}
}
}
}) |
the-stack_0_19711 | # -*- coding: utf-8 -*-
# Copyright (C) 2016 Adrien Vergé
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Use this rule to control the number of spaces before and after colons (``:``).
.. rubric:: Options
* ``max-spaces-before`` defines the maximal number of spaces allowed before
colons (use ``-1`` to disable).
* ``max-spaces-after`` defines the maximal number of spaces allowed after
colons (use ``-1`` to disable).
.. rubric:: Examples
#. With ``colons: {max-spaces-before: 0, max-spaces-after: 1}``
the following code snippet would **PASS**:
::
object:
- a
- b
key: value
#. With ``colons: {max-spaces-before: 1}``
the following code snippet would **PASS**:
::
object :
- a
- b
the following code snippet would **FAIL**:
::
object :
- a
- b
#. With ``colons: {max-spaces-after: 2}``
the following code snippet would **PASS**:
::
first: 1
second: 2
third: 3
the following code snippet would **FAIL**:
::
first: 1
2nd: 2
third: 3
"""
import yaml
from yamllint.rules.common import is_explicit_key, spaces_after, spaces_before
ID = 'colons'
TYPE = 'token'
CONF = {'max-spaces-before': int,
'max-spaces-after': int}
DEFAULT = {'max-spaces-before': 0,
'max-spaces-after': 1}
def check(conf, token, prev, next, nextnext, context):
if isinstance(token, yaml.ValueToken):
problem = spaces_before(token, prev, next,
max=conf['max-spaces-before'],
max_desc='too many spaces before colon')
if problem is not None:
yield problem
problem = spaces_after(token, prev, next,
max=conf['max-spaces-after'],
max_desc='too many spaces after colon')
if problem is not None:
yield problem
if isinstance(token, yaml.KeyToken) and is_explicit_key(token):
problem = spaces_after(token, prev, next,
max=conf['max-spaces-after'],
max_desc='too many spaces after question mark')
if problem is not None:
yield problem
|
the-stack_0_19713 | from pygls.features import TEXT_DOCUMENT_DID_CHANGE, TEXT_DOCUMENT_DID_CLOSE, TEXT_DOCUMENT_DID_OPEN
from pygls.server import LanguageServer
from pygls.types import (
Diagnostic,
DidChangeTextDocumentParams,
DidCloseTextDocumentParams,
DidOpenTextDocumentParams,
Position,
Range,
)
from . import DEFAULT_CP2K_INPUT_XML
from .parser import CP2KInputParser
from .parser_errors import ParserError
from .tokenizer import TokenizerError
def _validate(ls, params):
ls.show_message_log("Validating CP2K input...")
diagnostics = []
text_doc = ls.workspace.get_document(params.textDocument.uri)
parser = CP2KInputParser(DEFAULT_CP2K_INPUT_XML)
with open(text_doc.path, "r") as fhandle:
try:
parser.parse(fhandle)
except (TokenizerError, ParserError) as exc:
ctx = exc.args[1]
line = ctx["line"].rstrip()
msg = f"Syntax error: {exc.args[0]}"
if exc.__cause__:
msg += f"({exc.__cause__})"
linenr = ctx["linenr"] - 1
colnr = ctx["colnr"]
if colnr is not None:
                count = 0  # number of underline chars after (positive) or before (negative) the marker if ref_colnr given
nchars = colnr # relevant line length
if ctx["ref_colnr"] is not None:
count = ctx["ref_colnr"] - ctx["colnr"]
nchars = min(ctx["ref_colnr"], ctx["colnr"]) # correct if ref comes before
if ctx["colnrs"] is not None:
# shift by the number of left-stripped ws
# ctx["colnrs"] contains the left shift for each possibly continued line
nchars += ctx["colnrs"][0] # assume no line-continuation for now
                # always underline at least one character
count = max(1, count)
erange = Range(Position(linenr, colnr + 1 - count), Position(linenr, colnr + 1))
else:
erange = Range(Position(linenr, 1), Position(linenr, len(line)))
diagnostics += [Diagnostic(erange, msg, source=type(cp2k_inp_server).__name__, related_information=[])]
ls.publish_diagnostics(text_doc.uri, diagnostics)
def setup_ls(server):
@server.feature(TEXT_DOCUMENT_DID_CHANGE)
def did_change(ls, params: DidChangeTextDocumentParams):
"""Text document did change notification."""
_validate(ls, params)
@server.feature(TEXT_DOCUMENT_DID_CLOSE)
def did_close(server: LanguageServer, params: DidCloseTextDocumentParams):
"""Text document did close notification."""
server.show_message("Text Document Did Close")
@server.feature(TEXT_DOCUMENT_DID_OPEN)
async def did_open(ls, params: DidOpenTextDocumentParams):
"""Text document did open notification."""
ls.show_message("Text Document Did Open")
_validate(ls, params)
cp2k_inp_server = LanguageServer()
setup_ls(cp2k_inp_server)
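# Illustrative note (not part of the original module): the server defined above
# is typically started from a console entry point, e.g. over stdio with
# `cp2k_inp_server.start_io()` or, for debugging, over TCP with
# `cp2k_inp_server.start_tcp("127.0.0.1", 2087)`; the port here is only a
# hypothetical example.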
|
the-stack_0_19715 | import numpy as np
import pandas as pd
from imblearn.over_sampling import RandomOverSampler
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
class Preprocessor:
"""
This class shall be used to clean and transform the data before training.
Written By: swapnil sonawane
Version: 1.0
Revisions: None
"""
def __init__(self, file_object, logger_object):
self.file_object = file_object
self.logger_object = logger_object
def remove_unwanted_spaces(self, data):
"""
Method Name: remove_unwanted_spaces
Description: This method removes the unwanted spaces from a pandas dataframe.
Output: A pandas DataFrame after removing the spaces.
On Failure: Raise Exception
Written By: swapnil sonawane
Version: 1.0
Revisions: None
"""
self.logger_object.log(self.file_object, "Entered the remove_unwanted_spaces method of the Preprocessor class")
self.data = data
try:
self.df_without_spaces = self.data.apply(
                lambda x: x.str.strip() if x.dtype == "object" else x)  # strip leading/trailing whitespace from string (object) columns
            self.logger_object.log(self.file_object,
                                   "Unwanted spaces removal Successful. Exited the remove_unwanted_spaces method of the Preprocessor class")
return self.df_without_spaces
except Exception as e:
self.logger_object.log(self.file_object,
"Exception occured in remove_unwanted_spaces method of the Preprocessor class. Exception message:: " + str(
e))
            self.logger_object.log(self.file_object,
                                   "Unwanted space removal Unsuccessful! Exited the remove_unwanted_spaces method of the Preprocessor class")
raise Exception()
    def remove_columns(self, data, columns):
"""
Method Name: remove_columns
Description: This method removes the given columns from a pandas dataframe.
Output: A pandas DataFrame after removing the specified columns.
On Failure: Raise Exception
Written By: swapnil sonawane
Version: 1.0
Revisions: None
"""
self.logger_object.log(self.file_object, "Entered the remove_columns method of the Preprocessor class")
self.data = data
self.columns = columns
try:
self.useful_data = self.data.drop(labels=self.columns, axis=1) # drop the labels specified in the columns
self.logger_object.log(self.file_object,
                                   'Columns removal Successful. Exited the remove_columns method of the Preprocessor class')
return self.useful_data
except Exception as e:
            self.logger_object.log(self.file_object,
                                   "Exception occurred in remove_columns method of the Preprocessor class. Exception:: " + str(
e))
self.logger_object.log(self.file_object,
'Column removal Unsuccessful. Exited the remove_columns method of the Preprocessor class')
raise Exception
def separate_label_features(self, data, label_column_name):
"""
Method Name: separate_label_feature
Description: This method separates the features and a Label Coulmns.
Output: Returns two separate Dataframes, one containing features and the other containing Labels .
On Failure: Raise Exception
Written By: swapnil sonawane
Version: 1.0
Revisions: None
"""
        self.logger_object.log(self.file_object, "Entered the separate_label_features method of the Preprocessor class")
try:
self.X = data.drop(labels=label_column_name,
axis=1) # drop the columns specified and separate the feature columns
self.Y = data[label_column_name] # Filter the label column
            self.logger_object.log(self.file_object,
                                   "Label Separation Successful. Exited the separate_label_features method of the Preprocessor class")
return self.X, self.Y
except Exception as e:
self.logger_object.log(self.file_object,
'Exception occured in separate_label_feature method of the Preprocessor class. Exception message:: ' + str(
e))
self.logger_object.log(self.file_object,
'Label Separation Unsuccessful. Exited the separate_label_feature method of the Preprocessor class')
raise Exception()
def is_null_present(self, data):
"""
Method Name: is_null_present
Description: This method checks whether there are null values present in the pandas Dataframe or not.
Output: Returns True if null values are present in the DataFrame, False if they are not present and
returns the list of columns for which null values are present.
On Failure: Raise Exception
Written By: swapnil sonawane
Version: 1.0
Revisions: None
"""
self.logger_object.log(self.file_object, "Entered the is_null_present method of the Preprocessor class")
self.null_present = False
self.cols_with_missing_values = []
self.col = data.columns
try:
self.null_counts = data.isna().sum() # check for the count of null values per columns
for i in range(len(self.null_counts)):
if self.null_counts[i] > 0:
self.null_present = True
self.cols_with_missing_values.append(self.col[i])
if self.null_present: # write the logs to see which columns have null values
self.dataframe_with_null = pd.DataFrame()
self.dataframe_with_null['columns'] = data.columns
self.dataframe_with_null['missing value count'] = np.asarray(data.isna().sum())
self.dataframe_with_null.to_csv(
'preprocessing_data/null_values.csv') # storing the null column information to file
self.logger_object.log(self.file_object,
"Finding missing values is a successful .Data written to the null values file. Exited the is_null_present method of the Preprocessor class")
return self.null_present, self.cols_with_missing_values
except Exception as e:
self.logger_object.log(self.file_object,
'Exception occured in is_null_present method of the Preprocessor class. Exception message:: ' + str(
e))
self.logger_object.log(self.file_object,
'Finding missing values failed. Exited the is_null_present method of the Preprocessor class')
raise Exception()
def impute_missing_values(self, data, cols_with_missing_values):
"""
Method Name: impute_missing_values
        Description: This method replaces all the missing values in the Dataframe using a most-frequent SimpleImputer.
Output: A Dataframe which has all the missing values imputed.
On Failure: Raise Exception
Written By: swapnil sonawane
Version: 1.0
Revisions: None
"""
self.logger_object.log(self.file_object, "Entered the impute_missing_values method of the Preprocessor class")
self.data = data
self.cols_with_missing_values = cols_with_missing_values
try:
self.imputer = SimpleImputer(strategy="most_frequent")
for col in self.cols_with_missing_values:
                self.data[col] = self.imputer.fit_transform(self.data[[col]]).ravel()  # SimpleImputer requires 2-D input
self.logger_object.log(self.file_object,
'Imputing missing values Successful. Exited the impute_missing_values method of the Preprocessor class')
return self.data
except Exception as e:
self.logger_object.log(self.file_object,
'Exception occured in impute_missing_values method of the Preprocessor class. Exception message:: ' + str(
e))
self.logger_object.log(self.file_object,
'Imputing missing values failed. Exited the impute_missing_values method of the Preprocessor class')
raise Exception()
def scale_numerical_columns(self, data):
"""
Method Name: scale_numerical_columns
Description: This method scales the numerical values using the Standard scaler.
        Output: A dataframe with the scaled numerical columns
On Failure: Raise Exception
Written By: swapnil sonawane
Version: 1.0
Revisions: None
"""
self.logger_object.log(self.file_object, "Entered the scale_numerical_columns method of the Preprocessor class")
self.data = data
try:
self.num_df = self.data.select_dtypes(include=['int64', 'float64']).copy()
self.scaler = StandardScaler()
self.scaled_data = self.scaler.fit_transform(self.num_df)
self.scaled_num_df = pd.DataFrame(data=self.scaled_data, columns=self.num_df.columns)
self.logger_object.log(self.file_object,
'scaling for numerical values successful. Exited the scale_numerical_columns method of the Preprocessor class')
return self.scaled_num_df
except Exception as e:
self.logger_object.log(self.file_object,
'Exception occured in scale_numerical_columns method of the Preprocessor class. Exception message:: ' + str(
e))
self.logger_object.log(self.file_object,
'scaling for numerical columns Failed. Exited the scale_numerical_columns method of the Preprocessor class')
raise Exception()
def encode_categorical_columns(self, data):
"""
Method Name: encode_categorical_columns
Description: This method encodes the categorical values to numeric values.
Output: only the columns with categorical values converted to numerical values
On Failure: Raise Exception
Written By: swapnil sonawane
Version: 1.0
Revisions: None
"""
self.logger_object.log(self.file_object,
"Entered the encode_categorical_columns method of the Preprocessor class")
try:
self.cat_df = data.select_dtypes(include=['object']).copy()
# using the dummy encoding to encode the categorical columns to numerical ones
for col in self.cat_df.columns:
self.cat_df = pd.get_dummies(self.cat_df, columns=[col], prefix=[col], drop_first=True)
self.logger_object.log(self.file_object,
"encoding for categorical values successful. Exited the encode_categorical_columns method of the Preprocessor class")
return self.cat_df
except Exception as e:
self.logger_object.log(self.file_object,
'Exception occured in encode_categorical_columns method of the Preprocessor class. Exception message: ' + str(
e))
self.logger_object.log(self.file_object,
'encoding for categorical columns Failed. Exited the encode_categorical_columns method of the Preprocessor class')
raise Exception()
def handle_imbalanced_dataset(self, x, y):
"""
Method Name: handle_imbalanced_dataset
Description: This method handles the imbalanced dataset to make it a balanced one.
Output: new balanced feature and target columns
On Failure: Raise Exception
Written By: swapnil sonawane
Version: 1.0
Revisions: None
"""
self.logger_object.log(self.file_object,
"Entered the handle_imbalanced_dataset method of the Preprocessor class")
try:
self.rdsmple = RandomOverSampler()
self.x_sampled, self.y_sampled = self.rdsmple.fit_resample(x, y)
self.logger_object.log(self.file_object,
"dataset balancing successful. Exited the handle_imbalanced_dataset method of the Preprocessor class")
return self.x_sampled, self.y_sampled
except Exception as e:
self.logger_object.log(self.file_object,
'Exception occured in handle_imbalanced_dataset method of the Preprocessor class. Exception message: ' + str(
e))
self.logger_object.log(self.file_object,
'dataset balancing Failed. Exited the handle_imbalanced_dataset method of the Preprocessor class')
raise Exception()
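# --- Illustrative usage sketch (not part of the original module) ---
# Assumptions: `file_object` is an open log file handle and `logger_object`
# exposes a `log(file_object, message)` method, as the Preprocessor expects;
# `raw_df` is a pandas DataFrame and `label_col` names its target column.
def _example_preprocessing_flow(file_object, logger_object, raw_df, label_col):
    """Hypothetical end-to-end flow chaining the Preprocessor methods above."""
    pre = Preprocessor(file_object, logger_object)
    clean_df = pre.remove_unwanted_spaces(raw_df)
    features, label = pre.separate_label_features(clean_df, label_column_name=label_col)
    null_present, null_cols = pre.is_null_present(features)
    if null_present:
        features = pre.impute_missing_values(features, null_cols)
    scaled_num = pre.scale_numerical_columns(features)      # numeric columns, standardized
    encoded_cat = pre.encode_categorical_columns(features)  # object columns, dummy-encoded
    full_features = pd.concat([scaled_num, encoded_cat], axis=1)
    return pre.handle_imbalanced_dataset(full_features, label)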
|
the-stack_0_19717 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/outbitstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
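# Illustrative example (not part of the original script): for xgettext output
#   msgid "Hello"
#   msgstr ""
# parse_po returns [(['"Hello"'], ['""'])] -- the quoted fragments are kept
# verbatim so they can be re-emitted into the generated C++ file below.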
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *outbit_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("outbit-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
|
the-stack_0_19718 | # Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
xDS Test Client.
TODO(sergiitk): separate XdsTestClient and KubernetesClientRunner to individual
modules.
"""
import functools
import logging
from typing import Optional, Iterator
import tenacity
from framework.infrastructure import k8s
import framework.rpc
from framework.rpc import grpc_channelz
from framework.rpc import grpc_testing
from framework.test_app import base_runner
logger = logging.getLogger(__name__)
# Type aliases
_ChannelzServiceClient = grpc_channelz.ChannelzServiceClient
_ChannelConnectivityState = grpc_channelz.ChannelConnectivityState
_LoadBalancerStatsServiceClient = grpc_testing.LoadBalancerStatsServiceClient
class XdsTestClient(framework.rpc.grpc.GrpcApp):
"""
Represents RPC services implemented in Client component of the xds test app.
https://github.com/grpc/grpc/blob/master/doc/xds-test-descriptions.md#client
"""
def __init__(self,
*,
ip: str,
rpc_port: int,
server_target: str,
rpc_host: Optional[str] = None,
maintenance_port: Optional[int] = None):
super().__init__(rpc_host=(rpc_host or ip))
self.ip = ip
self.rpc_port = rpc_port
self.server_target = server_target
self.maintenance_port = maintenance_port or rpc_port
@property
@functools.lru_cache(None)
def load_balancer_stats(self) -> _LoadBalancerStatsServiceClient:
return _LoadBalancerStatsServiceClient(self._make_channel(
self.rpc_port))
@property
@functools.lru_cache(None)
def channelz(self) -> _ChannelzServiceClient:
return _ChannelzServiceClient(self._make_channel(self.maintenance_port))
def get_load_balancer_stats(
self,
*,
num_rpcs: int,
timeout_sec: Optional[int] = None,
) -> grpc_testing._LoadBalancerStatsResponse:
"""
Shortcut to LoadBalancerStatsServiceClient.get_client_stats()
"""
return self.load_balancer_stats.get_client_stats(
num_rpcs=num_rpcs, timeout_sec=timeout_sec)
def get_server_channels(self) -> Iterator[grpc_channelz.Channel]:
return self.channelz.find_channels_for_target(self.server_target)
def wait_for_active_server_channel(self):
retryer = tenacity.Retrying(
retry=(tenacity.retry_if_result(lambda r: r is None) |
tenacity.retry_if_exception_type()),
wait=tenacity.wait_exponential(max=10),
stop=tenacity.stop_after_delay(60 * 3),
reraise=True)
channel = retryer(self.get_active_server_channel)
logger.info('Active server channel found: channel_id: %s, %s',
channel.ref.channel_id, channel.ref.name)
logger.debug('Server channel:\n%r', channel)
def get_active_server_channel(self) -> Optional[grpc_channelz.Channel]:
for channel in self.get_server_channels():
state: _ChannelConnectivityState = channel.data.state
logger.debug('Server channel: %s, state: %s', channel.ref.name,
_ChannelConnectivityState.State.Name(state.state))
if state.state is _ChannelConnectivityState.READY:
return channel
raise self.NotFound('Client has no active channel with the server')
def get_client_socket_with_test_server(self) -> grpc_channelz.Socket:
channel = self.get_active_server_channel()
logger.debug('Retrieving client->server socket: channel %s',
channel.ref.name)
# Get the first subchannel of the active server channel
subchannel_id = channel.subchannel_ref[0].subchannel_id
subchannel = self.channelz.get_subchannel(subchannel_id)
logger.debug('Retrieving client->server socket: subchannel %s',
subchannel.ref.name)
# Get the first socket of the subchannel
socket = self.channelz.get_socket(subchannel.socket_ref[0].socket_id)
logger.debug('Found client->server socket: %s', socket.ref.name)
return socket
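# Illustrative usage sketch (hypothetical values, not part of the original module):
#   client = XdsTestClient(ip="10.0.0.5", rpc_port=8079,
#                          server_target="xds:///test-server")
#   client.wait_for_active_server_channel()
#   stats = client.get_load_balancer_stats(num_rpcs=100)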
class KubernetesClientRunner(base_runner.KubernetesBaseRunner):
def __init__(self,
k8s_namespace,
*,
deployment_name,
image_name,
gcp_service_account,
td_bootstrap_image,
service_account_name=None,
stats_port=8079,
network='default',
deployment_template='client.deployment.yaml',
service_account_template='service-account.yaml',
reuse_namespace=False,
namespace_template=None,
debug_use_port_forwarding=False):
super().__init__(k8s_namespace, namespace_template, reuse_namespace)
# Settings
self.deployment_name = deployment_name
self.image_name = image_name
self.gcp_service_account = gcp_service_account
self.service_account_name = service_account_name or deployment_name
self.stats_port = stats_port
# xDS bootstrap generator
self.td_bootstrap_image = td_bootstrap_image
self.network = network
self.deployment_template = deployment_template
self.service_account_template = service_account_template
self.debug_use_port_forwarding = debug_use_port_forwarding
# Mutable state
self.deployment: Optional[k8s.V1Deployment] = None
self.service_account: Optional[k8s.V1ServiceAccount] = None
self.port_forwarder = None
def run(self,
*,
server_target,
rpc='UnaryCall',
qps=25,
secure_mode=False,
print_response=False) -> XdsTestClient:
super().run()
# TODO(sergiitk): make rpc UnaryCall enum or get it from proto
# Create service account
self.service_account = self._create_service_account(
self.service_account_template,
service_account_name=self.service_account_name,
namespace_name=self.k8s_namespace.name,
gcp_service_account=self.gcp_service_account)
# Always create a new deployment
self.deployment = self._create_deployment(
self.deployment_template,
deployment_name=self.deployment_name,
image_name=self.image_name,
namespace_name=self.k8s_namespace.name,
service_account_name=self.service_account_name,
td_bootstrap_image=self.td_bootstrap_image,
network_name=self.network,
stats_port=self.stats_port,
server_target=server_target,
rpc=rpc,
qps=qps,
secure_mode=secure_mode,
print_response=print_response)
self._wait_deployment_with_available_replicas(self.deployment_name)
# Load test client pod. We need only one client at the moment
pod = self.k8s_namespace.list_deployment_pods(self.deployment)[0]
self._wait_pod_started(pod.metadata.name)
pod_ip = pod.status.pod_ip
rpc_host = None
# Experimental, for local debugging.
if self.debug_use_port_forwarding:
logger.info('Enabling port forwarding from %s:%s', pod_ip,
self.stats_port)
self.port_forwarder = self.k8s_namespace.port_forward_pod(
pod, remote_port=self.stats_port)
rpc_host = self.k8s_namespace.PORT_FORWARD_LOCAL_ADDRESS
return XdsTestClient(ip=pod_ip,
rpc_port=self.stats_port,
server_target=server_target,
rpc_host=rpc_host)
def cleanup(self, *, force=False, force_namespace=False):
if self.port_forwarder:
self.k8s_namespace.port_forward_stop(self.port_forwarder)
self.port_forwarder = None
if self.deployment or force:
self._delete_deployment(self.deployment_name)
self.deployment = None
if self.service_account or force:
self._delete_service_account(self.service_account_name)
self.service_account = None
super().cleanup(force=force_namespace and force)
|
the-stack_0_19719 | import unittest
from pyalink.alink import *
import numpy as np
import pandas as pd
class TestAftSurvivalRegression(unittest.TestCase):
def test_aftsurvivalregression(self):
df = pd.DataFrame([
[1.218, 1.0, "1.560,-0.605"],
[2.949, 0.0, "0.346,2.158"],
[3.627, 0.0, "1.380,0.231"],
[0.273, 1.0, "0.520,1.151"],
[4.199, 0.0, "0.795,-0.226"]])
data = BatchOperator.fromDataframe(df, schemaStr="label double, censor double, features string")
reg = AftSurvivalRegression()\
.setVectorCol("features")\
.setLabelCol("label")\
.setCensorCol("censor")\
.setPredictionCol("result")
pipeline = Pipeline().add(reg)
model = pipeline.fit(data)
model.save().lazyPrint(10)
model.transform(data).print()
pass |
the-stack_0_19720 | #!/usr/bin/env python3
import datetime, os, subprocess, sys, json, jinja2
from fractions import Fraction
from jinja2 import Template
'''
Usage: $ splittimes.py [input.json]
pdfs will be written to current directory
'''
# TODO: This file needs a lot of cleanup. Urgently!!
def main():
with open(sys.argv[1]) as input_data:
json_data = json.loads(input_data.read())
print("Course: " + json_data['name'])
for target_time in json_data['target times']:
print("Computing target time " + target_time + "...")
hh, mm = map(int, target_time.split(':'))
file_name = sys.argv[1]
name = json_data['name']
output_name = json_data['prefix']
points = json_data['points']
calculate_split_times(name, output_name, points, hh, mm)
def compute_between_indices(distances, partial_distances, height_differences, paces_per_section, partial_times, locations, i, j):
data = []
data.append(float(distances[i+1])/1000) # total distance
data.append(float(partial_distances[i])/1000) # partial distance
data.append(float(height_differences[i]*100)/partial_distances[i]) # gradient
data.append(format_time(seconds = int(paces_per_section[i]*1000))) # pace
data.append(format_time(int(paces_per_section[i]*partial_distances[i]))) # section time
data.append(format_time(partial_times[i], True)) # total time
data.append(locations[i+1]) # location
return data
def format_time(seconds, force_hours=False, round_seconds=False):
hours = seconds//3600
seconds -= hours*3600
minutes = seconds//60
seconds -= minutes*60
if round_seconds:
if seconds >= 30:
minutes += 1
if minutes == 60:
hours += 1
minutes = 0
if force_hours or hours > 0:
if round_seconds:
return '%d:%02d' % (hours, minutes)
return '%d:%02d:%02d' % (hours, minutes, seconds)
else:
if round_seconds:
return minutes
return '%d:%02d' % (minutes, seconds)
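# Illustrative examples for format_time (not part of the original script):
#   format_time(75)                        -> '1:15'
#   format_time(3675)                      -> '1:01:15'
#   format_time(3695, round_seconds=True)  -> '1:02'   (35 leftover seconds round the minute up)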
def calculate_split_times(name, output_name, all_points, hours, minutes):
points = 0
distances = []
heights = []
locations = []
locations_abbr = []
markant_distances = []
markant_names = []
markant_abbr = []
markant_sections = []
anchor = []
# key variable that computes the pace according to the gradient
# higher value means higher difference between uphill and downhill speed
alpha = Fraction(6,2)
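    # Worked example (illustrative): with alpha = 3 and a global pace of
    # 6:00 min/km, a section whose gradient is 5 percentage points above the
    # course average is paced at 6:00 * (1 + 3 * 0.05) = 6:54 min/km, and an
    # equally flatter section at 6:00 * (1 - 3 * 0.05) = 5:06 min/km
    # (see the paces_per_section computation below).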
for (i, point) in enumerate(all_points):
points += 1
distances.append(int(1000*point['dist']))
heights.append(point['alt'])
if point['primary']:
markant_sections.append(i)
markant_distances.append(1000*float(point['dist']))
markant_names.append(point['full name'])
markant_abbr.append(point['short name'])
locations_abbr.append(point['short name'])
locations.append(point['full name'])
anchor.append(point['anchor'])
total_time = (hours*60 + minutes)*60
tex_file_name = '{}_{}_{:02d}'.format(output_name, hours, minutes)
all_data = []
#split times for the sections
partial_distances = [distances[i]-distances[i-1] for i in range(1,points)]
height_differences = [heights[i]-heights[i-1] for i in range(1,points)]
total_distance = distances[-1]
global_pace = Fraction(total_time, total_distance)
global_slope = Fraction(heights[-1] - heights[0], total_distance)
paces_per_section = []
for i in range(points - 1):
local_slope = Fraction(height_differences[i], partial_distances[i])
paces_per_section.append(global_pace*(1 + alpha*(local_slope - global_slope)))
partial_times = []
partial = 0
for i in range(len(partial_distances)):
partial += paces_per_section[i]*partial_distances[i]
partial_times.append(int(partial))
data_1 = [['0 km', '0 km' ,'\centercell{---}', '\centercell{---}' ,'0:00 min', '0:00:00 h',locations[0]]]
for i in range(points - 1):
computed_data = compute_between_indices(distances, partial_distances, height_differences, paces_per_section, partial_times, locations, i, i+1)
data_1.append(list(map(lambda x: ('%.1f km' % x[1]) if x[0] <= 1 else ('%.1f \\%%' % x[1]) if x[0] == 2 else ('%s min/km' % x[1]) if x[0] == 3 else ('%s min' % x[1]) if x[0] == 4 else ('%s h' % x[1]) if x[0] == 5 else x[1], list(enumerate(computed_data)))))
all_data.append(data_1)
data_5 = []#['0 km', '0 km' ,'\centercell{---}', '\centercell{---}' ,'0:00 min', '0:00:00 h',locations[0]]]
for i in range(points - 1):
data = [float(distances[i + 1])/1000,
format_time(partial_times[i], True, True),
locations_abbr[i + 1]]
data_5.append(data)
all_data.append(data_5)
partial_times = [0]
partial_time = 0
section = 0
distance_used = 0
height_start = heights[0]
partial_distance = 0
while partial_distance <= distances[-1]-1000:
section_begin = section
distance_used_begin = distance_used
section_distance = 0
while section < len(partial_distances) and partial_distances[section] - distance_used <= 1000 - section_distance:
partial_time += paces_per_section[section]*(partial_distances[section] - distance_used)
section_distance += (partial_distances[section] - distance_used)
distance_used = 0
section += 1
if section_distance < 1000:
partial_time += paces_per_section[section]*(1000 - section_distance)
distance_used += (1000 - section_distance)
partial_times.append(int(partial_time))
if section == len(partial_distances):
section -= 1
height_end = heights[section] + (float(distance_used) / partial_distances[section]) * height_differences[section]
height_start = heights[section_begin] + (float(distance_used_begin) / partial_distances[section_begin]) * height_differences[section_begin]
partial_distance += 1000
data_2 = []
for i in range(1, len(partial_times)):
data = (
str(i).rjust(2),
#str(datetime.timedelta(seconds = partial_times[i]))[:7])
format_time(partial_times[i], True, True))
data_2.append(data)
all_data.append(data_2)
#split times pro 5 kilometer
partial_times = [0]
partial_time = 0
section = 0
distance_used = 0
height_start = heights[0]
partial_distance = 0
while partial_distance <= distances[-1] - 5000:
section_begin = section
distance_used_begin = distance_used
section_distance = 0
while section < len(partial_distances) and partial_distances[section] - distance_used <= 5000 - section_distance:
partial_time += paces_per_section[section]*(partial_distances[section] - distance_used)
section_distance += (partial_distances[section] - distance_used)
distance_used = 0
section += 1
if section_distance < 5000:
partial_time += paces_per_section[section]*(5000 - section_distance)
distance_used += (5000 - section_distance)
partial_times.append(int(partial_time))
if section == len(partial_distances):
section -= 1
height_end = heights[section] + (float(distance_used) / partial_distances[section]) * height_differences[section]
height_start = heights[section_begin] + (float(distance_used_begin) / partial_distances[section_begin]) * height_differences[section_begin]
partial_distance += 5000
data_3 = []
for i in range(1, len(partial_times)):
data =(
5*i,
format_time(partial_times[i], True, True))
data_3.append(data)
all_data.append(data_3)
# split times pro markantem punkt
partial_markant_distances = [markant_distances[i+1]-markant_distances[i] for i in range(len(markant_distances)-1)]
partial_times = [0]
partial_time = 0
section = 0
section_markant = 0
distance_used = 0
height_start = heights[0]
partial_distance = 0
while partial_distance < markant_distances[-1]:
section_begin = section
distance_used_begin = distance_used
section_distance = 0
while section < len(partial_distances) and partial_distances[section] - distance_used < partial_markant_distances[section_markant] - section_distance:
partial_time += paces_per_section[section]*(partial_distances[section] - distance_used)
section_distance += (partial_distances[section] - distance_used)
distance_used = 0
section += 1
if section == len(paces_per_section):
section -= 1
if section_distance < partial_markant_distances[section_markant]:
partial_time += paces_per_section[section]*(partial_markant_distances[section_markant] - section_distance)
distance_used += (partial_markant_distances[section_markant] - section_distance)
partial_times.append(int(partial_time))
height_end = heights[section] + (float(distance_used) / partial_distances[section]) * height_differences[section]
height_start = heights[section_begin] + (float(distance_used_begin) / partial_distances[section_begin]) * height_differences[section_begin]
partial_distance += partial_markant_distances[section_markant]
section_markant += 1
data_4 = []
for i in range(1, len(partial_times)):
data = (
float(markant_distances[i])/1000,
format_time(partial_times[i], True, True),
markant_abbr[i])
data_4.append(data)
all_data.append(data_4)
data_6 = []
for i in range(points - 1):
data_6.append((float(distances[i])/1000, heights[i], locations_abbr[i], anchor[i]))
all_data.append(data_6)
create_pdf(tex_file_name, name, hours, minutes, all_data)
def create_pdf(file_name, name, hours, minutes, all_data):
latex_jinja_env = jinja2.Environment(
block_start_string = '\BLOCK{',
block_end_string = '}',
variable_start_string = '\VAR{',
variable_end_string = '}',
comment_start_string = '\#{',
comment_end_string = '}',
line_statement_prefix = '%%',
line_comment_prefix = '%#',
trim_blocks = True,
autoescape = False,
loader = jinja2.FileSystemLoader(os.path.dirname(os.path.abspath(sys.argv[0])))
)
#pathname = os.path.dirname(sys.argv[0])
#template_path = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), 'template.tex')
#template = latex_jinja_env.get_template(template_path)
template = latex_jinja_env.get_template('template.tex')
print("output: " + file_name + '.tex')
fout = open(file_name + '.tex','w')
fout.write(template.render(
name = name,
hours = hours,
minutes = ('%02d' % minutes),
data1 = all_data[0],
data2 = all_data[2],
data3 = all_data[3],
data4 = all_data[4],
data5 = all_data[1],
data6 = all_data[5]))
fout.close()
os.system('rubber -d %s.tex' % file_name)
os.system('rubber --clean ' + file_name)
os.system('rm %s.tex' % file_name)
print('written to ' + file_name + '.pdf')
if __name__ == '__main__':
main()
|
the-stack_0_19724 | import re
from collections import defaultdict
from heapq import heappop, heappush
with open('23_input.txt', 'r') as f:
lines = [line for line in f]
# lines = [
# "pos=<10,12,12>, r=2",
# "pos=<12,14,12>, r=2",
# "pos=<16,12,12>, r=4",
# "pos=<14,14,14>, r=6",
# "pos=<50,50,50>, r=200",
# "pos=<10,10,10>, r=5",
# ]
def get_data():
return [tuple(map(int, re.findall('-?\d+', line))) for line in lines]
def dist(bot1, bot2=(0, 0, 0)):
return abs(bot1[0] - bot2[0]) + abs(bot1[1] - bot2[1]) + abs(bot1[2] - bot2[2])
bots = get_data()
"""
SHOULD WE TILE BY OCTAHEDRONS INSTEAD OF BOXES?
then the counting of intersections will be trivial
but they are not space filling...
=> consider overlapping octahedrons with centres on a cubic grid with grid length d
and octahedron size determined by Manhattan distance d. In a cube with side length d
the point farthest away from all 8 vertices is the centre and is
sqrt(3 * (d/2)^2) = d*sqrt(3)/2 < d away
"""
"""
start with one massive box that contains all bot centers, which has a side lengh d = 2^n
generate smaller boxes with side length d / 2 and compute the number of bots that intersect it
put all candidate boxes on a heap with the following sorting:
(
n_intersections (bigger first),
box_size (smaller first),
box_coords
)
[if we put box_size first then we'll just consider pixel by pixel and not discard bix boxes]
max_intersections <- inf
candidates <- []
while q not empty:
pop a box
if n_intersections < max_intersections:
continue
if box_size == 1:
if n_intersections > max_intersections:
candidates = [box]
elif n_intersections == max_intersections:
candidates += [box]
else:
subdivide box d -> d/2
for each new box compute n_intersections
put each box into heap
from max_intersections choose point closest to origin
"""
r = 1
while not all(dist(b) <= r for b in bots):
r *= 2
print(sum((dist(bot) - bot[-1]) / r <= 1 for bot in bots))
max_intersections = 0
candidates = []
q = [(-len(bots), r, (0, 0, 0))]
while q:
n, r, (x, y, z) = heappop(q)
print(len(q), n, r, x, y, z, max_intersections)
if n > max_intersections:
continue
if r == 1:
if n < max_intersections:
candidates = [(x, y, z)]
max_intersections = n
elif n == max_intersections:
candidates += [(x, y, z)]
max_intersections = n
else:
r //= 2
for nx in (x - r, x, x + r):
for ny in (y - r, y, y + r):
for nz in (z - r, z, z + r):
nn = 0
for bot in bots:
d = dist((nx, ny, nz), bot)
if (d - bot[-1]) // r <= 0:
nn += 1
heappush(q, (-nn, r, (nx, ny, nz)))
# break
# while q:
# n, r, (x, y, z) = heappop(q)
# print(len(q), ":", n, r, x, y, z)
# for bot in bots:
# d = dist((x, y, z), bot)
# if (d - bot[-1]) / r <= 1:
# print(d / r, bot[-1] / r, (d - bot[-1]) / r)
print(candidates)
# candidates = [(22698921, 59279593, 11772354), (22698922, 59279593, 11772355), (22698921, 59279594, 11772355)]
for c in candidates:
print(dist(c))
print(sum(dist(bot, c) <= bot[-1] for bot in bots))
|
the-stack_0_19726 | import sys
import pytest
import numpy as np
import marshal
from keras.utils.generic_utils import custom_object_scope
from keras.utils.generic_utils import has_arg
from keras.utils.generic_utils import Progbar
from keras.utils.generic_utils import func_dump
from keras.utils.generic_utils import func_load
from keras import activations
from keras import regularizers
def test_progbar():
values_s = [None,
[['key1', 1], ['key2', 1e-4]],
[['key3', 1], ['key2', 1e-4]]]
for target in (len(values_s) - 1, None):
for verbose in (0, 1, 2):
bar = Progbar(target, width=30, verbose=verbose, interval=0.05)
for current, values in enumerate(values_s):
bar.update(current, values=values)
def test_custom_objects_scope():
def custom_fn():
pass
class CustomClass(object):
pass
with custom_object_scope({'CustomClass': CustomClass,
'custom_fn': custom_fn}):
act = activations.get('custom_fn')
assert act == custom_fn
cl = regularizers.get('CustomClass')
assert cl.__class__ == CustomClass
@pytest.mark.parametrize('fn, name, accept_all, expected', [
('f(x)', 'x', False, True),
('f(x)', 'y', False, False),
('f(x)', 'y', True, False),
('f(x, y)', 'y', False, True),
('f(x, y=1)', 'y', False, True),
('f(x, **kwargs)', 'x', False, True),
('f(x, **kwargs)', 'y', False, False),
('f(x, **kwargs)', 'y', True, True),
('f(x, y=1, **kwargs)', 'y', False, True),
# Keyword-only arguments (Python 3 only)
('f(x, *args, y=1)', 'y', False, True),
('f(x, *args, y=1)', 'z', True, False),
('f(x, *, y=1)', 'x', False, True),
('f(x, *, y=1)', 'y', False, True),
# lambda
(lambda x: x, 'x', False, True),
(lambda x: x, 'y', False, False),
(lambda x: x, 'y', True, False),
])
def test_has_arg(fn, name, accept_all, expected):
if isinstance(fn, str):
context = dict()
try:
exec('def {}: pass'.format(fn), context)
except SyntaxError:
if sys.version_info >= (3,):
raise
pytest.skip('Function is not compatible with Python 2')
# Sometimes exec adds builtins to the context
context.pop('__builtins__', None)
fn, = context.values()
assert has_arg(fn, name, accept_all) is expected
@pytest.mark.xfail(sys.version_info < (3, 3),
reason='inspect API does not reveal positional-only arguments')
def test_has_arg_positional_only():
assert has_arg(pow, 'x') is False
@pytest.mark.parametrize(
'test_function_type',
('simple function', 'closured function'))
def test_func_dump_and_load(test_function_type):
if test_function_type == 'simple function':
def test_func():
return r'\u'
elif test_function_type == 'closured function':
def get_test_func():
x = r'\u'
def test_func():
return x
return test_func
test_func = get_test_func()
else:
raise Exception('Unknown test case for test_func_dump_and_load')
serialized = func_dump(test_func)
deserialized = func_load(serialized)
assert deserialized.__code__ == test_func.__code__
assert deserialized.__defaults__ == test_func.__defaults__
assert deserialized.__closure__ == test_func.__closure__
def test_func_dump_and_load_closure():
y = 0
test_func = lambda x: x + y
serialized, _, closure = func_dump(test_func)
deserialized = func_load(serialized, closure=closure)
assert deserialized.__code__ == test_func.__code__
assert deserialized.__defaults__ == test_func.__defaults__
assert deserialized.__closure__ == test_func.__closure__
@pytest.mark.parametrize(
'test_func', [activations.softmax, np.argmax, lambda x: x**2, lambda x: x])
def test_func_dump_and_load_backwards_compat(test_func):
# this test ensures that models serialized prior to version 2.1.2 can still be
# deserialized
# see:
# https://github.com/evhub/keras/blob/2.1.1/keras/utils/generic_utils.py#L166
serialized = marshal.dumps(test_func.__code__).decode('raw_unicode_escape')
deserialized = func_load(serialized, defaults=test_func.__defaults__)
assert deserialized.__code__ == test_func.__code__
assert deserialized.__defaults__ == test_func.__defaults__
assert deserialized.__closure__ == test_func.__closure__
if __name__ == '__main__':
pytest.main([__file__])
|
the-stack_0_19727 | import cv2
import numpy as np
from matplotlib import pyplot as plt
import urllib.request
def read_from_url(url):
"""
This function is used to read an image from a given URL.
Args:
url: Link to the image
Returns:
image: a copy of the image from the URL
"""
url_response = urllib.request.urlopen(url)
image_array = np.array(bytearray(url_response.read()), dtype=np.uint8)
image = cv2.imdecode(image_array, -1)
return image
def plot_histogram(image):
"""
This function is used to plot the histogram for any given image.
Args:
image: input image for plotting histogram
Returns:
displays a histogram for the given input
"""
hist, bins = np.histogram(image.flatten(),256,[0,256])
cdf = hist.cumsum()
cdf_normalized = cdf * hist.max()/ cdf.max()
plt.plot(cdf_normalized, color = 'b')
plt.hist(image.flatten(),256,[0,256], color = 'r')
plt.xlim([0,256])
plt.legend(('cdf','histogram'), loc = 'upper left')
plt.show()
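# --- Illustrative usage sketch (not part of the original module) ---
# The URL below is only a placeholder; any image URL reachable from the
# machine running this code would work.
def _demo_histogram(url="https://example.com/sample.png"):
    """Hypothetical example: fetch an image from a URL and plot its histogram."""
    image = read_from_url(url)
    plot_histogram(image)  # plot_histogram flattens the array, so colour images work too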
|
the-stack_0_19730 | import copy
from typing import Any, Iterator, List, Union
import numpy as np
import torch
from detectron2.layers.roi_align import ROIAlign
from torchvision.ops import RoIPool
class MyMaps(object):
"""# NOTE: This class stores the maps (NOCS, coordinates map, pvnet vector
    maps, offset maps, heatmaps) for all objects in one image, supporting the
    cpu_only option.
Attributes:
        tensor: float Tensor of N,C,H,W, representing N instances in the image.
"""
def __init__(self, tensor: Union[torch.Tensor, np.ndarray], cpu_only: bool = True):
"""
Args:
tensor: float Tensor of N,C,H,W, representing N instances in the image.
cpu_only: keep the maps on cpu even when to(device) is called
"""
device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu")
tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
assert tensor.dim() == 4, tensor.size()
self.image_size = tensor.shape[-2:]
self.tensor = tensor
self.cpu_only = cpu_only
def to(self, device: str, **kwargs) -> "MyMaps":
if not self.cpu_only:
return MyMaps(self.tensor.to(device, **kwargs), cpu_only=False)
else:
return MyMaps(self.tensor.to("cpu", **kwargs), cpu_only=True)
def to_device(self, device: str = "cuda", **kwargs) -> "MyMaps":
# force to device
return MyMaps(self.tensor.to(device, **kwargs), cpu_only=False)
def crop_and_resize(
self,
boxes: torch.Tensor,
map_size: int,
interpolation: str = "bilinear",
) -> torch.Tensor:
"""# NOTE: if self.cpu_only, convert boxes to cpu
Crop each map by the given box, and resize results to (map_size, map_size).
This can be used to prepare training targets.
Args:
boxes (Tensor): Nx4 tensor storing the boxes for each map
map_size (int): the size of the rasterized map.
interpolation (str): bilinear | nearest
Returns:
Tensor:
                A float tensor of shape (N, C, map_size, map_size), where
N is the number of predicted boxes for this image.
"""
assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self))
if self.cpu_only:
device = "cpu"
else:
device = self.tensor.device
batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]
rois = torch.cat([batch_inds, boxes.to(device)], dim=1) # Nx5
maps = self.tensor.to(dtype=torch.float32)
rois = rois.to(device=device)
# on cpu, speed compared to cv2?
if interpolation == "nearest":
op = RoIPool((map_size, map_size), 1.0)
elif interpolation == "bilinear":
op = ROIAlign((map_size, map_size), 1.0, 0, aligned=True)
else:
raise ValueError(f"Unknown interpolation type: {interpolation}")
output = op.forward(maps, rois)
return output
def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "MyMaps":
"""
Returns:
MyMaps: Create a new :class:`MyMaps` by indexing.
The following usage are allowed:
1. `new_maps = maps[3]`: return a `MyMaps` which contains only one map.
2. `new_maps = maps[2:10]`: return a slice of maps.
3. `new_maps = maps[vector]`, where vector is a torch.BoolTensor
with `length = len(maps)`. Nonzero elements in the vector will be selected.
Note that the returned object might share storage with this object,
subject to Pytorch's indexing semantics.
"""
if isinstance(item, int):
return MyMaps(self.tensor[item].view(1, -1))
m = self.tensor[item]
assert m.dim() == 4, "Indexing on MyMaps with {} returns a tensor with shape {}!".format(item, m.shape)
return MyMaps(m)
def __iter__(self) -> torch.Tensor:
yield from self.tensor
def __repr__(self) -> str:
s = self.__class__.__name__ + "("
s += "num_instances={})".format(len(self.tensor))
return s
def __len__(self) -> int:
return self.tensor.shape[0]
def nonempty(self) -> torch.Tensor:
"""Find maps that are non-empty.
Returns:
Tensor: a BoolTensor which represents
whether each map is empty (False) or non-empty (True).
"""
return self.tensor.flatten(1).any(dim=1)
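# --- Illustrative usage sketch (not part of the original module) ---
def _example_crop_and_resize(map_size: int = 28) -> torch.Tensor:
    """Hypothetical example: crop two random per-instance maps to map_size x map_size."""
    maps = MyMaps(torch.rand(2, 3, 64, 64), cpu_only=True)    # 2 instances, 3 channels, 64x64 image
    boxes = torch.tensor([[0.0, 0.0, 63.0, 63.0],             # x1, y1, x2, y2 for instance 0
                          [10.0, 10.0, 50.0, 50.0]])          # x1, y1, x2, y2 for instance 1
    return maps.crop_and_resize(boxes, map_size, interpolation="bilinear")  # shape (2, 3, 28, 28)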
|
the-stack_0_19731 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
# pylint: disable=g-classes-have-attributes
"""Callbacks: utilities called at certain points during model training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import collections
import copy
import csv
import io
import json
import os
import re
import time
import numpy as np
import six
from keras import backend as K
from keras.distribute import distributed_file_utils
from keras.distribute import worker_training_state
from keras.optimizer_v2 import learning_rate_schedule
from keras.utils import generic_utils
from keras.utils import tf_utils
from keras.utils import version_utils
from keras.utils.data_utils import Sequence
from keras.utils.generic_utils import Progbar
from keras.utils.io_utils import path_to_string
from keras.utils.mode_keys import ModeKeys
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
try:
import requests
except ImportError:
requests = None
# Note: `configure_callbacks` is only used in TF1.
def configure_callbacks(callbacks,
model,
do_validation=False,
batch_size=None,
epochs=None,
steps_per_epoch=None,
samples=None,
verbose=1,
count_mode='steps',
mode=ModeKeys.TRAIN):
"""Configures callbacks for use in various training loops.
Args:
callbacks: List of Callbacks.
model: Model being trained.
do_validation: Whether or not validation loop will be run.
batch_size: Number of samples per batch.
epochs: Number of epoch to train.
steps_per_epoch: Number of batches to run per training epoch.
samples: Number of training samples.
verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger.
count_mode: One of 'steps' or 'samples'. Per-batch or per-sample count.
mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.
Which loop mode to configure callbacks for.
Returns:
Instance of CallbackList used to control all Callbacks.
"""
# Check if callbacks have already been configured.
if isinstance(callbacks, CallbackList):
return callbacks
if not callbacks:
callbacks = []
# Add additional callbacks during training.
if mode == ModeKeys.TRAIN:
model.history = History()
callbacks = [BaseLogger()] + (callbacks or []) + [model.history]
if verbose:
callbacks.append(ProgbarLogger(count_mode))
callback_list = CallbackList(callbacks)
# Set callback model
callback_model = model._get_callback_model() # pylint: disable=protected-access
callback_list.set_model(callback_model)
set_callback_parameters(
callback_list,
model,
do_validation=do_validation,
batch_size=batch_size,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
samples=samples,
verbose=verbose,
mode=mode)
callback_list.model.stop_training = False
return callback_list
def set_callback_parameters(callback_list,
model,
do_validation=False,
batch_size=None,
epochs=None,
steps_per_epoch=None,
samples=None,
verbose=1,
mode=ModeKeys.TRAIN):
"""Sets callback parameters.
Args:
callback_list: CallbackList instance.
model: Model being trained.
do_validation: Whether or not validation loop will be run.
batch_size: Number of samples per batch.
epochs: Number of epoch to train.
steps_per_epoch: Number of batches to run per training epoch.
samples: Number of training samples.
verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger.
mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.
Which loop mode to configure callbacks for.
"""
metric_names = model.metrics_names
for cbk in callback_list:
if isinstance(cbk, (BaseLogger, ProgbarLogger)):
cbk.stateful_metrics = metric_names[1:] # Exclude `loss`
# Set callback parameters
callback_metrics = []
# When we have deferred build scenario with iterator input, we will compile
# when we standardize first batch of data.
if mode != ModeKeys.PREDICT:
callback_metrics = copy.copy(metric_names)
if do_validation:
callback_metrics += ['val_' + n for n in metric_names]
callback_params = {
'batch_size': batch_size,
'epochs': epochs,
'steps': steps_per_epoch,
'samples': samples,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
}
callback_list.set_params(callback_params)
def _is_generator_like(data):
"""Checks if data is a generator, Sequence, or Iterator."""
return (hasattr(data, '__next__') or hasattr(data, 'next') or isinstance(
data, (Sequence, tf.compat.v1.data.Iterator, tf.data.Iterator)))
def make_logs(model, logs, outputs, mode, prefix=''):
"""Computes logs for sending to `on_batch_end` methods."""
metric_names = model.metrics_names
if mode in {ModeKeys.TRAIN, ModeKeys.TEST} and metric_names:
for label, output in zip(metric_names, outputs):
logs[prefix + label] = output
else:
logs['outputs'] = outputs
return logs
@keras_export('keras.callbacks.CallbackList')
class CallbackList(object):
"""Container abstracting a list of callbacks."""
def __init__(self,
callbacks=None,
add_history=False,
add_progbar=False,
model=None,
**params):
"""Container for `Callback` instances.
This object wraps a list of `Callback` instances, making it possible
to call them all at once via a single endpoint
(e.g. `callback_list.on_epoch_end(...)`).
Args:
callbacks: List of `Callback` instances.
add_history: Whether a `History` callback should be added, if one does not
already exist in the `callbacks` list.
add_progbar: Whether a `ProgbarLogger` callback should be added, if one
does not already exist in the `callbacks` list.
model: The `Model` these callbacks are used with.
**params: If provided, parameters will be passed to each `Callback` via
`Callback.set_params`.
"""
self.callbacks = tf.nest.flatten(callbacks) if callbacks else []
self._add_default_callbacks(add_history, add_progbar)
if model:
self.set_model(model)
if params:
self.set_params(params)
# Performance optimization: determines if batch hooks need to be called.
# pylint: disable=protected-access
self._should_call_train_batch_hooks = any(
cb._implements_train_batch_hooks() for cb in self.callbacks)
self._should_call_test_batch_hooks = any(
cb._implements_test_batch_hooks() for cb in self.callbacks)
self._should_call_predict_batch_hooks = any(
cb._implements_predict_batch_hooks() for cb in self.callbacks)
# pylint: enable=protected-access
# Performance check: Check batch hooks for slowness compared to batch time.
# Only run check for custom callbacks (i.e. not present in this file).
self._check_timing = any([cbk.__class__.__name__ not in globals()
for cbk in self.callbacks])
self._num_batches_for_timing_check = 5
self._hook_times = {}
self._batch_start_time = None
self._batch_times = []
def _add_default_callbacks(self, add_history, add_progbar):
"""Adds `Callback`s that are always present."""
self._progbar = None
self._history = None
for cb in self.callbacks:
if isinstance(cb, ProgbarLogger):
self._progbar = cb
elif isinstance(cb, History):
self._history = cb
if self._progbar is None and add_progbar:
self._progbar = ProgbarLogger(count_mode='steps')
self.callbacks.insert(0, self._progbar)
if self._history is None and add_history:
self._history = History()
self.callbacks.append(self._history)
def append(self, callback):
self.callbacks.append(callback)
def set_params(self, params):
self.params = params
for callback in self.callbacks:
callback.set_params(params)
def set_model(self, model):
self.model = model
if self._history:
model.history = self._history
for callback in self.callbacks:
callback.set_model(model)
def _call_batch_hook(self, mode, hook, batch, logs=None):
"""Helper function for all batch_{begin | end} methods."""
if not self.callbacks:
return
if hook == 'begin':
self._call_batch_begin_hook(mode, batch, logs)
elif hook == 'end':
self._call_batch_end_hook(mode, batch, logs)
else:
raise ValueError('Unrecognized hook: {}'.format(hook))
def _call_batch_begin_hook(self, mode, batch, logs):
"""Helper function for `on_*_batch_begin` methods."""
hook_name = 'on_{mode}_batch_begin'.format(mode=mode)
self._call_batch_hook_helper(hook_name, batch, logs)
if self._check_timing:
self._batch_start_time = time.time()
def _call_batch_end_hook(self, mode, batch, logs):
"""Helper function for `on_*_batch_end` methods."""
hook_name = 'on_{mode}_batch_end'.format(mode=mode)
if self._check_timing and batch >= 1:
batch_time = time.time() - self._batch_start_time
self._batch_times.append(batch_time)
self._call_batch_hook_helper(hook_name, batch, logs)
if len(self._batch_times) >= self._num_batches_for_timing_check:
end_hook_name = hook_name
begin_hook_name = 'on_{mode}_batch_begin'.format(mode=mode)
avg_batch_time = sum(self._batch_times) / len(self._batch_times)
avg_end_hook_time = sum(self._hook_times[end_hook_name]) / len(
self._hook_times[end_hook_name])
avg_begin_hook_time = sum(self._hook_times[begin_hook_name]) / len(
self._hook_times[begin_hook_name])
threshold_time = 1.0 * avg_batch_time
warning_msg = ('Callback method `{hook}` is slow compared to '
'the batch time (batch time: {batch_time:.4f}s vs '
'`{hook}` time: {hook_time:.4f}s). Check your callbacks.')
if avg_begin_hook_time > threshold_time:
logging.warning(warning_msg.format(
hook=begin_hook_name,
batch_time=avg_batch_time,
hook_time=avg_begin_hook_time))
if avg_end_hook_time > threshold_time:
logging.warning(warning_msg.format(
hook=end_hook_name,
batch_time=avg_batch_time,
hook_time=avg_end_hook_time))
self._check_timing = False
self._batch_start_time = None
self._batch_times = []
self._hook_times = {}
def _call_batch_hook_helper(self, hook_name, batch, logs):
"""Helper function for `on_*_batch_*` methods."""
logs = logs or {}
numpy_logs = None
if self._check_timing:
start_time = time.time()
for callback in self.callbacks:
hook = getattr(callback, hook_name)
if getattr(callback, '_supports_tf_logs', False):
hook(batch, logs)
else:
if numpy_logs is None: # Only convert once.
numpy_logs = tf_utils.to_numpy_or_python_type(logs)
hook(batch, numpy_logs)
if self._check_timing:
if hook_name not in self._hook_times:
self._hook_times[hook_name] = []
self._hook_times[hook_name].append(time.time() - start_time)
def _call_begin_hook(self, mode):
"""Helper function for on_{train|test|predict}_begin methods."""
if mode == ModeKeys.TRAIN:
self.on_train_begin()
elif mode == ModeKeys.TEST:
self.on_test_begin()
else:
self.on_predict_begin()
def _call_end_hook(self, mode):
"""Helper function for on_{train|test|predict}_end methods."""
if mode == ModeKeys.TRAIN:
self.on_train_end()
elif mode == ModeKeys.TEST:
self.on_test_end()
else:
self.on_predict_end()
def on_batch_begin(self, batch, logs=None):
if self._should_call_train_batch_hooks:
self._call_batch_hook(ModeKeys.TRAIN, 'begin', batch, logs=logs)
def on_batch_end(self, batch, logs=None):
if self._should_call_train_batch_hooks:
self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs)
def on_epoch_begin(self, epoch, logs=None):
"""Calls the `on_epoch_begin` methods of its callbacks.
This function should only be called during TRAIN mode.
Args:
epoch: Integer, index of epoch.
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
logs = logs or {}
numpy_logs = None
for callback in self.callbacks:
if getattr(callback, '_supports_tf_logs', False):
callback.on_epoch_begin(epoch, logs)
else:
if numpy_logs is None: # Only convert once.
numpy_logs = tf_utils.to_numpy_or_python_type(logs)
callback.on_epoch_begin(epoch, numpy_logs)
def on_epoch_end(self, epoch, logs=None):
"""Calls the `on_epoch_end` methods of its callbacks.
This function should only be called during TRAIN mode.
Args:
epoch: Integer, index of epoch.
logs: Dict, metric results for this training epoch, and for the
validation epoch if validation is performed. Validation result keys
are prefixed with `val_`.
"""
logs = logs or {}
numpy_logs = None
for callback in self.callbacks:
if getattr(callback, '_supports_tf_logs', False):
callback.on_epoch_end(epoch, logs)
else:
if numpy_logs is None: # Only convert once.
numpy_logs = tf_utils.to_numpy_or_python_type(logs)
callback.on_epoch_end(epoch, numpy_logs)
def on_train_batch_begin(self, batch, logs=None):
"""Calls the `on_train_batch_begin` methods of its callbacks.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict, contains the return value of `model.train_step`. Typically,
the values of the `Model`'s metrics are returned. Example:
`{'loss': 0.2, 'accuracy': 0.7}`.
"""
if self._should_call_train_batch_hooks:
self._call_batch_hook(ModeKeys.TRAIN, 'begin', batch, logs=logs)
def on_train_batch_end(self, batch, logs=None):
"""Calls the `on_train_batch_end` methods of its callbacks.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch.
"""
if self._should_call_train_batch_hooks:
self._call_batch_hook(ModeKeys.TRAIN, 'end', batch, logs=logs)
def on_test_batch_begin(self, batch, logs=None):
"""Calls the `on_test_batch_begin` methods of its callbacks.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict, contains the return value of `model.test_step`. Typically,
the values of the `Model`'s metrics are returned. Example:
`{'loss': 0.2, 'accuracy': 0.7}`.
"""
if self._should_call_test_batch_hooks:
self._call_batch_hook(ModeKeys.TEST, 'begin', batch, logs=logs)
def on_test_batch_end(self, batch, logs=None):
"""Calls the `on_test_batch_end` methods of its callbacks.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch.
"""
if self._should_call_test_batch_hooks:
self._call_batch_hook(ModeKeys.TEST, 'end', batch, logs=logs)
def on_predict_batch_begin(self, batch, logs=None):
"""Calls the `on_predict_batch_begin` methods of its callbacks.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict, contains the return value of `model.predict_step`,
it typically returns a dict with a key 'outputs' containing
the model's outputs.
"""
if self._should_call_predict_batch_hooks:
self._call_batch_hook(ModeKeys.PREDICT, 'begin', batch, logs=logs)
def on_predict_batch_end(self, batch, logs=None):
"""Calls the `on_predict_batch_end` methods of its callbacks.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch.
"""
if self._should_call_predict_batch_hooks:
self._call_batch_hook(ModeKeys.PREDICT, 'end', batch, logs=logs)
def on_train_begin(self, logs=None):
"""Calls the `on_train_begin` methods of its callbacks.
Args:
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
logs = logs or {}
numpy_logs = None
for callback in self.callbacks:
if getattr(callback, '_supports_tf_logs', False):
callback.on_train_begin(logs)
else:
if numpy_logs is None: # Only convert once.
numpy_logs = tf_utils.to_numpy_or_python_type(logs)
callback.on_train_begin(numpy_logs)
def on_train_end(self, logs=None):
"""Calls the `on_train_end` methods of its callbacks.
Args:
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
logs = logs or {}
numpy_logs = None
for callback in self.callbacks:
if getattr(callback, '_supports_tf_logs', False):
callback.on_train_end(logs)
else:
if numpy_logs is None: # Only convert once.
numpy_logs = tf_utils.to_numpy_or_python_type(logs)
callback.on_train_end(numpy_logs)
def on_test_begin(self, logs=None):
"""Calls the `on_test_begin` methods of its callbacks.
Args:
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
logs = logs or {}
numpy_logs = None
for callback in self.callbacks:
if getattr(callback, '_supports_tf_logs', False):
callback.on_test_begin(logs)
else:
if numpy_logs is None: # Only convert once.
numpy_logs = tf_utils.to_numpy_or_python_type(logs)
callback.on_test_begin(numpy_logs)
def on_test_end(self, logs=None):
"""Calls the `on_test_end` methods of its callbacks.
Args:
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
logs = logs or {}
numpy_logs = None
for callback in self.callbacks:
if getattr(callback, '_supports_tf_logs', False):
callback.on_test_end(logs)
else:
if numpy_logs is None: # Only convert once.
numpy_logs = tf_utils.to_numpy_or_python_type(logs)
callback.on_test_end(numpy_logs)
def on_predict_begin(self, logs=None):
"""Calls the 'on_predict_begin` methods of its callbacks.
Args:
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
logs = logs or {}
numpy_logs = None
for callback in self.callbacks:
if getattr(callback, '_supports_tf_logs', False):
callback.on_predict_begin(logs)
else:
if numpy_logs is None: # Only convert once.
numpy_logs = tf_utils.to_numpy_or_python_type(logs)
callback.on_predict_begin(numpy_logs)
def on_predict_end(self, logs=None):
"""Calls the `on_predict_end` methods of its callbacks.
Args:
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
logs = logs or {}
numpy_logs = None
for callback in self.callbacks:
if getattr(callback, '_supports_tf_logs', False):
callback.on_predict_end(logs)
else:
if numpy_logs is None: # Only convert once.
numpy_logs = tf_utils.to_numpy_or_python_type(logs)
callback.on_predict_end(numpy_logs)
def __iter__(self):
return iter(self.callbacks)
@keras_export('keras.callbacks.Callback')
class Callback(object):
"""Abstract base class used to build new callbacks.
Attributes:
params: Dict. Training parameters
(eg. verbosity, batch size, number of epochs...).
model: Instance of `keras.models.Model`.
Reference of the model being trained.
The `logs` dictionary that callback methods
take as argument will contain keys for quantities relevant to
the current batch or epoch (see method-specific docstrings).
"""
def __init__(self):
self.validation_data = None # pylint: disable=g-missing-from-attributes
self.model = None
# Whether this Callback should only run on the chief worker in a
# Multi-Worker setting.
# TODO(omalleyt): Make this attr public once solution is stable.
self._chief_worker_only = None
self._supports_tf_logs = False
def set_params(self, params):
self.params = params
def set_model(self, model):
self.model = model
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_batch_begin(self, batch, logs=None):
"""A backwards compatibility alias for `on_train_batch_begin`."""
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_batch_end(self, batch, logs=None):
"""A backwards compatibility alias for `on_train_batch_end`."""
@doc_controls.for_subclass_implementers
def on_epoch_begin(self, epoch, logs=None):
"""Called at the start of an epoch.
Subclasses should override for any actions to run. This function should only
be called during TRAIN mode.
Args:
epoch: Integer, index of epoch.
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
@doc_controls.for_subclass_implementers
def on_epoch_end(self, epoch, logs=None):
"""Called at the end of an epoch.
Subclasses should override for any actions to run. This function should only
be called during TRAIN mode.
Args:
epoch: Integer, index of epoch.
logs: Dict, metric results for this training epoch, and for the
validation epoch if validation is performed. Validation result keys
are prefixed with `val_`. For training epoch, the values of the
`Model`'s metrics are returned. Example : `{'loss': 0.2, 'acc': 0.7}`.
"""
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_train_batch_begin(self, batch, logs=None):
"""Called at the beginning of a training batch in `fit` methods.
Subclasses should override for any actions to run.
Note that if the `steps_per_execution` argument to `compile` in
`tf.keras.Model` is set to `N`, this method will only be called every `N`
batches.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict, contains the return value of `model.train_step`. Typically,
the values of the `Model`'s metrics are returned. Example:
`{'loss': 0.2, 'accuracy': 0.7}`.
"""
# For backwards compatibility.
self.on_batch_begin(batch, logs=logs)
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_train_batch_end(self, batch, logs=None):
"""Called at the end of a training batch in `fit` methods.
Subclasses should override for any actions to run.
Note that if the `steps_per_execution` argument to `compile` in
`tf.keras.Model` is set to `N`, this method will only be called every `N`
batches.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch.
"""
# For backwards compatibility.
self.on_batch_end(batch, logs=logs)
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_test_batch_begin(self, batch, logs=None):
"""Called at the beginning of a batch in `evaluate` methods.
Also called at the beginning of a validation batch in the `fit`
methods, if validation data is provided.
Subclasses should override for any actions to run.
Note that if the `steps_per_execution` argument to `compile` in
`tf.keras.Model` is set to `N`, this method will only be called every `N`
batches.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict, contains the return value of `model.test_step`. Typically,
the values of the `Model`'s metrics are returned. Example:
`{'loss': 0.2, 'accuracy': 0.7}`.
"""
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_test_batch_end(self, batch, logs=None):
"""Called at the end of a batch in `evaluate` methods.
Also called at the end of a validation batch in the `fit`
methods, if validation data is provided.
Subclasses should override for any actions to run.
Note that if the `steps_per_execution` argument to `compile` in
`tf.keras.Model` is set to `N`, this method will only be called every `N`
batches.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch.
"""
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_predict_batch_begin(self, batch, logs=None):
"""Called at the beginning of a batch in `predict` methods.
Subclasses should override for any actions to run.
Note that if the `steps_per_execution` argument to `compile` in
`tf.keras.Model` is set to `N`, this method will only be called every `N`
batches.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict, contains the return value of `model.predict_step`,
it typically returns a dict with a key 'outputs' containing
the model's outputs.
"""
@doc_controls.for_subclass_implementers
@generic_utils.default
def on_predict_batch_end(self, batch, logs=None):
"""Called at the end of a batch in `predict` methods.
Subclasses should override for any actions to run.
Note that if the `steps_per_execution` argument to `compile` in
`tf.keras.Model` is set to `N`, this method will only be called every `N`
batches.
Args:
batch: Integer, index of batch within the current epoch.
logs: Dict. Aggregated metric results up until this batch.
"""
@doc_controls.for_subclass_implementers
def on_train_begin(self, logs=None):
"""Called at the beginning of training.
Subclasses should override for any actions to run.
Args:
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
@doc_controls.for_subclass_implementers
def on_train_end(self, logs=None):
"""Called at the end of training.
Subclasses should override for any actions to run.
Args:
logs: Dict. Currently the output of the last call to `on_epoch_end()`
is passed to this argument for this method but that may change in
the future.
"""
@doc_controls.for_subclass_implementers
def on_test_begin(self, logs=None):
"""Called at the beginning of evaluation or validation.
Subclasses should override for any actions to run.
Args:
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
@doc_controls.for_subclass_implementers
def on_test_end(self, logs=None):
"""Called at the end of evaluation or validation.
Subclasses should override for any actions to run.
Args:
logs: Dict. Currently the output of the last call to
`on_test_batch_end()` is passed to this argument for this method
but that may change in the future.
"""
@doc_controls.for_subclass_implementers
def on_predict_begin(self, logs=None):
"""Called at the beginning of prediction.
Subclasses should override for any actions to run.
Args:
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
@doc_controls.for_subclass_implementers
def on_predict_end(self, logs=None):
"""Called at the end of prediction.
Subclasses should override for any actions to run.
Args:
logs: Dict. Currently no data is passed to this argument for this method
but that may change in the future.
"""
def _implements_train_batch_hooks(self):
"""Determines if this Callback should be called for each train batch."""
return (not generic_utils.is_default(self.on_batch_begin) or
not generic_utils.is_default(self.on_batch_end) or
not generic_utils.is_default(self.on_train_batch_begin) or
not generic_utils.is_default(self.on_train_batch_end))
def _implements_test_batch_hooks(self):
"""Determines if this Callback should be called for each test batch."""
return (not generic_utils.is_default(self.on_test_batch_begin) or
not generic_utils.is_default(self.on_test_batch_end))
def _implements_predict_batch_hooks(self):
"""Determines if this Callback should be called for each predict batch."""
return (not generic_utils.is_default(self.on_predict_batch_begin) or
not generic_utils.is_default(self.on_predict_batch_end))
@keras_export('keras.callbacks.BaseLogger')
class BaseLogger(Callback):
"""Callback that accumulates epoch averages of metrics.
This callback is automatically applied to every Keras model.
Args:
stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over an epoch.
Metrics in this list will be logged as-is in `on_epoch_end`.
All others will be averaged in `on_epoch_end`.
"""
def __init__(self, stateful_metrics=None):
super(BaseLogger, self).__init__()
self.stateful_metrics = set(stateful_metrics or [])
def on_epoch_begin(self, epoch, logs=None):
self.seen = 0
self.totals = {}
def on_batch_end(self, batch, logs=None):
logs = logs or {}
batch_size = logs.get('size', 0)
# In case of distribution strategy we can potentially run multiple steps
# at the same time, we should account for that in the `seen` calculation.
num_steps = logs.get('num_steps', 1)
self.seen += batch_size * num_steps
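# Stateful metrics are reported as-is; all other metrics are accumulated
# weighted by batch size and averaged over the epoch in `on_epoch_end`.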
for k, v in logs.items():
if k in self.stateful_metrics:
self.totals[k] = v
else:
if k in self.totals:
self.totals[k] += v * batch_size
else:
self.totals[k] = v * batch_size
def on_epoch_end(self, epoch, logs=None):
if logs is not None:
for k in self.params['metrics']:
if k in self.totals:
# Make value available to next callbacks.
if k in self.stateful_metrics:
logs[k] = self.totals[k]
else:
logs[k] = self.totals[k] / self.seen
@keras_export('keras.callbacks.TerminateOnNaN')
class TerminateOnNaN(Callback):
"""Callback that terminates training when a NaN loss is encountered.
"""
def __init__(self):
super(TerminateOnNaN, self).__init__()
self._supports_tf_logs = True
def on_batch_end(self, batch, logs=None):
logs = logs or {}
loss = logs.get('loss')
if loss is not None:
loss = tf_utils.to_numpy_or_python_type(loss)
if np.isnan(loss) or np.isinf(loss):
print('Batch %d: Invalid loss, terminating training' % (batch))
self.model.stop_training = True
@keras_export('keras.callbacks.ProgbarLogger')
class ProgbarLogger(Callback):
"""Callback that prints metrics to stdout.
Args:
count_mode: One of `"steps"` or `"samples"`.
Whether the progress bar should
count samples seen or steps (batches) seen.
stateful_metrics: Iterable of string names of metrics that
should *not* be averaged over an epoch.
Metrics in this list will be logged as-is.
All others will be averaged over time (e.g. loss, etc).
If not provided, defaults to the `Model`'s metrics.
Raises:
ValueError: In case of invalid `count_mode`.
"""
def __init__(self, count_mode='samples', stateful_metrics=None):
super(ProgbarLogger, self).__init__()
self._supports_tf_logs = True
if count_mode == 'samples':
self.use_steps = False
elif count_mode == 'steps':
self.use_steps = True
else:
raise ValueError('Unknown `count_mode`: ' + str(count_mode))
# Defaults to all Model's metrics except for loss.
self.stateful_metrics = set(stateful_metrics) if stateful_metrics else None
self.seen = 0
self.progbar = None
self.target = None
self.verbose = 1
self.epochs = 1
self._train_step, self._test_step, self._predict_step = None, None, None
self._call_batch_hooks = True
self._called_in_fit = False
def set_params(self, params):
self.verbose = params['verbose']
self.epochs = params['epochs']
if self.use_steps and 'steps' in params:
self.target = params['steps']
elif not self.use_steps and 'samples' in params:
self.target = params['samples']
else:
self.target = None # Will be inferred at the end of the first epoch.
self._call_batch_hooks = self.verbose == 1
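# If the target number of steps/samples is unknown, try to infer it later
# from the model's iteration counters; if those are unavailable, keep
# per-batch hooks enabled regardless of verbosity.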
if self.target is None:
try:
self._train_step = self.model._train_counter # pylint: disable=protected-access
self._test_step = self.model._test_counter # pylint: disable=protected-access
self._predict_step = self.model._predict_counter # pylint: disable=protected-access
except AttributeError:
self._call_batch_hooks = True
def on_train_begin(self, logs=None):
# When this logger is called inside `fit`, validation is silent.
self._called_in_fit = True
def on_test_begin(self, logs=None):
if not self._called_in_fit:
self._reset_progbar()
self._maybe_init_progbar()
def on_predict_begin(self, logs=None):
self._reset_progbar()
self._maybe_init_progbar()
def on_epoch_begin(self, epoch, logs=None):
self._reset_progbar()
self._maybe_init_progbar()
if self.verbose and self.epochs > 1:
print('Epoch %d/%d' % (epoch + 1, self.epochs))
def on_train_batch_end(self, batch, logs=None):
self._batch_update_progbar(batch, logs)
def on_test_batch_end(self, batch, logs=None):
if not self._called_in_fit:
self._batch_update_progbar(batch, logs)
def on_predict_batch_end(self, batch, logs=None):
# Don't pass prediction results.
self._batch_update_progbar(batch, None)
def on_epoch_end(self, epoch, logs=None):
self._finalize_progbar(logs, self._train_step)
def on_test_end(self, logs=None):
if not self._called_in_fit:
self._finalize_progbar(logs, self._test_step)
def on_predict_end(self, logs=None):
self._finalize_progbar(logs, self._predict_step)
def _reset_progbar(self):
self.seen = 0
self.progbar = None
def _maybe_init_progbar(self):
if self.stateful_metrics is None:
if self.model:
self.stateful_metrics = set(m.name for m in self.model.metrics)
else:
self.stateful_metrics = set()
if self.progbar is None:
self.progbar = Progbar(
target=self.target,
verbose=self.verbose,
stateful_metrics=self.stateful_metrics,
unit_name='step' if self.use_steps else 'sample')
def _implements_train_batch_hooks(self):
return self._call_batch_hooks
def _implements_test_batch_hooks(self):
return self._call_batch_hooks
def _implements_predict_batch_hooks(self):
return self._call_batch_hooks
def _batch_update_progbar(self, batch, logs=None):
"""Updates the progbar."""
logs = logs or {}
self._maybe_init_progbar()
if self.use_steps:
self.seen = batch + 1 # One-indexed.
else:
# v1 path only.
logs = copy.copy(logs)
batch_size = logs.pop('size', 0)
num_steps = logs.pop('num_steps', 1)
logs.pop('batch', None)
add_seen = num_steps * batch_size
self.seen += add_seen
if self.verbose == 1:
# Only block async when verbose = 1.
logs = tf_utils.to_numpy_or_python_type(logs)
self.progbar.update(self.seen, list(logs.items()), finalize=False)
def _finalize_progbar(self, logs, counter):
logs = tf_utils.to_numpy_or_python_type(logs or {})
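# If the target was never known, infer it from the model's step counter
# (scaled to samples when not counting steps), falling back to the number of
# items seen so far.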
if self.target is None:
if counter is not None:
counter = counter.numpy()
if not self.use_steps:
counter *= logs.get('size', 1)
self.target = counter or self.seen
self.progbar.target = self.target
self.progbar.update(self.target, list(logs.items()), finalize=True)
@keras_export('keras.callbacks.History')
class History(Callback):
"""Callback that records events into a `History` object.
This callback is automatically applied to
every Keras model. The `History` object
gets returned by the `fit` method of models.
"""
def __init__(self):
super(History, self).__init__()
self.history = {}
def on_train_begin(self, logs=None):
self.epoch = []
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
self.epoch.append(epoch)
for k, v in logs.items():
self.history.setdefault(k, []).append(v)
# Set the history attribute on the model after the epoch ends. This will
# make sure that the state which is set is the latest one.
self.model.history = self
@keras_export('keras.callbacks.ModelCheckpoint')
class ModelCheckpoint(Callback):
"""Callback to save the Keras model or model weights at some frequency.
`ModelCheckpoint` callback is used in conjunction with training using
`model.fit()` to save a model or weights (in a checkpoint file) at some
interval, so the model or weights can be loaded later to continue the training
from the state saved.
A few options this callback provides include:
- Whether to only keep the model that has achieved the "best performance" so
far, or whether to save the model at the end of every epoch regardless of
performance.
- Definition of 'best'; which quantity to monitor and whether it should be
maximized or minimized.
- The frequency it should save at. Currently, the callback supports saving at
the end of every epoch, or after a fixed number of training batches.
- Whether only weights are saved, or the whole model is saved.
Note: If you get `WARNING:tensorflow:Can save best model only with <name>
available, skipping` see the description of the `monitor` argument for
details on how to get this right.
Example:
```python
model.compile(loss=..., optimizer=...,
metrics=['accuracy'])
EPOCHS = 10
checkpoint_filepath = '/tmp/checkpoint'
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_weights_only=True,
monitor='val_accuracy',
mode='max',
save_best_only=True)
# Model weights are saved at the end of every epoch, if it's the best seen
# so far.
model.fit(epochs=EPOCHS, callbacks=[model_checkpoint_callback])
# The model weights (that are considered the best) are loaded into the model.
model.load_weights(checkpoint_filepath)
```
Args:
filepath: string or `PathLike`, path to save the model file. e.g.
filepath = os.path.join(working_dir, 'ckpt', file_name). `filepath`
can contain named formatting options, which will be filled with the value of
`epoch` and keys in `logs` (passed in `on_epoch_end`). For example: if
`filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`, then the model
checkpoints will be saved with the epoch number and the validation loss
in the filename. The directory of the filepath should not be reused by
any other callbacks to avoid conflicts.
monitor: The metric name to monitor. Typically the metrics are set by the
`Model.compile` method. Note:
* Prefix the name with `"val_"` to monitor validation metrics.
* Use `"loss"` or `"val_loss"` to monitor the model's total loss.
* If you specify metrics as strings, like `"accuracy"`, pass the same
string (with or without the `"val_"` prefix).
* If you pass `metrics.Metric` objects, `monitor` should be set to
`metric.name`
* If you're not sure about the metric names you can check the contents
of the `history.history` dictionary returned by
`history = model.fit()`
* Multi-output models set additional prefixes on the metric names.
verbose: verbosity mode, 0 or 1.
save_best_only: if `save_best_only=True`, the model is only saved when it
is considered the "best", and the latest best model according to the
quantity monitored will not be overwritten. If `filepath` doesn't
contain formatting options like `{epoch}` then `filepath` will be
overwritten by each new better model.
mode: one of {'auto', 'min', 'max'}. If `save_best_only=True`, the
decision to overwrite the current save file is made based on either
the maximization or the minimization of the monitored quantity.
For `val_acc`, this should be `max`, for `val_loss` this should be
`min`, etc. In `auto` mode, the direction is automatically inferred
from the name of the monitored quantity.
save_weights_only: if True, then only the model's weights will be saved
(`model.save_weights(filepath)`), else the full model is saved
(`model.save(filepath)`).
save_freq: `'epoch'` or integer. When using `'epoch'`, the callback saves
the model after each epoch. When using integer, the callback saves the
model at end of this many batches. If the `Model` is compiled with
`steps_per_execution=N`, then the saving criteria will be
checked every Nth batch. Note that if the saving isn't aligned to
epochs, the monitored metric may potentially be less reliable (it
could reflect as little as 1 batch, since the metrics get reset every
epoch). Defaults to `'epoch'`.
options: Optional `tf.train.CheckpointOptions` object if
`save_weights_only` is true or optional `tf.saved_model.SaveOptions`
object if `save_weights_only` is false.
**kwargs: Additional arguments for backwards compatibility. Possible key
is `period`.
"""
def __init__(self,
filepath,
monitor='val_loss',
verbose=0,
save_best_only=False,
save_weights_only=False,
mode='auto',
save_freq='epoch',
options=None,
**kwargs):
super(ModelCheckpoint, self).__init__()
self._supports_tf_logs = True
self.monitor = monitor
self.verbose = verbose
self.filepath = path_to_string(filepath)
self.save_best_only = save_best_only
self.save_weights_only = save_weights_only
self.save_freq = save_freq
self.epochs_since_last_save = 0
self._batches_seen_since_last_saving = 0
self._last_batch_seen = 0
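# `options` must match the chosen saving path: `tf.train.CheckpointOptions`
# for weights-only saving, `tf.saved_model.SaveOptions` for whole-model
# saving.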
if save_weights_only:
if options is None or isinstance(
options, tf.train.CheckpointOptions):
self._options = options or tf.train.CheckpointOptions()
else:
raise TypeError('If save_weights_only is True, then `options` must be '
'either None or a tf.train.CheckpointOptions')
else:
if options is None or isinstance(options, tf.saved_model.SaveOptions):
self._options = options or tf.saved_model.SaveOptions()
else:
raise TypeError('If save_weights_only is False, then `options` must be '
'either None or a tf.saved_model.SaveOptions')
# Deprecated field `load_weights_on_restart` is for loading the checkpoint
# file from `filepath` at the start of `model.fit()`
# TODO(rchao): Remove the arg during next breaking release.
if 'load_weights_on_restart' in kwargs:
self.load_weights_on_restart = kwargs['load_weights_on_restart']
logging.warning('`load_weights_on_restart` argument is deprecated. '
'Please use `model.load_weights()` for loading weights '
'before the start of `model.fit()`.')
else:
self.load_weights_on_restart = False
# Deprecated field `period` is for the number of epochs between which
# the model is saved.
if 'period' in kwargs:
self.period = kwargs['period']
logging.warning('`period` argument is deprecated. Please use `save_freq` '
'to specify the frequency in number of batches seen.')
else:
self.period = 1
if mode not in ['auto', 'min', 'max']:
logging.warning('ModelCheckpoint mode %s is unknown, '
'fallback to auto mode.', mode)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
self.best = np.Inf
elif mode == 'max':
self.monitor_op = np.greater
self.best = -np.Inf
else:
if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
self.monitor_op = np.greater
self.best = -np.Inf
else:
self.monitor_op = np.less
self.best = np.Inf
if self.save_freq != 'epoch' and not isinstance(self.save_freq, int):
raise ValueError('Unrecognized save_freq: {}'.format(self.save_freq))
# Only the chief worker writes model checkpoints, but all workers
# restore checkpoint at on_train_begin().
self._chief_worker_only = False
def set_model(self, model):
self.model = model
# Use name matching rather than `isinstance` to avoid circular dependencies.
if (not self.save_weights_only and
not model._is_graph_network and # pylint: disable=protected-access
model.__class__.__name__ != 'Sequential'):
self.save_weights_only = True
def on_train_begin(self, logs=None):
if self.load_weights_on_restart:
filepath_to_load = (
self._get_most_recently_modified_file_matching_pattern(self.filepath))
if (filepath_to_load is not None and
self._checkpoint_exists(filepath_to_load)):
try:
# `filepath` may contain placeholders such as `{epoch:02d}`, and
# thus it attempts to load the most recently modified file with file
# name matching the pattern.
self.model.load_weights(filepath_to_load)
except (IOError, ValueError) as e:
raise ValueError('Error loading file from {}. Reason: {}'.format(
filepath_to_load, e))
def _implements_train_batch_hooks(self):
# Only call batch hooks when saving on batch
return self.save_freq != 'epoch'
def on_train_batch_end(self, batch, logs=None):
if self._should_save_on_batch(batch):
self._save_model(epoch=self._current_epoch, logs=logs)
def on_epoch_begin(self, epoch, logs=None):
self._current_epoch = epoch
def on_epoch_end(self, epoch, logs=None):
self.epochs_since_last_save += 1
# pylint: disable=protected-access
if self.save_freq == 'epoch':
self._save_model(epoch=epoch, logs=logs)
def _should_save_on_batch(self, batch):
"""Handles batch-level saving logic, supports steps_per_execution."""
if self.save_freq == 'epoch':
return False
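# With `steps_per_execution=N`, this hook fires every N batches, so count how
# many batches have elapsed since the last save instead of assuming
# increments of one.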
if batch <= self._last_batch_seen: # New epoch.
add_batches = batch + 1 # batches are zero-indexed.
else:
add_batches = batch - self._last_batch_seen
self._batches_seen_since_last_saving += add_batches
self._last_batch_seen = batch
if self._batches_seen_since_last_saving >= self.save_freq:
self._batches_seen_since_last_saving = 0
return True
return False
def _save_model(self, epoch, logs):
"""Saves the model.
Args:
epoch: the epoch this iteration is in.
logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.
"""
logs = logs or {}
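# An integer `save_freq` means saving was already gated per-batch by
# `_should_save_on_batch`, so the epoch-based `period` check is bypassed.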
if isinstance(self.save_freq,
int) or self.epochs_since_last_save >= self.period:
# Block only when saving interval is reached.
logs = tf_utils.to_numpy_or_python_type(logs)
self.epochs_since_last_save = 0
filepath = self._get_file_path(epoch, logs)
try:
if self.save_best_only:
current = logs.get(self.monitor)
if current is None:
logging.warning('Can save best model only with %s available, '
'skipping.', self.monitor)
else:
if self.monitor_op(current, self.best):
if self.verbose > 0:
print('\nEpoch %05d: %s improved from %0.5f to %0.5f,'
' saving model to %s' % (epoch + 1, self.monitor,
self.best, current, filepath))
self.best = current
if self.save_weights_only:
self.model.save_weights(
filepath, overwrite=True, options=self._options)
else:
self.model.save(filepath, overwrite=True, options=self._options)
else:
if self.verbose > 0:
print('\nEpoch %05d: %s did not improve from %0.5f' %
(epoch + 1, self.monitor, self.best))
else:
if self.verbose > 0:
print('\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))
if self.save_weights_only:
self.model.save_weights(
filepath, overwrite=True, options=self._options)
else:
self.model.save(filepath, overwrite=True, options=self._options)
self._maybe_remove_file()
except IOError as e:
# `e.errno` appears to be `None` so checking the content of `e.args[0]`.
if 'is a directory' in six.ensure_str(e.args[0]).lower():
raise IOError('Please specify a non-directory filepath for '
'ModelCheckpoint. Filepath used is an existing '
'directory: {}'.format(filepath))
# Re-throw the error for any other causes.
raise e
def _get_file_path(self, epoch, logs):
"""Returns the file path for checkpoint."""
# pylint: disable=protected-access
try:
# `filepath` may contain placeholders such as `{epoch:02d}` and
# `{mape:.2f}`. A mismatch between logged metrics and the path's
# placeholders can cause formatting to fail.
file_path = self.filepath.format(epoch=epoch + 1, **logs)
except KeyError as e:
raise KeyError('Failed to format this callback filepath: "{}". '
'Reason: {}'.format(self.filepath, e))
self._write_filepath = distributed_file_utils.write_filepath(
file_path, self.model.distribute_strategy)
return self._write_filepath
def _maybe_remove_file(self):
# Remove the checkpoint directory in multi-worker training where this worker
# should not checkpoint. It is a dummy directory previously saved for sync
# distributed training.
distributed_file_utils.remove_temp_dir_with_filepath(
self._write_filepath, self.model.distribute_strategy)
def _checkpoint_exists(self, filepath):
"""Returns whether the checkpoint `filepath` refers to exists."""
if filepath.endswith('.h5'):
return tf.io.gfile.exists(filepath)
tf_saved_model_exists = tf.io.gfile.exists(filepath)
tf_weights_only_checkpoint_exists = tf.io.gfile.exists(
filepath + '.index')
return tf_saved_model_exists or tf_weights_only_checkpoint_exists
def _get_most_recently_modified_file_matching_pattern(self, pattern):
"""Returns the most recently modified filepath matching pattern.
The pattern may contain python formatting placeholders. If
`tf.train.latest_checkpoint()` does not return None, use that; otherwise,
check for the most recently modified file that matches the pattern.
In the rare case where more than one pattern-matching file shares the most
recent modified time, return the filepath that is largest (by the `>`
operator, lexicographically using the numeric equivalents). This provides a
tie-breaker when multiple files are most recent. Note that a larger
`filepath` can sometimes indicate a later time of modification (for instance,
when epoch/batch is used as a formatting option), but not necessarily (when
accuracy or loss is used). The tie-breaker is a best-effort attempt to return
the most recent file and to avoid a nondeterministic result.
Modified time of a file is obtained with `os.path.getmtime()`.
This utility function is best demonstrated via an example:
```python
file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
test_dir = self.get_temp_dir()
path_pattern = os.path.join(test_dir, file_pattern)
file_paths = [
os.path.join(test_dir, file_name) for file_name in
['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
]
for file_path in file_paths:
# Write something to each of the files
self.assertEqual(
_get_most_recently_modified_file_matching_pattern(path_pattern),
file_paths[-1])
```
Args:
pattern: The file pattern that may optionally contain python placeholder
such as `{epoch:02d}`.
Returns:
The most recently modified file's full filepath matching `pattern`. If
`pattern` does not contain any placeholder, this returns the filepath that
exactly matches `pattern`. Returns `None` if no match is found.
"""
dir_name = os.path.dirname(pattern)
base_name = os.path.basename(pattern)
base_name_regex = '^' + re.sub(r'{.*}', r'.*', base_name) + '$'
# If tf.train.latest_checkpoint tells us there exists a latest checkpoint,
# use that as it is more robust than `os.path.getmtime()`.
latest_tf_checkpoint = tf.train.latest_checkpoint(dir_name)
if latest_tf_checkpoint is not None and re.match(
base_name_regex, os.path.basename(latest_tf_checkpoint)):
return latest_tf_checkpoint
latest_mod_time = 0
file_path_with_latest_mod_time = None
n_file_with_latest_mod_time = 0
file_path_with_largest_file_name = None
if tf.io.gfile.exists(dir_name):
for file_name in os.listdir(dir_name):
# Only consider if `file_name` matches the pattern.
if re.match(base_name_regex, file_name):
file_path = os.path.join(dir_name, file_name)
mod_time = os.path.getmtime(file_path)
if (file_path_with_largest_file_name is None or
file_path > file_path_with_largest_file_name):
file_path_with_largest_file_name = file_path
if mod_time > latest_mod_time:
latest_mod_time = mod_time
file_path_with_latest_mod_time = file_path
# In the case a file with later modified time is found, reset
# the counter for the number of files with latest modified time.
n_file_with_latest_mod_time = 1
elif mod_time == latest_mod_time:
# In the case a file has modified time tied with the most recent,
# increment the counter for the number of files with latest modified
# time by 1.
n_file_with_latest_mod_time += 1
if n_file_with_latest_mod_time == 1:
# Return the sole file that has most recent modified time.
return file_path_with_latest_mod_time
else:
# If there are more than one file having latest modified time, return
# the file path with the largest file name.
return file_path_with_largest_file_name
@keras_export('keras.callbacks.experimental.BackupAndRestore', v1=[])
class BackupAndRestore(Callback):
"""Callback to back up and restore the training state.
`BackupAndRestore` callback is intended to recover from interruptions that
happened in the middle of a model.fit execution by backing up the
training states in a temporary checkpoint file (based on TF CheckpointManager)
at the end of each epoch. If training is restarted before completion, the
training state and model are restored to the most recently saved state at the
beginning of a new model.fit() run.
Note that the user is responsible for bringing jobs back up.
This callback provides the backup and restore mechanism used for fault
tolerance. The model restored from a previous checkpoint is expected to be the
same as the one used to back it up. If the user changes arguments passed to
compile or fit, the checkpoint saved for fault tolerance can become
invalid.
Note:
1. This callback is not compatible with disabling eager execution.
2. A checkpoint is saved at the end of each epoch. When restoring, we redo
any partial work from the unfinished epoch in which the training got
interrupted (so the work done before an interruption doesn't affect the
final model state).
3. This works for both single worker and multi-worker mode, only
MirroredStrategy and MultiWorkerMirroredStrategy are supported for now.
Example:
>>> class InterruptingCallback(tf.keras.callbacks.Callback):
... def on_epoch_begin(self, epoch, logs=None):
... if epoch == 4:
... raise RuntimeError('Interrupting!')
>>> callback = tf.keras.callbacks.experimental.BackupAndRestore(
... backup_dir="/tmp/backup")
>>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
>>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
>>> try:
... model.fit(np.arange(100).reshape(5, 20), np.zeros(5), epochs=10,
... batch_size=1, callbacks=[callback, InterruptingCallback()],
... verbose=0)
... except:
... pass
>>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5), epochs=10,
... batch_size=1, callbacks=[callback], verbose=0)
>>> # Only 6 more epochs are run, since the first training got interrupted at
>>> # zero-indexed epoch 4; the second training continues from epoch 4 to 9.
>>> len(history.history['loss'])
6
Args:
backup_dir: String, path to store the checkpoint.
e.g. backup_dir = os.path.join(working_dir, 'backup')
This is the directory in which the system stores temporary files to
recover the model from jobs terminated unexpectedly. The directory
cannot be reused elsewhere to store other files, e.g. by
BackupAndRestore callback of another training, or by another callback
(ModelCheckpoint) of the same training.
"""
def __init__(self, backup_dir):
super(BackupAndRestore, self).__init__()
self.backup_dir = backup_dir
self._supports_tf_logs = True
self._supported_strategies = (
tf.distribute.MirroredStrategy,
tf.distribute.MultiWorkerMirroredStrategy,
tf.distribute.experimental.TPUStrategy, tf.distribute.TPUStrategy)
if not tf.executing_eagerly():
if tf.inside_function():
raise ValueError('This Callback\'s method contains Python state and '
'should be called outside of `tf.function`s.')
else: # Legacy graph mode:
raise ValueError(
'BackupAndRestore only supports eager mode. In graph '
'mode, consider using ModelCheckpoint to manually save '
'and restore weights with `model.load_weights()` and by '
'providing `initial_epoch` in `model.fit()` for fault tolerance.')
# Only the chief worker writes model checkpoints, but all workers
# restore checkpoint at on_train_begin().
self._chief_worker_only = False
def set_model(self, model):
self.model = model
def on_train_begin(self, logs=None):
# TrainingState is used to manage the training state needed for
# failure-recovery of a worker in training.
# pylint: disable=protected-access
if self.model._distribution_strategy and not isinstance(
self.model.distribute_strategy, self._supported_strategies):
raise NotImplementedError(
'%s is not supported yet. '
'Currently BackupAndRestore callback only supports empty strategy, '
'MirroredStrategy, MultiWorkerMirroredStrategy and TPUStrategy.' %
type(self.model.distribute_strategy).__name__)
self.model._training_state = (
worker_training_state.WorkerTrainingState(self.model, self.backup_dir))
self._training_state = self.model._training_state
self._training_state.restore()
def on_train_end(self, logs=None):
# pylint: disable=protected-access
# On exit of training, delete the training state backup file that was saved
# for the purpose of worker recovery.
self._training_state.delete_backup()
# Clean up the training state.
del self._training_state
del self.model._training_state
def on_epoch_end(self, epoch, logs=None):
# Back up the model and current epoch for possible future recovery.
self._training_state.back_up(epoch)
@keras_export('keras.callbacks.EarlyStopping')
class EarlyStopping(Callback):
"""Stop training when a monitored metric has stopped improving.
Assuming the goal of training is to minimize the loss. With this, the
metric to be monitored would be `'loss'`, and mode would be `'min'`. A
`model.fit()` training loop will check at the end of every epoch whether
the loss is no longer decreasing, considering the `min_delta` and
`patience` if applicable. Once it's found no longer decreasing,
`model.stop_training` is marked True and the training terminates.
The quantity to be monitored needs to be available in `logs` dict.
To make it so, pass the loss or metrics at `model.compile()`.
Args:
monitor: Quantity to be monitored.
min_delta: Minimum change in the monitored quantity
to qualify as an improvement, i.e. an absolute
change of less than min_delta, will count as no
improvement.
patience: Number of epochs with no improvement
after which training will be stopped.
verbose: verbosity mode.
mode: One of `{"auto", "min", "max"}`. In `min` mode,
training will stop when the quantity
monitored has stopped decreasing; in `"max"`
mode it will stop when the quantity
monitored has stopped increasing; in `"auto"`
mode, the direction is automatically inferred
from the name of the monitored quantity.
baseline: Baseline value for the monitored quantity.
Training will stop if the model doesn't show improvement over the
baseline.
restore_best_weights: Whether to restore model weights from
the epoch with the best value of the monitored quantity.
If False, the model weights obtained at the last step of
training are used.
Example:
>>> callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
>>> # This callback will stop the training when there is no improvement in
>>> # the loss for three consecutive epochs.
>>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
>>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
>>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
... epochs=10, batch_size=1, callbacks=[callback],
... verbose=0)
>>> len(history.history['loss']) # Only 4 epochs are run.
4
"""
def __init__(self,
monitor='val_loss',
min_delta=0,
patience=0,
verbose=0,
mode='auto',
baseline=None,
restore_best_weights=False):
super(EarlyStopping, self).__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.baseline = baseline
self.min_delta = abs(min_delta)
self.wait = 0
self.stopped_epoch = 0
self.restore_best_weights = restore_best_weights
self.best_weights = None
if mode not in ['auto', 'min', 'max']:
logging.warning('EarlyStopping mode %s is unknown, '
'fallback to auto mode.', mode)
mode = 'auto'
if mode == 'min':
self.monitor_op = np.less
elif mode == 'max':
self.monitor_op = np.greater
else:
if 'acc' in self.monitor:
self.monitor_op = np.greater
else:
self.monitor_op = np.less
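# Fold the improvement direction into `min_delta` so that
# `monitor_op(current - min_delta, best)` only triggers on an improvement of
# at least `min_delta`.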
if self.monitor_op == np.greater:
self.min_delta *= 1
else:
self.min_delta *= -1
def on_train_begin(self, logs=None):
# Allow instances to be re-used
self.wait = 0
self.stopped_epoch = 0
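# Start from the user-provided baseline if given; otherwise from the worst
# possible value for the monitored direction.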
if self.baseline is not None:
self.best = self.baseline
else:
self.best = np.Inf if self.monitor_op == np.less else -np.Inf
self.best_weights = None
def on_epoch_end(self, epoch, logs=None):
current = self.get_monitor_value(logs)
if current is None:
return
if self.monitor_op(current - self.min_delta, self.best):
self.best = current
self.wait = 0
if self.restore_best_weights:
self.best_weights = self.model.get_weights()
else:
self.wait += 1
if self.wait >= self.patience:
self.stopped_epoch = epoch
self.model.stop_training = True
if self.restore_best_weights:
if self.verbose > 0:
print('Restoring model weights from the end of the best epoch.')
self.model.set_weights(self.best_weights)
def on_train_end(self, logs=None):
if self.stopped_epoch > 0 and self.verbose > 0:
print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))
def get_monitor_value(self, logs):
logs = logs or {}
monitor_value = logs.get(self.monitor)
if monitor_value is None:
logging.warning('Early stopping conditioned on metric `%s` '
'which is not available. Available metrics are: %s',
self.monitor, ','.join(list(logs.keys())))
return monitor_value
@keras_export('keras.callbacks.RemoteMonitor')
class RemoteMonitor(Callback):
"""Callback used to stream events to a server.
Requires the `requests` library.
Events are sent to `root + '/publish/epoch/end/'` by default. Calls are
HTTP POST, with a `data` argument which is a
JSON-encoded dictionary of event data.
If `send_as_json=True`, the content type of the request will be
`"application/json"`.
Otherwise the serialized JSON will be sent within a form.
Args:
root: String; root url of the target server.
path: String; path relative to `root` to which the events will be sent.
field: String; JSON field under which the data will be stored.
The field is used only if the payload is sent within a form
(i.e. send_as_json is set to False).
headers: Dictionary; optional custom HTTP headers.
send_as_json: Boolean; whether the request should be
sent as `"application/json"`.
"""
def __init__(self,
root='http://localhost:9000',
path='/publish/epoch/end/',
field='data',
headers=None,
send_as_json=False):
super(RemoteMonitor, self).__init__()
self.root = root
self.path = path
self.field = field
self.headers = headers
self.send_as_json = send_as_json
def on_epoch_end(self, epoch, logs=None):
if requests is None:
raise ImportError('RemoteMonitor requires the `requests` library.')
logs = logs or {}
send = {}
send['epoch'] = epoch
for k, v in logs.items():
# np.ndarray and np.generic are not scalar types
# therefore we must unwrap their scalar values and
# pass to the json-serializable dict 'send'
if isinstance(v, (np.ndarray, np.generic)):
send[k] = v.item()
else:
send[k] = v
try:
if self.send_as_json:
requests.post(self.root + self.path, json=send, headers=self.headers)
else:
requests.post(
self.root + self.path, {self.field: json.dumps(send)},
headers=self.headers)
except requests.exceptions.RequestException:
logging.warning('Warning: could not reach RemoteMonitor '
'root server at ' + str(self.root))
@keras_export('keras.callbacks.LearningRateScheduler')
class LearningRateScheduler(Callback):
"""Learning rate scheduler.
At the beginning of every epoch, this callback gets the updated learning rate
value from the `schedule` function provided at `__init__`, with the current
epoch and current learning rate as inputs, and applies the updated learning
rate to the optimizer.
Args:
schedule: a function that takes an epoch index (integer, indexed from 0)
and current learning rate (float) as inputs and returns a new
learning rate as output (float).
verbose: int. 0: quiet, 1: update messages.
Example:
>>> # This function keeps the initial learning rate for the first ten epochs
>>> # and decreases it exponentially after that.
>>> def scheduler(epoch, lr):
... if epoch < 10:
... return lr
... else:
... return lr * tf.math.exp(-0.1)
>>>
>>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
>>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
>>> round(model.optimizer.lr.numpy(), 5)
0.01
>>> callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
>>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
... epochs=15, callbacks=[callback], verbose=0)
>>> round(model.optimizer.lr.numpy(), 5)
0.00607
"""
def __init__(self, schedule, verbose=0):
super(LearningRateScheduler, self).__init__()
self.schedule = schedule
self.verbose = verbose
def on_epoch_begin(self, epoch, logs=None):
if not hasattr(self.model.optimizer, 'lr'):
raise ValueError('Optimizer must have a "lr" attribute.')
try: # new API
lr = float(K.get_value(self.model.optimizer.lr))
lr = self.schedule(epoch, lr)
except TypeError: # Support for old API for backward compatibility
lr = self.schedule(epoch)
if not isinstance(lr, (tf.Tensor, float, np.float32, np.float64)):
raise ValueError('The output of the "schedule" function '
'should be a float.')
if isinstance(lr, tf.Tensor) and not lr.dtype.is_floating:
raise ValueError('The dtype of the Tensor should be float.')
K.set_value(self.model.optimizer.lr, K.get_value(lr))
if self.verbose > 0:
print('\nEpoch %05d: LearningRateScheduler reducing learning '
'rate to %s.' % (epoch + 1, lr))
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
def keras_model_summary(name, data, step=None):
"""Writes a Keras model as JSON to as a Summary.
Writing the Keras model configuration allows the TensorBoard graph plugin to
render a conceptual graph, as opposed to graph of ops. In case the model fails
to serialize as JSON, it ignores and returns False.
Args:
name: A name for this summary. The summary tag used for TensorBoard will be
this name prefixed by any active name scopes.
data: A Keras Model to write.
step: Explicit `int64`-castable monotonic step value for this summary. If
omitted, this defaults to `tf.summary.experimental.get_step()`, which must
not be None.
Returns:
True on success, or False if no summary was written because no default
summary writer was available.
Raises:
ValueError: if a default writer exists, but no step was provided and
`tf.summary.experimental.get_step()` is None.
"""
summary_metadata = tf.compat.v1.SummaryMetadata()
# Hard coding a plugin name. Please refer to go/tb-plugin-name-hardcode for
# the rationale.
summary_metadata.plugin_data.plugin_name = 'graph_keras_model'
# version number = 1
summary_metadata.plugin_data.content = b'1'
try:
json_string = data.to_json()
except Exception as exc: # pylint: disable=broad-except
# An exception should not break a model code.
logging.warn('Model failed to serialize as JSON. Ignoring... %s', exc)
return False
with tf.summary.experimental.summary_scope(name, 'graph_keras_model',
[data, step]) as (tag, _):
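# Create the string tensor on the CPU; string tensors are generally not
# supported on accelerator devices.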
with tf.compat.v1.device('cpu:0'):
tensor = tf.constant(json_string, dtype=tf.string)
return tf.summary.write(
tag=tag, tensor=tensor, step=step, metadata=summary_metadata)
@keras_export('keras.callbacks.TensorBoard', v1=[])
class TensorBoard(Callback, version_utils.TensorBoardVersionSelector):
# pylint: disable=line-too-long
"""Enable visualizations for TensorBoard.
TensorBoard is a visualization tool provided with TensorFlow.
This callback logs events for TensorBoard, including:
* Metrics summary plots
* Training graph visualization
* Activation histograms
* Sampled profiling
If you have installed TensorFlow with pip, you should be able
to launch TensorBoard from the command line:
```
tensorboard --logdir=path_to_your_logs
```
You can find more information about TensorBoard
[here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).
Args:
log_dir: the path of the directory where to save the log files to be
parsed by TensorBoard. e.g. log_dir = os.path.join(working_dir, 'logs')
This directory should not be reused by any other callbacks.
histogram_freq: frequency (in epochs) at which to compute activation and
weight histograms for the layers of the model. If set to 0, histograms
won't be computed. Validation data (or split) must be specified for
histogram visualizations.
write_graph: whether to visualize the graph in TensorBoard. The log file
can become quite large when write_graph is set to True.
write_images: whether to write model weights to visualize as image in
TensorBoard.
write_steps_per_second: whether to log the training steps per second into
TensorBoard. This supports both epoch and batch frequency logging.
update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
writes the losses and metrics to TensorBoard after each batch. The same
applies for `'epoch'`. If using an integer, let's say `1000`, the
callback will write the metrics and losses to TensorBoard every 1000
batches. Note that writing too frequently to TensorBoard can slow down
your training.
profile_batch: Profile the batch(es) to sample compute characteristics.
profile_batch must be a non-negative integer or a tuple of integers.
A pair of positive integers signify a range of batches to profile.
By default, it will profile the second batch. Set profile_batch=0
to disable profiling.
embeddings_freq: frequency (in epochs) at which embedding layers will be
visualized. If set to 0, embeddings won't be visualized.
embeddings_metadata: a dictionary which maps layer name to a file name in
which metadata for this embedding layer is saved. See the
[details](
https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
about the metadata file format. If the same metadata file is
used for all embedding layers, a single string can be passed.
Examples:
Basic usage:
```python
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="./logs")
model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])
# Then run the tensorboard command to view the visualizations.
```
Custom batch-level summaries in a subclassed Model:
```python
class MyModel(tf.keras.Model):
def build(self, _):
self.dense = tf.keras.layers.Dense(10)
def call(self, x):
outputs = self.dense(x)
tf.summary.histogram('outputs', outputs)
return outputs
model = MyModel()
model.compile('sgd', 'mse')
# Make sure to set `update_freq=N` to log a batch-level summary every N batches.
# In addition to any `tf.summary` contained in `Model.call`, metrics added in
# `Model.compile` will be logged every N batches.
tb_callback = tf.keras.callbacks.TensorBoard('./logs', update_freq=1)
model.fit(x_train, y_train, callbacks=[tb_callback])
```
Custom batch-level summaries in a Functional API Model:
```python
def my_summary(x):
tf.summary.histogram('x', x)
return x
inputs = tf.keras.Input(10)
x = tf.keras.layers.Dense(10)(inputs)
outputs = tf.keras.layers.Lambda(my_summary)(x)
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', 'mse')
# Make sure to set `update_freq=N` to log a batch-level summary every N batches.
# In addition to any `tf.summary` contained in `Model.call`, metrics added in
# `Model.compile` will be logged every N batches.
tb_callback = tf.keras.callbacks.TensorBoard('./logs', update_freq=1)
model.fit(x_train, y_train, callbacks=[tb_callback])
```
Profiling:
```python
# Profile a single batch, e.g. the 5th batch.
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir='./logs', profile_batch=5)
model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])
# Profile a range of batches, e.g. from 10 to 20.
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir='./logs', profile_batch=(10,20))
model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])
```
"""
# pylint: enable=line-too-long
def __init__(self,
log_dir='logs',
histogram_freq=0,
write_graph=True,
write_images=False,
write_steps_per_second=False,
update_freq='epoch',
profile_batch=2,
embeddings_freq=0,
embeddings_metadata=None,
**kwargs):
super(TensorBoard, self).__init__()
self._supports_tf_logs = True
self._validate_kwargs(kwargs)
self.log_dir = path_to_string(log_dir)
self.histogram_freq = histogram_freq
self.write_graph = write_graph
self.write_images = write_images
self.write_steps_per_second = write_steps_per_second
self.update_freq = 1 if update_freq == 'batch' else update_freq
self.embeddings_freq = embeddings_freq
self.embeddings_metadata = embeddings_metadata
self._init_profile_batch(profile_batch)
self._epoch = 0
self._global_train_batch = 0
self._previous_epoch_iterations = 0
self._train_accumulated_time = 0
self._batch_start_time = 0
# Lazily initialized in order to avoid creating event files when
# not needed.
self._writers = {}
# Used to restore any existing `SummaryWriter` after training ends.
self._prev_summary_state = []
def _validate_kwargs(self, kwargs):
"""Handle arguments were supported in V1."""
if kwargs.get('write_grads', False):
logging.warning('`write_grads` will be ignored in TensorFlow 2.0 '
'for the `TensorBoard` Callback.')
if kwargs.get('batch_size', False):
logging.warning('`batch_size` is no longer needed in the '
'`TensorBoard` Callback and will be ignored '
'in TensorFlow 2.0.')
if kwargs.get('embeddings_layer_names', False):
logging.warning('`embeddings_layer_names` is not supported in '
'TensorFlow 2.0. Instead, all `Embedding` layers '
'will be visualized.')
if kwargs.get('embeddings_data', False):
logging.warning('`embeddings_data` is not supported in TensorFlow '
'2.0. Instead, all `Embedding` variables will be '
'visualized.')
unrecognized_kwargs = set(kwargs.keys()) - {
'write_grads', 'embeddings_layer_names', 'embeddings_data', 'batch_size'
}
# Only allow kwargs that were supported in V1.
if unrecognized_kwargs:
raise ValueError('Unrecognized arguments in `TensorBoard` '
'Callback: ' + str(unrecognized_kwargs))
def set_model(self, model):
"""Sets Keras model and writes graph if specified."""
self.model = model
self._log_write_dir = self._get_log_write_dir()
self._train_dir = os.path.join(self._log_write_dir, 'train')
self._train_step = self.model._train_counter # pylint: disable=protected-access
self._val_dir = os.path.join(self._log_write_dir, 'validation')
self._val_step = self.model._test_counter # pylint: disable=protected-access
self._writers = {} # Resets writers.
self._should_write_train_graph = False
if self.write_graph:
self._write_keras_model_summary()
self._should_write_train_graph = True
if self.embeddings_freq:
self._configure_embeddings()
@property
def _train_writer(self):
if 'train' not in self._writers:
self._writers['train'] = tf.summary.create_file_writer(
self._train_dir)
return self._writers['train']
@property
def _val_writer(self):
if 'val' not in self._writers:
self._writers['val'] = tf.summary.create_file_writer(self._val_dir)
return self._writers['val']
def _get_log_write_dir(self):
"""For multi-worker, only chief should write, others write to '/tmp'."""
return distributed_file_utils.write_dirpath(self.log_dir,
self.model.distribute_strategy)
def _delete_tmp_write_dir(self):
"""Deletes tmp write directories for multi-worker."""
distributed_file_utils.remove_temp_dirpath(self.log_dir,
self.model.distribute_strategy)
def _write_keras_model_train_graph(self):
"""Writes Keras model train_function graph to TensorBoard."""
with self._train_writer.as_default():
with tf.summary.record_if(True):
train_fn = self.model.train_function
# If the train_function is a `tf.function`, we can write out a graph
if hasattr(train_fn, 'function_spec'):
tf.summary.graph(train_fn._concrete_stateful_fn.graph) # pylint: disable=protected-access
def _write_keras_model_summary(self):
"""Writes Keras graph network summary to TensorBoard."""
with self._train_writer.as_default():
with tf.summary.record_if(True):
summary_writable = (
self.model._is_graph_network or # pylint: disable=protected-access
self.model.__class__.__name__ == 'Sequential') # pylint: disable=protected-access
if summary_writable:
keras_model_summary('keras', self.model, step=0)
def _configure_embeddings(self):
"""Configure the Projector for embeddings."""
# TODO(omalleyt): Add integration tests.
from google.protobuf import text_format
from keras.layers import embeddings
from keras.protobuf import projector_config_pb2
config = projector_config_pb2.ProjectorConfig()
for layer in self.model.layers:
if isinstance(layer, embeddings.Embedding):
embedding = config.embeddings.add()
# Embeddings are always the first layer, so this naming should be
        # consistent in any keras model checkpoint.
name = 'layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE'
embedding.tensor_name = name
if self.embeddings_metadata is not None:
if isinstance(self.embeddings_metadata, str):
embedding.metadata_path = self.embeddings_metadata
else:
if layer.name in self.embeddings_metadata.keys():
embedding.metadata_path = self.embeddings_metadata.pop(layer.name)
if self.embeddings_metadata and not isinstance(self.embeddings_metadata,
str):
raise ValueError('Unrecognized `Embedding` layer names passed to '
'`keras.callbacks.TensorBoard` `embeddings_metadata` '
'argument: ' + str(self.embeddings_metadata.keys()))
config_pbtxt = text_format.MessageToString(config)
path = os.path.join(self._log_write_dir, 'projector_config.pbtxt')
with tf.io.gfile.GFile(path, 'w') as f:
f.write(config_pbtxt)
def _push_writer(self, writer, step):
"""Sets the default writer for custom batch-level summaries."""
if self.update_freq == 'epoch':
return
summary_state = summary_ops_v2._summary_state # pylint: disable=protected-access
self._prev_summary_state.append({
'is_recording': summary_state.is_recording,
'writer': summary_state.writer,
'step': summary_state.step
})
if self.update_freq == 'epoch':
should_record = False
writer = None
else:
should_record = lambda: tf.equal(step % self.update_freq, 0)
summary_state.is_recording = should_record
summary_state.writer = writer
# TODO(b/151339474): Fix deadlock when not using .value() here.
tf.summary.experimental.set_step(step.value())
def _pop_writer(self):
"""Pops the current writer."""
if self.update_freq == 'epoch':
return
prev_state = self._prev_summary_state.pop()
summary_state = summary_ops_v2._summary_state # pylint: disable=protected-access
summary_state.is_recording = prev_state['is_recording']
summary_state.writer = prev_state['writer']
tf.summary.experimental.set_step(prev_state['step'])
def _close_writers(self):
for writer in self._writers.values():
writer.close()
def _init_profile_batch(self, profile_batch):
"""Validate profile_batch value and set the range of batches to profile.
Args:
      profile_batch: The range of batches to profile. Should be a non-negative
        integer, a pair of positive integers, or a comma separated string of
        two positive integers. A pair of positive integers signifies a range
        of batches to profile.
    Sets `self._start_batch` and `self._stop_batch` to the first and last
    batch to profile; profiling is disabled when both are 0.
    Raises:
      ValueError: If `profile_batch` is not an integer or a comma separated
        pair of positive integers.
"""
profile_batch_error_message = (
'profile_batch must be a non-negative integer or 2-tuple of positive '
'integers. A pair of positive integers signifies a range of batches '
'to profile. Found: {}'.format(profile_batch))
# Support legacy way of specifying "start,stop" or "start" as str.
if isinstance(profile_batch, six.string_types):
profile_batch = str(profile_batch).split(',')
profile_batch = tf.nest.map_structure(int, profile_batch)
if isinstance(profile_batch, int):
self._start_batch = profile_batch
self._stop_batch = profile_batch
elif isinstance(profile_batch, (tuple, list)) and len(profile_batch) == 2:
self._start_batch, self._stop_batch = profile_batch
else:
raise ValueError(profile_batch_error_message)
if self._start_batch < 0 or self._stop_batch < self._start_batch:
raise ValueError(profile_batch_error_message)
if self._start_batch > 0:
# Warm up and improve the profiling accuracy.
tf.profiler.experimental.start('')
tf.profiler.experimental.stop(save=False)
# True when a trace is running.
self._is_tracing = False
# Setting `profile_batch=0` disables profiling.
self._should_trace = not (self._start_batch == 0 and self._stop_batch == 0)
def on_train_begin(self, logs=None):
self._global_train_batch = 0
self._previous_epoch_iterations = 0
self._train_accumulated_time = 0
self._push_writer(self._train_writer, self._train_step)
def on_train_end(self, logs=None):
self._pop_writer()
if self._is_tracing:
self._stop_trace()
self._close_writers()
self._delete_tmp_write_dir()
def on_test_begin(self, logs=None):
self._push_writer(self._val_writer, self._val_step)
def on_test_end(self, logs=None):
self._pop_writer()
def _implements_train_batch_hooks(self):
return self._should_trace # Only call batch hooks when tracing is enabled
def on_train_batch_begin(self, batch, logs=None):
self._global_train_batch += 1
if self.write_steps_per_second:
self._batch_start_time = time.time()
if not self._should_trace:
return
if self._global_train_batch == self._start_batch:
self._start_trace()
def on_train_batch_end(self, batch, logs=None):
if self._should_write_train_graph:
self._write_keras_model_train_graph()
self._should_write_train_graph = False
if self.write_steps_per_second:
batch_run_time = time.time() - self._batch_start_time
self._train_accumulated_time += batch_run_time
summary_ops_v2.scalar('batch_steps_per_second', 1. / batch_run_time)
if not self._should_trace:
return
if self._is_tracing and self._global_train_batch >= self._stop_batch:
self._stop_trace()
def on_epoch_begin(self, epoch, logs=None):
# Keeps track of epoch for profiling.
self._epoch = epoch
if self.write_steps_per_second:
self._previous_epoch_iterations = self.model.optimizer.iterations.numpy()
self._train_accumulated_time = 0
def on_epoch_end(self, epoch, logs=None):
"""Runs metrics and histogram summaries at epoch end."""
self._log_epoch_metrics(epoch, logs)
if self.histogram_freq and epoch % self.histogram_freq == 0:
self._log_weights(epoch)
if self.embeddings_freq and epoch % self.embeddings_freq == 0:
self._log_embeddings(epoch)
def _start_trace(self):
tf.summary.trace_on(graph=True, profiler=False)
tf.profiler.experimental.start(logdir=self._train_dir)
self._is_tracing = True
def _stop_trace(self, batch=None):
"""Logs the trace graph to TensorBoard."""
if batch is None:
batch = self._stop_batch
with self._train_writer.as_default():
with tf.summary.record_if(True):
# TODO(b/126388999): Remove step info in the summary name.
tf.summary.trace_export(name='batch_%d' % batch, step=batch)
tf.profiler.experimental.stop()
self._is_tracing = False
def _collect_learning_rate(self, logs):
lr_schedule = getattr(self.model.optimizer, 'lr', None)
if isinstance(lr_schedule, learning_rate_schedule.LearningRateSchedule):
logs['learning_rate'] = lr_schedule(self.model.optimizer.iterations)
return logs
def _compute_steps_per_second(self):
current_iteration = self.model.optimizer.iterations.numpy()
steps_per_second = ((current_iteration - self._previous_epoch_iterations) /
(self._train_accumulated_time))
return steps_per_second
def _log_epoch_metrics(self, epoch, logs):
"""Writes epoch metrics out as scalar summaries.
Args:
epoch: Int. The global step to use for TensorBoard.
logs: Dict. Keys are scalar summary names, values are scalars.
"""
if not logs:
return
train_logs = {k: v for k, v in logs.items() if not k.startswith('val_')}
val_logs = {k: v for k, v in logs.items() if k.startswith('val_')}
train_logs = self._collect_learning_rate(train_logs)
if self.write_steps_per_second:
train_logs['steps_per_second'] = self._compute_steps_per_second()
with tf.summary.record_if(True):
if train_logs:
with self._train_writer.as_default():
for name, value in train_logs.items():
summary_ops_v2.scalar('epoch_' + name, value, step=epoch)
if val_logs:
with self._val_writer.as_default():
for name, value in val_logs.items():
name = name[4:] # Remove 'val_' prefix.
summary_ops_v2.scalar('epoch_' + name, value, step=epoch)
def _log_weights(self, epoch):
"""Logs the weights of the Model to TensorBoard."""
with self._train_writer.as_default():
with tf.summary.record_if(True):
for layer in self.model.layers:
for weight in layer.weights:
weight_name = weight.name.replace(':', '_')
summary_ops_v2.histogram(weight_name, weight, step=epoch)
if self.write_images:
self._log_weight_as_image(weight, weight_name, epoch)
self._train_writer.flush()
def _log_weight_as_image(self, weight, weight_name, epoch):
"""Logs a weight as a TensorBoard image."""
w_img = tf.compat.v1.squeeze(weight)
shape = K.int_shape(w_img)
if len(shape) == 1: # Bias case
w_img = tf.reshape(w_img, [1, shape[0], 1, 1])
elif len(shape) == 2: # Dense layer kernel case
if shape[0] > shape[1]:
w_img = tf.compat.v1.transpose(w_img)
shape = K.int_shape(w_img)
w_img = tf.reshape(w_img, [1, shape[0], shape[1], 1])
elif len(shape) == 3: # ConvNet case
if K.image_data_format() == 'channels_last':
# Switch to channels_first to display every kernel as a separate
# image.
w_img = tf.compat.v1.transpose(w_img, perm=[2, 0, 1])
shape = K.int_shape(w_img)
w_img = tf.reshape(w_img, [shape[0], shape[1], shape[2], 1])
shape = K.int_shape(w_img)
# Not possible to handle 3D convnets etc.
if len(shape) == 4 and shape[-1] in [1, 3, 4]:
summary_ops_v2.image(weight_name, w_img, step=epoch)
def _log_embeddings(self, epoch):
embeddings_ckpt = os.path.join(self._log_write_dir, 'train',
'keras_embedding.ckpt-{}'.format(epoch))
self.model.save_weights(embeddings_ckpt)
@keras_export('keras.callbacks.ReduceLROnPlateau')
class ReduceLROnPlateau(Callback):
"""Reduce learning rate when a metric has stopped improving.
Models often benefit from reducing the learning rate by a factor
of 2-10 once learning stagnates. This callback monitors a
quantity and if no improvement is seen for a 'patience' number
of epochs, the learning rate is reduced.
Example:
```python
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
patience=5, min_lr=0.001)
model.fit(X_train, Y_train, callbacks=[reduce_lr])
```
Args:
monitor: quantity to be monitored.
factor: factor by which the learning rate will be reduced.
`new_lr = lr * factor`.
patience: number of epochs with no improvement after which learning rate
will be reduced.
verbose: int. 0: quiet, 1: update messages.
mode: one of `{'auto', 'min', 'max'}`. In `'min'` mode,
the learning rate will be reduced when the
quantity monitored has stopped decreasing; in `'max'` mode it will be
reduced when the quantity monitored has stopped increasing; in `'auto'`
mode, the direction is automatically inferred from the name of the
monitored quantity.
min_delta: threshold for measuring the new optimum, to only focus on
significant changes.
cooldown: number of epochs to wait before resuming normal operation after
lr has been reduced.
min_lr: lower bound on the learning rate.
"""
def __init__(self,
monitor='val_loss',
factor=0.1,
patience=10,
verbose=0,
mode='auto',
min_delta=1e-4,
cooldown=0,
min_lr=0,
**kwargs):
super(ReduceLROnPlateau, self).__init__()
self.monitor = monitor
if factor >= 1.0:
      raise ValueError('ReduceLROnPlateau does not support a factor >= 1.0.')
if 'epsilon' in kwargs:
min_delta = kwargs.pop('epsilon')
logging.warning('`epsilon` argument is deprecated and '
'will be removed, use `min_delta` instead.')
self.factor = factor
self.min_lr = min_lr
self.min_delta = min_delta
self.patience = patience
self.verbose = verbose
self.cooldown = cooldown
self.cooldown_counter = 0 # Cooldown counter.
self.wait = 0
self.best = 0
self.mode = mode
self.monitor_op = None
self._reset()
def _reset(self):
"""Resets wait counter and cooldown counter.
"""
if self.mode not in ['auto', 'min', 'max']:
logging.warning('Learning rate reduction mode %s is unknown, '
'fallback to auto mode.', self.mode)
self.mode = 'auto'
if (self.mode == 'min' or
(self.mode == 'auto' and 'acc' not in self.monitor)):
self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)
self.best = np.Inf
else:
self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)
self.best = -np.Inf
self.cooldown_counter = 0
self.wait = 0
def on_train_begin(self, logs=None):
self._reset()
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
logs['lr'] = K.get_value(self.model.optimizer.lr)
current = logs.get(self.monitor)
if current is None:
logging.warning('Learning rate reduction is conditioned on metric `%s` '
'which is not available. Available metrics are: %s',
self.monitor, ','.join(list(logs.keys())))
else:
if self.in_cooldown():
self.cooldown_counter -= 1
self.wait = 0
if self.monitor_op(current, self.best):
self.best = current
self.wait = 0
elif not self.in_cooldown():
self.wait += 1
if self.wait >= self.patience:
old_lr = K.get_value(self.model.optimizer.lr)
if old_lr > np.float32(self.min_lr):
new_lr = old_lr * self.factor
new_lr = max(new_lr, self.min_lr)
K.set_value(self.model.optimizer.lr, new_lr)
if self.verbose > 0:
print('\nEpoch %05d: ReduceLROnPlateau reducing learning '
'rate to %s.' % (epoch + 1, new_lr))
self.cooldown_counter = self.cooldown
self.wait = 0
def in_cooldown(self):
return self.cooldown_counter > 0
@keras_export('keras.callbacks.CSVLogger')
class CSVLogger(Callback):
"""Callback that streams epoch results to a CSV file.
Supports all values that can be represented as a string,
including 1D iterables such as `np.ndarray`.
Example:
```python
csv_logger = CSVLogger('training.log')
model.fit(X_train, Y_train, callbacks=[csv_logger])
```
Args:
filename: Filename of the CSV file, e.g. `'run/log.csv'`.
separator: String used to separate elements in the CSV file.
append: Boolean. True: append if file exists (useful for continuing
training). False: overwrite existing file.
"""
def __init__(self, filename, separator=',', append=False):
self.sep = separator
self.filename = path_to_string(filename)
self.append = append
self.writer = None
self.keys = None
self.append_header = True
if six.PY2:
self.file_flags = 'b'
self._open_args = {}
else:
self.file_flags = ''
self._open_args = {'newline': '\n'}
super(CSVLogger, self).__init__()
def on_train_begin(self, logs=None):
if self.append:
if tf.io.gfile.exists(self.filename):
with open(self.filename, 'r' + self.file_flags) as f:
self.append_header = not bool(len(f.readline()))
mode = 'a'
else:
mode = 'w'
self.csv_file = io.open(self.filename,
mode + self.file_flags,
**self._open_args)
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
def handle_value(k):
is_zero_dim_ndarray = isinstance(k, np.ndarray) and k.ndim == 0
if isinstance(k, six.string_types):
return k
elif isinstance(k, collections.abc.Iterable) and not is_zero_dim_ndarray:
return '"[%s]"' % (', '.join(map(str, k)))
else:
return k
if self.keys is None:
self.keys = sorted(logs.keys())
if self.model.stop_training:
# We set NA so that csv parsers do not fail for this last epoch.
logs = dict((k, logs[k]) if k in logs else (k, 'NA') for k in self.keys)
if not self.writer:
class CustomDialect(csv.excel):
delimiter = self.sep
fieldnames = ['epoch'] + self.keys
self.writer = csv.DictWriter(
self.csv_file,
fieldnames=fieldnames,
dialect=CustomDialect)
if self.append_header:
self.writer.writeheader()
row_dict = collections.OrderedDict({'epoch': epoch})
row_dict.update((key, handle_value(logs[key])) for key in self.keys)
self.writer.writerow(row_dict)
self.csv_file.flush()
def on_train_end(self, logs=None):
self.csv_file.close()
self.writer = None
@keras_export('keras.callbacks.LambdaCallback')
class LambdaCallback(Callback):
r"""Callback for creating simple, custom callbacks on-the-fly.
This callback is constructed with anonymous functions that will be called
at the appropriate time (during `Model.{fit | evaluate | predict}`).
  Note that the callbacks expect positional arguments, as:
- `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
`epoch`, `logs`
- `on_batch_begin` and `on_batch_end` expect two positional arguments:
`batch`, `logs`
- `on_train_begin` and `on_train_end` expect one positional argument:
`logs`
Args:
on_epoch_begin: called at the beginning of every epoch.
on_epoch_end: called at the end of every epoch.
on_batch_begin: called at the beginning of every batch.
on_batch_end: called at the end of every batch.
on_train_begin: called at the beginning of model training.
on_train_end: called at the end of model training.
Example:
```python
# Print the batch number at the beginning of every batch.
batch_print_callback = LambdaCallback(
on_batch_begin=lambda batch,logs: print(batch))
# Stream the epoch loss to a file in JSON format. The file content
# is not well-formed JSON but rather has a JSON object per line.
import json
json_log = open('loss_log.json', mode='wt', buffering=1)
json_logging_callback = LambdaCallback(
on_epoch_end=lambda epoch, logs: json_log.write(
json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
on_train_end=lambda logs: json_log.close()
)
# Terminate some processes after having finished model training.
processes = ...
cleanup_callback = LambdaCallback(
on_train_end=lambda logs: [
p.terminate() for p in processes if p.is_alive()])
model.fit(...,
callbacks=[batch_print_callback,
json_logging_callback,
cleanup_callback])
```
"""
def __init__(self,
on_epoch_begin=None,
on_epoch_end=None,
on_batch_begin=None,
on_batch_end=None,
on_train_begin=None,
on_train_end=None,
**kwargs):
super(LambdaCallback, self).__init__()
self.__dict__.update(kwargs)
if on_epoch_begin is not None:
self.on_epoch_begin = on_epoch_begin
else:
self.on_epoch_begin = lambda epoch, logs: None
if on_epoch_end is not None:
self.on_epoch_end = on_epoch_end
else:
self.on_epoch_end = lambda epoch, logs: None
if on_batch_begin is not None:
self.on_batch_begin = on_batch_begin
else:
self.on_batch_begin = lambda batch, logs: None
if on_batch_end is not None:
self.on_batch_end = on_batch_end
else:
self.on_batch_end = lambda batch, logs: None
if on_train_begin is not None:
self.on_train_begin = on_train_begin
else:
self.on_train_begin = lambda logs: None
if on_train_end is not None:
self.on_train_end = on_train_end
else:
self.on_train_end = lambda logs: None
|
the-stack_0_19732 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import time
import os
import torch
from tqdm import tqdm
from fcos_core.config import cfg
from fcos_core.data.datasets.evaluation import evaluate
from ..utils.comm import is_main_process, get_world_size
from ..utils.comm import all_gather
from ..utils.comm import synchronize
from ..utils.timer import Timer, get_time_str
from .bbox_aug import im_detect_bbox_aug
def compute_on_dataset(model, data_loader, device, timer=None):
model.eval()
results_dict = {}
cpu_device = torch.device("cpu")
for _, batch in enumerate(tqdm(data_loader)):
images, targets, image_ids = batch
with torch.no_grad():
if timer:
timer.tic()
if cfg.TEST.BBOX_AUG.ENABLED:
output = im_detect_bbox_aug(model, images, device)
else:
output = model(images.to(device))
if timer:
torch.cuda.synchronize()
timer.toc()
output = [o.to(cpu_device) for o in output]
results_dict.update(
{img_id: result for img_id, result in zip(image_ids, output)}
)
return results_dict
def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu):
all_predictions = all_gather(predictions_per_gpu)
if not is_main_process():
return
# merge the list of dicts
predictions = {}
for p in all_predictions:
predictions.update(p)
# convert a dict where the key is the index in a list
image_ids = list(sorted(predictions.keys()))
if len(image_ids) != image_ids[-1] + 1:
logger = logging.getLogger("fcos_core.inference")
logger.warning(
"Number of images that were gathered from multiple processes is not "
"a contiguous set. Some images might be missing from the evaluation"
)
# convert to a list
predictions = [predictions[i] for i in image_ids]
return predictions
def inference(
model,
data_loader,
dataset_name,
iou_types=("bbox",),
box_only=False,
device="cuda",
expected_results=(),
expected_results_sigma_tol=4,
output_folder=None,
):
# convert to a torch.device for efficiency
device = torch.device(device)
num_devices = get_world_size()
logger = logging.getLogger("fcos_core.inference")
dataset = data_loader.dataset
logger.info("Start evaluation on {} dataset({} images).".format(dataset_name, len(dataset)))
total_timer = Timer()
inference_timer = Timer()
total_timer.tic()
predictions = compute_on_dataset(model, data_loader, device, inference_timer)
# wait for all processes to complete before measuring the time
synchronize()
total_time = total_timer.toc()
total_time_str = get_time_str(total_time)
logger.info(
"Total run time: {} ({} s / img per device, on {} devices)".format(
total_time_str, total_time * num_devices / len(dataset), num_devices
)
)
total_infer_time = get_time_str(inference_timer.total_time)
logger.info(
"Model inference time: {} ({} s / img per device, on {} devices)".format(
total_infer_time,
inference_timer.total_time * num_devices / len(dataset),
num_devices,
)
)
predictions = _accumulate_predictions_from_multiple_gpus(predictions)
if not is_main_process():
return
if output_folder:
torch.save(predictions, os.path.join(output_folder, "predictions.pth"))
extra_args = dict(
box_only=box_only,
iou_types=iou_types,
expected_results=expected_results,
expected_results_sigma_tol=expected_results_sigma_tol,
)
return evaluate(dataset=dataset,
predictions=predictions,
output_folder=output_folder,
**extra_args)
|
the-stack_0_19733 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~stream_analytics_management_client.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.OperationListResult"]
"""Lists all of the available Stream Analytics related operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~stream_analytics_management_client.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-04-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('OperationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.StreamAnalytics/operations'} # type: ignore
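# Usage sketch (illustrative; assumes a management client from this package
# that exposes this operation group as `client.operations`, and that each
# returned item carries a `name` field, as is conventional for ARM operations):
#
#   for op in client.operations.list():
#       print(op.name)
#
# The returned ItemPaged follows the service's nextLink paging transparently.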
|
the-stack_0_19736 | """Offer device oriented automation."""
import voluptuous as vol
from homeassistant.components.device_automation import (
DEVICE_TRIGGER_BASE_SCHEMA,
async_get_device_automation_platform,
)
from homeassistant.const import CONF_DOMAIN
from .exceptions import InvalidDeviceAutomationConfig
# mypy: allow-untyped-defs, no-check-untyped-defs
TRIGGER_SCHEMA = DEVICE_TRIGGER_BASE_SCHEMA.extend({}, extra=vol.ALLOW_EXTRA)
async def async_validate_trigger_config(hass, config):
"""Validate config."""
platform = await async_get_device_automation_platform(
hass, config[CONF_DOMAIN], "trigger"
)
if not hasattr(platform, "async_validate_trigger_config"):
return platform.TRIGGER_SCHEMA(config)
try:
return await getattr(platform, "async_validate_trigger_config")(hass, config)
except InvalidDeviceAutomationConfig as err:
raise vol.Invalid(str(err) or "Invalid trigger configuration") from err
async def async_attach_trigger(hass, config, action, automation_info):
"""Listen for trigger."""
platform = await async_get_device_automation_platform(
hass, config[CONF_DOMAIN], "trigger"
)
return await platform.async_attach_trigger(hass, config, action, automation_info)
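# Illustrative sketch (not part of this module): a device trigger config that
# TRIGGER_SCHEMA accepts is a mapping along these lines; the domain, device_id
# and type values below are hypothetical examples.
#
#   trigger_config = {
#       "platform": "device",
#       "domain": "light",
#       "device_id": "abc123",
#       "type": "turned_on",
#   }
#
# async_validate_trigger_config() then dispatches to that domain's
# device_trigger platform for the integration-specific validation.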
|
the-stack_0_19737 | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 24 16:31:30 2016
@author: Eric
"""
import argparse
import scipy.io as io
#import pickle
import LCALearner
import numpy as np
import sys
import pca.pca
sys.modules['pca'] = pca.pca  # Workaround so the pickled PCA loads: register pca.pca under the module name 'pca'.
parser = argparse.ArgumentParser(description="Learn dictionaries for LCA with given parameters.")
parser.add_argument('-o', '--overcompleteness', default=4, type=float)
parser.add_argument('-f', '--datafolder', default='../audition/Nicole Code/', type=str)
parser.add_argument('-r', '--resultsfolder', default='../audition/Results/',type=str)
parser.add_argument('-s', '--datasuffix', default='new', type=str)
args=parser.parse_args()
datafolder = args.datafolder  # '../audition/Data/'
resultsfolder = args.resultsfolder
oc = args.overcompleteness
datasuffix = args.datasuffix
numinput = 200
numunits = int(oc*numinput)
stuff = io.loadmat(datafolder+'PCAmatrices'+datasuffix+'.mat')
mypca = pca.pca.PCA(dim=200,whiten=True)
mypca.eVectors = stuff['E'].reshape((25,256,200))[:,::-1,:].reshape((6400,200)).T # flip the PC spectrograms upside down
mypca.sValues = np.sqrt(np.diag(np.abs(stuff['D1'])))
mypca.sValues = mypca.sValues[::-1]
mypca.mean_vec = np.zeros(6400)
mypca.ready=True
origshape = (25,256)
spectros = io.loadmat(datafolder+'dMatPCA'+datasuffix+'.mat')['dMatPCA'].T
lca = LCALearner.LCALearner(spectros, numunits, datatype="spectro", pca = mypca, stimshape=origshape, paramfile='dummy')
lca.tolerance = .01
lca.max_iter = 4
lambdas = [0.2, 0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
for lam in lambdas:
savestr = resultsfolder+str(oc)+'OC' + str(lam) + datasuffix
lca.Q = lca.rand_dict()
lca.min_thresh = lam
lca.save_params(savestr+'.pickle')
lca.run(ntrials=10000)
lca.run(ntrials=200000, rate_decay=.99995)
lca.sort_dict()
lca.save_params()
|
the-stack_0_19739 | from ..const import (
KOI8R_STOPBYTES
)
class VernamError(Exception):
pass
def transform(text: str, key: str) -> str:
"""Vernam cipher (charset 'KOI8-r'). Encryption/decryption function.
Args:
text: text to be encrypted/decrypted.
key: arbitrary character set.
Returns:
Encrypted or decrypted string.
"""
if not text:
raise VernamError("Input text is empty!")
if not key:
raise VernamError("The key is missing!")
# attempt to change the encoding of the input text
try:
text_bytes = bytearray(text, encoding="KOI8-r")
except UnicodeEncodeError:
raise VernamError("Invalid character in input text! (KOI8-r)")
# attempt to change key encoding
try:
key_bytes = bytearray(key, encoding="KOI8-r")
except UnicodeEncodeError:
raise VernamError("Invalid character in key! (KOI8-r)")
for i in range(len(text_bytes)):
text_bytes[i] ^= key_bytes[i % len(key_bytes)]
if text_bytes[i] in KOI8R_STOPBYTES:
raise VernamError("Service byte received! Change the key or text.")
try:
modified_text = text_bytes.decode("KOI8-r")
except UnicodeDecodeError:
raise VernamError("Decoding error! (from 'KOI8-r')")
return modified_text
def make(text: str, key: str) -> str:
"""Vernam cipher (charset 'KOI8-r'). Interface for calling encryption/decryption functions.
Args:
text: text to be encrypted/decrypted.
key: arbitrary character set.
Returns:
Encrypted or decrypted string.
"""
return transform(text, key)
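# Usage sketch (assumed round trip; KOI8-r covers ASCII and Cyrillic text):
#
#   ciphertext = make("secret message", "key")
#   plaintext = make(ciphertext, "key")  # XOR with the same key restores it
#
# Because the cipher is a plain XOR, applying `make` twice with the same key
# returns the original string, provided no KOI8-r service byte is produced.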
|
the-stack_0_19741 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""BERT classification finetuning runner in tf2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import math
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow as tf
# pylint: disable=g-import-not-at-top,redefined-outer-name,reimported
from official.modeling import model_training_utils
from official.nlp import bert_modeling as modeling
from official.nlp import bert_models
from official.nlp import optimization
from official.nlp.bert import common_flags
from official.nlp.bert import input_pipeline
from official.nlp.bert import model_saving_utils
from official.utils.misc import distribution_utils
from official.utils.misc import keras_utils
flags.DEFINE_enum(
'mode', 'train_and_eval', ['train_and_eval', 'export_only'],
'One of {"train_and_eval", "export_only"}. `train_and_eval`: '
'trains the model and evaluates in the meantime. '
'`export_only`: will take the latest checkpoint inside '
'model_dir and export a `SavedModel`.')
flags.DEFINE_string('train_data_path', None,
'Path to training data for BERT classifier.')
flags.DEFINE_string('eval_data_path', None,
'Path to evaluation data for BERT classifier.')
# Model training specific flags.
flags.DEFINE_string(
'input_meta_data_path', None,
'Path to file that contains meta data about input '
'to be used for training and evaluation.')
flags.DEFINE_integer('train_batch_size', 32, 'Batch size for training.')
flags.DEFINE_integer('eval_batch_size', 32, 'Batch size for evaluation.')
common_flags.define_common_bert_flags()
FLAGS = flags.FLAGS
def get_loss_fn(num_classes, loss_factor=1.0):
"""Gets the classification loss function."""
def classification_loss_fn(labels, logits):
"""Classification loss."""
labels = tf.squeeze(labels)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(
tf.cast(labels, dtype=tf.int32), depth=num_classes, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(
tf.cast(one_hot_labels, dtype=tf.float32) * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
loss *= loss_factor
return loss
return classification_loss_fn
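# Illustrative check (hypothetical values, not executed here): with
# num_classes=2,
#   loss_fn = get_loss_fn(2)
#   loss_fn(labels=tf.constant([[0], [1]]),
#           logits=tf.constant([[4.0, 0.0], [0.0, 4.0]]))
# returns the mean sparse softmax cross-entropy over the batch, scaled by
# `loss_factor`.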
def get_dataset_fn(input_file_pattern, max_seq_length, global_batch_size,
is_training):
"""Gets a closure to create a dataset."""
def _dataset_fn(ctx=None):
"""Returns tf.data.Dataset for distributed BERT pretraining."""
batch_size = ctx.get_per_replica_batch_size(
global_batch_size) if ctx else global_batch_size
dataset = input_pipeline.create_classifier_dataset(
input_file_pattern,
max_seq_length,
batch_size,
is_training=is_training,
input_pipeline_context=ctx)
return dataset
return _dataset_fn
def run_bert_classifier(strategy,
bert_config,
input_meta_data,
model_dir,
epochs,
steps_per_epoch,
steps_per_loop,
eval_steps,
warmup_steps,
initial_lr,
init_checkpoint,
train_input_fn,
eval_input_fn,
custom_callbacks=None,
run_eagerly=False,
use_keras_compile_fit=False):
"""Run BERT classifier training using low-level API."""
max_seq_length = input_meta_data['max_seq_length']
num_classes = input_meta_data['num_labels']
def _get_classifier_model():
"""Gets a classifier model."""
classifier_model, core_model = (
bert_models.classifier_model(
bert_config,
tf.float32,
num_classes,
max_seq_length,
hub_module_url=FLAGS.hub_module_url))
classifier_model.optimizer = optimization.create_optimizer(
initial_lr, steps_per_epoch * epochs, warmup_steps)
if FLAGS.fp16_implementation == 'graph_rewrite':
# Note: when flags_obj.fp16_implementation == "graph_rewrite", dtype as
# determined by flags_core.get_tf_dtype(flags_obj) would be 'float32'
# which will ensure tf.compat.v2.keras.mixed_precision and
# tf.train.experimental.enable_mixed_precision_graph_rewrite do not double
# up.
classifier_model.optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(
classifier_model.optimizer)
return classifier_model, core_model
# During distributed training, loss used for gradient computation is
# summed over from all replicas. When Keras compile/fit() API is used,
# the fit() API internally normalizes the loss by dividing the loss by
# the number of replicas used for computation. However, when custom
# training loop is used this is not done automatically and should be
# done manually by the end user.
loss_multiplier = 1.0
if FLAGS.scale_loss and not use_keras_compile_fit:
loss_multiplier = 1.0 / strategy.num_replicas_in_sync
loss_fn = get_loss_fn(num_classes, loss_factor=loss_multiplier)
# Defines evaluation metrics function, which will create metrics in the
# correct device and strategy scope.
def metric_fn():
return tf.keras.metrics.SparseCategoricalAccuracy(
'test_accuracy', dtype=tf.float32)
if use_keras_compile_fit:
# Start training using Keras compile/fit API.
logging.info('Training using TF 2.0 Keras compile/fit API with '
'distribution strategy.')
return run_keras_compile_fit(
model_dir,
strategy,
_get_classifier_model,
train_input_fn,
eval_input_fn,
loss_fn,
metric_fn,
init_checkpoint,
epochs,
steps_per_epoch,
eval_steps,
custom_callbacks=None)
# Use user-defined loop to start training.
logging.info('Training using customized training loop TF 2.0 with '
'distribution strategy.')
return model_training_utils.run_customized_training_loop(
strategy=strategy,
model_fn=_get_classifier_model,
loss_fn=loss_fn,
model_dir=model_dir,
steps_per_epoch=steps_per_epoch,
steps_per_loop=steps_per_loop,
epochs=epochs,
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
eval_steps=eval_steps,
init_checkpoint=init_checkpoint,
metric_fn=metric_fn,
custom_callbacks=custom_callbacks,
run_eagerly=run_eagerly)
def run_keras_compile_fit(model_dir,
strategy,
model_fn,
train_input_fn,
eval_input_fn,
loss_fn,
metric_fn,
init_checkpoint,
epochs,
steps_per_epoch,
eval_steps,
custom_callbacks=None):
"""Runs BERT classifier model using Keras compile/fit API."""
with strategy.scope():
training_dataset = train_input_fn()
evaluation_dataset = eval_input_fn()
bert_model, sub_model = model_fn()
optimizer = bert_model.optimizer
if init_checkpoint:
checkpoint = tf.train.Checkpoint(model=sub_model)
checkpoint.restore(init_checkpoint).assert_existing_objects_matched()
bert_model.compile(optimizer=optimizer, loss=loss_fn, metrics=[metric_fn()])
summary_dir = os.path.join(model_dir, 'summaries')
summary_callback = tf.keras.callbacks.TensorBoard(summary_dir)
checkpoint_path = os.path.join(model_dir, 'checkpoint')
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
checkpoint_path, save_weights_only=True)
if custom_callbacks is not None:
custom_callbacks += [summary_callback, checkpoint_callback]
else:
custom_callbacks = [summary_callback, checkpoint_callback]
bert_model.fit(
x=training_dataset,
validation_data=evaluation_dataset,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_steps=eval_steps,
callbacks=custom_callbacks)
return bert_model
def export_classifier(model_export_path, input_meta_data,
restore_model_using_load_weights,
bert_config, model_dir):
"""Exports a trained model as a `SavedModel` for inference.
Args:
model_export_path: a string specifying the path to the SavedModel directory.
input_meta_data: dictionary containing meta data about input and model.
restore_model_using_load_weights: Whether to use checkpoint.restore() API
for custom checkpoint or to use model.load_weights() API.
There are 2 different ways to save checkpoints. One is using
tf.train.Checkpoint and another is using Keras model.save_weights().
Custom training loop implementation uses tf.train.Checkpoint API
and Keras ModelCheckpoint callback internally uses model.save_weights()
      API. Since these two APIs cannot be used together, model loading logic
      must take into account how the model checkpoint was saved.
bert_config: Bert configuration file to define core bert layers.
model_dir: The directory where the model weights and training/evaluation
summaries are stored.
Raises:
Export path is not specified, got an empty string or None.
"""
if not model_export_path:
raise ValueError('Export path is not specified: %s' % model_export_path)
if not model_dir:
    raise ValueError('Model directory is not specified: %s' % model_dir)
classifier_model = bert_models.classifier_model(
bert_config, tf.float32, input_meta_data['num_labels'],
input_meta_data['max_seq_length'])[0]
model_saving_utils.export_bert_model(
model_export_path,
model=classifier_model,
checkpoint_dir=model_dir,
restore_model_using_load_weights=restore_model_using_load_weights)
def run_bert(strategy,
input_meta_data,
train_input_fn=None,
eval_input_fn=None):
"""Run BERT training."""
if FLAGS.model_type == 'bert':
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
else:
assert FLAGS.model_type == 'albert'
bert_config = modeling.AlbertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.mode == 'export_only':
# As Keras ModelCheckpoint callback used with Keras compile/fit() API
# internally uses model.save_weights() to save checkpoints, we must
# use model.load_weights() when Keras compile/fit() is used.
export_classifier(FLAGS.model_export_path, input_meta_data,
FLAGS.use_keras_compile_fit,
bert_config, FLAGS.model_dir)
return
if FLAGS.mode != 'train_and_eval':
raise ValueError('Unsupported mode is specified: %s' % FLAGS.mode)
# Enables XLA in Session Config. Should not be set for TPU.
keras_utils.set_config_v2(FLAGS.enable_xla)
epochs = FLAGS.num_train_epochs
train_data_size = input_meta_data['train_data_size']
steps_per_epoch = int(train_data_size / FLAGS.train_batch_size)
warmup_steps = int(epochs * train_data_size * 0.1 / FLAGS.train_batch_size)
eval_steps = int(
math.ceil(input_meta_data['eval_data_size'] / FLAGS.eval_batch_size))
if not strategy:
raise ValueError('Distribution strategy has not been specified.')
trained_model = run_bert_classifier(
strategy,
bert_config,
input_meta_data,
FLAGS.model_dir,
epochs,
steps_per_epoch,
FLAGS.steps_per_loop,
eval_steps,
warmup_steps,
FLAGS.learning_rate,
FLAGS.init_checkpoint,
train_input_fn,
eval_input_fn,
run_eagerly=FLAGS.run_eagerly,
use_keras_compile_fit=FLAGS.use_keras_compile_fit)
if FLAGS.model_export_path:
# As Keras ModelCheckpoint callback used with Keras compile/fit() API
# internally uses model.save_weights() to save checkpoints, we must
# use model.load_weights() when Keras compile/fit() is used.
model_saving_utils.export_bert_model(
FLAGS.model_export_path,
model=trained_model,
restore_model_using_load_weights=FLAGS.use_keras_compile_fit)
return trained_model
def main(_):
# Users should always run this script under TF 2.x
assert tf.version.VERSION.startswith('2.')
with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader:
input_meta_data = json.loads(reader.read().decode('utf-8'))
if not FLAGS.model_dir:
FLAGS.model_dir = '/tmp/bert20/'
strategy = distribution_utils.get_distribution_strategy(
distribution_strategy=FLAGS.distribution_strategy,
num_gpus=FLAGS.num_gpus,
tpu_address=FLAGS.tpu)
max_seq_length = input_meta_data['max_seq_length']
train_input_fn = get_dataset_fn(
FLAGS.train_data_path,
max_seq_length,
FLAGS.train_batch_size,
is_training=True)
eval_input_fn = get_dataset_fn(
FLAGS.eval_data_path,
max_seq_length,
FLAGS.eval_batch_size,
is_training=False)
run_bert(strategy, input_meta_data, train_input_fn, eval_input_fn)
if __name__ == '__main__':
flags.mark_flag_as_required('bert_config_file')
flags.mark_flag_as_required('input_meta_data_path')
flags.mark_flag_as_required('model_dir')
app.run(main)
|
the-stack_0_19742 | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.atoms.affine.trace import trace
from cvxpy.expressions.variable import Variable
def normNuc_canon(expr, args):
A = args[0]
m, n = A.shape
# Create the equivalent problem:
# minimize (trace(U) + trace(V))/2
# subject to:
# [U A; A.T V] is positive semidefinite
X = Variable((m+n, m+n), PSD=True)
constraints = []
# Fix X using the fact that A must be affine by the DCP rules.
# X[0:rows,rows:rows+cols] == A
constraints.append(X[0:m, m:m+n] == A)
trace_value = 0.5 * trace(X)
return trace_value, constraints
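# Minimal usage sketch (assumes the cvxpy front end; names outside this module
# are illustrative):
#
#   import cvxpy as cp
#   A = cp.Variable((3, 2))
#   prob = cp.Problem(cp.Minimize(cp.normNuc(A)), [A[0, 0] == 1])
#
# During cone-program canonicalization, normNuc_canon replaces the nuclear
# norm atom with 0.5 * trace(X) subject to the PSD block-matrix constraint
# built above.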
|
the-stack_0_19743 | #!/usr/bin/env python3
#
# File: changeStatisticsALAAM.py
# Author: Alex Stivala
# Created: February 2020
#
"""Functions to compute change statistics for ALAAM. Each function takes a Graph
G and outcome vector A and returns the change statistic for changing
outcome of node i to 1.
The attribute statistics take also as their first parameter the name of
the attribute to use, used as the key in the relevant attribute dictionary
in the Graph object. So that these functions have the same signature as
the structural statistics, use functools.partial() to create a function
with the (G, A, i) signature, e.g. partial(changeo_Oc, "age").
Similarly for the use of the setting network (another fixed graph for
different relationships than the main graph G) the setting-homophily
change statistic is used as e.g. partial(changeSettingHomophily, Gsetting).
The change statistics here are documented in Daraganova & Robins
(2013) Tables 9.1-9.3 (pp. 110-112) and the PNet manual Appendix B
"IPNet Graph Statistics" (pp. 42-43), and here I use a similar naming
convention to the latter. Similarly, the diagrams will follow a
similar convention where a black or solid node, shown as an asterisk
"*" here, denotes an actor with the outcome attribute, while a hollow
or white node, shown as an lowercase "o" here, denotes an actor with
or without the attribute.
See
G. Daraganova and G. Robins. Autologistic actor attribute models. In
D. Lusher, J. Koskinen, and G. Robins, editors, Exponential Random
Graph Models for Social Networks, chapter 9, pages 102-114. Cambridge
University Press, New York, 2013.
G. Robins, P. Pattison, and P. Elliott. Network models for social
influence processes. Psychometrika, 66(2):161-189, 2001.
Wang, P., Robins, G., & Pattison, P. (2009). PNet: A program for the
simulation and estimation of exponential random graph
models. University of Melbourne.
"""
import math
from Graph import Graph, NA_VALUE
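# Usage sketch (illustrative): statistics that need an attribute name or a
# setting network are bound with functools.partial so every change statistic
# has the uniform (G, A, i) signature, e.g.
#
#   from functools import partial
#   changestats = [changeDensity, changeActivity,
#                  partial(changeo_Oc, "age"),
#                  partial(changeoO_Osame, "gender")]
#   deltas = [f(G, A, i) for f in changestats]
#
# Here "age" and "gender" are hypothetical attribute names; they must be keys
# in G.contattr and G.catattr respectively.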
def changeDensity(G, A, i):
"""
change statistic for [outcome attribute] Density
*
"""
return 1
def changeActivity(G, A, i):
"""
change statistic for Activity
*--o
"""
return G.degree(i)
def changeTwoStar(G, A, i):
"""
Change statistic for two-star
*--o
\
o
"""
return (G.degree(i) * (G.degree(i) - 1))/2.0 if G.degree(i) > 1 else 0
def changeThreeStar(G, A, i):
"""
Change statistic for three-star
o
/
*--o
\
o
"""
return ( G.degree(i) * (G.degree(i) - 1) * (G.degree(i) - 2) / 6.0
if G.degree(i) > 2 else 0 )
def changePartnerActivityTwoPath(G, A, i):
"""
Change statistic for partner activity actor two-path (Alter-2Star1A)
*--o--o
"""
delta = 0
for u in G.neighbourIterator(i):
delta += G.degree(i) + G.degree(u) - 2
return delta
def changeTriangleT1(G, A, i):
"""
Change statistic for actor triangle (T1)
o
/ \
*---o
"""
delta = 0
if G.degree(i) < 2:
return 0
else:
for u in G.neighbourIterator(i):
for v in G.neighbourIterator(u):
if v != i and G.isEdge(i, v):
delta += 1
assert delta % 2 == 0
return delta / 2.0
def changeContagion(G, A, i):
"""
change statistic for Contagion (partner attribute)
*--*
"""
delta = 0
for u in G.neighbourIterator(i):
if A[u] == 1:
delta += 1
return delta
def changeIndirectPartnerAttribute(G, A, i):
"""
Change statistic for indirect partner attribute (Alter-2Star2A);
structural equivalence between actors with attribute (two-path equivalence)
*--o--*
"""
delta = 0
for u in G.neighbourIterator(i):
for v in G.neighbourIterator(u):
if v != i and A[v] == 1:
delta += 1
return delta
def changePartnerAttributeActivity(G, A, i):
"""Change statistic for partner attribute activity (NB called
"Partner-Activity" in PNet manual IPNet graph statistics (p. 42))
*--*--o
"""
delta = 0
for u in G.neighbourIterator(i):
if A[u] == 1:
delta += G.degree(i) + G.degree(u) - 2
return delta
# def changePartnerPartnerAttribute_OLD(G, A, i):
# """
# Change statistic for partner-partner-attribute (partner-resource)
# *--*--*
# """
# delta = 0
# for u in G.neighbourIterator(i):
# if A[u] == 1:
# # FIXME this is inefficient, iterating over all nodes
# for v in range(G.numNodes()):
# if v == i or v == u:
# continue
# if A[v] == 1:
# if G.isEdge(u, v):
# delta += 2
# if G.isEdge(i, v):
# delta += 1
# return delta
def changePartnerPartnerAttribute(G, A, i):
"""
Change statistic for partner-partner-attribute (partner-resource)
*--*--*
"""
# delta_OLD = changePartnerPartnerAttribute_OLD(G, A, i)
delta = 0
for u in G.neighbourIterator(i):
if A[u] == 1:
for v in G.neighbourIterator(u):
if A[v] == 1:
delta += 2
for v in G.neighbourIterator(i):
if A[v] == 1 and v != u:
delta += 1
# assert delta == delta_OLD
return delta
def changeTriangleT2(G, A, i):
"""
Change statistic for partner attribute triangle (T2)
*
/ \
*---o
"""
delta = 0
if G.degree(i) < 2:
return 0
else:
for u in G.neighbourIterator(i):
if A[u] == 1:
for v in G.neighbourIterator(u):
if v != i and G.isEdge(i, v):
delta += 1
return delta
def changeTriangleT3(G, A, i):
"""
Change statistic for partner-partner attribute triangle (T3)
*
/ \
*---*
"""
delta = 0
if G.degree(i) < 2:
return 0
else:
for u in G.neighbourIterator(i):
if A[u] == 1:
for v in G.neighbourIterator(u):
if v != i and A[v] == 1 and G.isEdge(i, v):
delta += 1
assert delta % 2 == 0
return delta / 2.0
def changeoOb(attrname, G, A, i):
"""change statistic for binary exogenous attribute oOb (outcome
attribute related to binary attribute on same node)
[*]
"""
return 0 if G.binattr[attrname][i] == NA_VALUE else G.binattr[attrname][i]
def changeo_Ob(attrname, G, A, i):
"""change statistic for binary exogenous partner attribute o_Ob (outcome
attribute related to binary attribute on partner node)
*--[o]
"""
delta = 0
for u in G.neighbourIterator(i):
        # use the partner's attribute, skipping missing (NA) values
        delta += 0 if G.binattr[attrname][u] == NA_VALUE else G.binattr[attrname][u]
return delta
def changeoOc(attrname, G, A, i):
"""change statistic for continuous exogenous attribute oOc (outcome
attribute related to continuous attribute on same node)
(*)
"""
return 0 if math.isnan(G.contattr[attrname][i]) else G.contattr[attrname][i]
def changeo_Oc(attrname, G, A, i):
"""change statistic for continuous exogenous partner attribute o_Oc (outcome
attribute related to continuous attribute on partner node)
*--(o)
"""
delta = 0
for u in G.neighbourIterator(i):
delta += 0 if math.isnan(G.contattr[attrname][u]) else G.contattr[attrname][u]
return delta
def changeoO_Osame(attrname, G, A, i):
"""
Change statistic for categorical matching exogenous attributes oO_Osame
    (outcome attribute related to matching categorical exogenous attributes on
this and partner node)
{*}--{o}
"""
delta = 0
for u in G.neighbourIterator(i):
if (G.catattr[attrname][u] != NA_VALUE and G.catattr[attrname][i] != NA_VALUE and
G.catattr[attrname][u] == G.catattr[attrname][i]):
delta += 1
return delta
def changeoO_Odiff(attrname, G, A, i):
"""Change statistic for categorical mismatching exogenous attributes
    oO_Odiff (outcome attribute related to mismatching categorical
exogenous attributes on this and partner node)
{*}--<o>
"""
delta = 0
for u in G.neighbourIterator(i):
if (G.catattr[attrname][u] != NA_VALUE and G.catattr[attrname][i] != NA_VALUE and
G.catattr[attrname][u] != G.catattr[attrname][i]):
delta += 1
return delta
def changeSettingHomophily(settingGraph, G, A, i):
"""Change statistic for Setting-Homophily, outcome attribute on two actors
connected in the setting network.
*...*
(where '...' denotes an edge in the setting network (settingGraph) rather
than the main network G denoted '--')
"""
delta = 0
for u in settingGraph.neighbourIterator(i): #note settingGraph not G
if A[u] == 1:
delta += 1
return delta
|
the-stack_0_19745 | """This example shows how to hook the property getting/setting process to
change the value before it is saved and before it is applied again.
E.g. consider a property that stores a namedtuple that we need to dump as a
list (because YAML doesn't understand named tuples) and turn back into a
named tuple when restoring.
``get_config_property`` and
``apply_config_property`` are the needed hook methods; they are
automatically used if present in the class. See also ``apply_config_child``
for similarly hooking into applying the children objects.
The default, when not provided, is to use ``apply_config``, so if
overriding, that should probably also be used for the base case.
"""
from collections import namedtuple
from tree_config import dump_config, load_config, apply_config, \
read_config_from_object
Point = namedtuple('Point', ['x', 'y'])
class App:
_config_props_ = ('point', 'name')
point = Point(11, 34)
name = ''
def get_config_property(self, name):
if name == 'point':
return tuple(self.point)
return getattr(self, name)
def apply_config_property(self, name, value):
if name == 'point':
self.point = Point(*value)
else:
setattr(self, name, value)
if __name__ == '__main__':
# create app and set properties
app = App()
# now get and save config to yaml file
dump_config('custom_value_example.yaml', read_config_from_object(app))
print(f'point is: {app.point}')
# Now we should have a custom_value_example.yaml file with the contents:
"""
name: ''
point: [11, 34]
"""
# load config and apply it
apply_config(app, load_config(app, 'custom_value_example.yaml'))
print(f'point is: {app.point}')
# when run, this prints:
"""
point is: Point(x=11, y=34)
point is: Point(x=11, y=34)
"""
|
the-stack_0_19746 | import pytest
from astropy.tests.helper import assert_quantity_allclose
from astropy import units as u
from astropy.time import Time
from poliastro.bodies import Earth, Sun
from poliastro.twobody import Orbit
from poliastro.twobody.propagation import kepler, mean_motion, cowell
import numpy as np
from poliastro.util import norm
def test_sample_angle_zero_returns_same():
# Data from Vallado, example 2.4
r0 = [1131.340, -2282.343, 6672.423] * u.km
v0 = [-5.64305, 4.30333, 2.42879] * u.km / u.s
ss0 = Orbit.from_vectors(Earth, r0, v0)
nu_values = [0] * u.deg
_, rr = ss0.sample(ss0.nu + nu_values)
assert_quantity_allclose(rr[0].get_xyz(), ss0.r)
@pytest.mark.parametrize("time_of_flight", [1 * u.min, 40 * u.min])
@pytest.mark.parametrize("method", [kepler, mean_motion, cowell])
def test_sample_one_point_equals_propagation_small_deltas(time_of_flight, method):
# Time arithmetic loses precision, see
# https://github.com/astropy/astropy/issues/6638
# Data from Vallado, example 2.4
r0 = [1131.340, -2282.343, 6672.423] * u.km
v0 = [-5.64305, 4.30333, 2.42879] * u.km / u.s
ss0 = Orbit.from_vectors(Earth, r0, v0)
sample_times = Time([ss0.epoch + time_of_flight])
expected_ss = ss0.propagate(time_of_flight, method)
_, rr = ss0.sample(sample_times, method)
assert_quantity_allclose(rr[0].get_xyz(), expected_ss.r)
@pytest.mark.parametrize("time_of_flight", [6 * u.h, 2 * u.day])
@pytest.mark.parametrize("method", [kepler, mean_motion, cowell])
def test_sample_one_point_equals_propagation_big_deltas(time_of_flight, method):
# Data from Vallado, example 2.4
r0 = [1131.340, -2282.343, 6672.423] * u.km
v0 = [-5.64305, 4.30333, 2.42879] * u.km / u.s
ss0 = Orbit.from_vectors(Earth, r0, v0)
sample_times = Time([ss0.epoch + time_of_flight])
expected_ss = ss0.propagate(time_of_flight)
_, rr = ss0.sample(sample_times, method)
assert_quantity_allclose(rr[0].get_xyz(), expected_ss.r)
def test_sample_nu_values():
# Data from Vallado, example 2.4
r0 = [1131.340, -2282.343, 6672.423] * u.km
v0 = [-5.64305, 4.30333, 2.42879] * u.km / u.s
ss0 = Orbit.from_vectors(Earth, r0, v0)
nu_values = [0, 90, 180] * u.deg
expected_ss = ss0.propagate(ss0.period / 2)
_, rr = ss0.sample(nu_values)
assert len(rr) == len(nu_values)
assert_quantity_allclose(norm(rr[0].get_xyz()), expected_ss.r_p)
assert_quantity_allclose(norm(rr[-1].get_xyz()), expected_ss.r_a)
@pytest.mark.parametrize("num_points", [3, 5, 7, 9, 11, 101])
def test_sample_num_points(num_points):
# Data from Vallado, example 2.4
r0 = [1131.340, -2282.343, 6672.423] * u.km
v0 = [-5.64305, 4.30333, 2.42879] * u.km / u.s
ss0 = Orbit.from_vectors(Earth, r0, v0)
# TODO: Test against the perigee and apogee
# expected_ss = ss0.propagate(ss0.period / 2)
_, rr = ss0.sample(num_points)
assert len(rr) == num_points
# assert_quantity_allclose(rr[num_points // 2].get_xyz(), expected_ss.r)
@pytest.mark.parametrize('method', [
mean_motion,
cowell,
pytest.param(kepler, marks=pytest.mark.xfail),
])
def test_sample_big_orbits(method):
# See https://github.com/poliastro/poliastro/issues/265
ss = Orbit.from_vectors(
Sun,
[-9018878.6, -94116055, 22619059] * u.km,
[-49.950923, -12.948431, -4.2925158] * u.km / u.s
)
times, positions = ss.sample(15, method=method)
assert len(times) == len(positions) == 15
|
the-stack_0_19754 | # coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from lrs import lr
class NoamDecayLr(lr.Lr):
"""Decay the learning rate during each training step, follows Transformer"""
def __init__(self,
init_lr, # initial learning rate
warmup_steps, # warmup step
hidden_size, # model hidden size
name="noam_decay_lr" # model name, no use
):
super(NoamDecayLr, self).__init__(init_lr, name=name)
self.warmup_steps = warmup_steps
self.hidden_size = hidden_size
def step(self, step):
step = float(step)
warmup_steps = float(self.warmup_steps)
multiplier = float(self.hidden_size) ** -0.5
decay = multiplier * np.minimum((step + 1) * (warmup_steps ** -1.5),
(step + 1) ** -0.5)
self.lrate = self.init_lrate * decay
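# Illustrative usage (a sketch; assumes the `lr.Lr` base class stores the
# initial rate as `self.init_lrate` and the current rate as `self.lrate`,
# as used above):
#
#     schedule = NoamDecayLr(init_lr=2.0, warmup_steps=4000, hidden_size=512)
#     for step in range(100000):
#         schedule.step(step)          # updates schedule.lrate in place
#         # pass schedule.lrate to the optimizer here
#
# The rate rises roughly linearly for the first `warmup_steps` updates and
# then decays proportionally to 1/sqrt(step), matching the Noam schedule
# from the Transformer paper.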
|
the-stack_0_19758 | import copy
import logging
from itertools import chain
import numpy as np
import torch
from torch.nn import functional as F
from rltoolkit import config
from rltoolkit.algorithms.ddpg import DDPG
from rltoolkit.algorithms.sac.models import SAC_Actor, SAC_Critic
logger = logging.getLogger(__name__)
class SAC(DDPG):
def __init__(
self,
alpha_lr: float = config.ALPHA_LR,
alpha: float = config.ALPHA,
tau: float = config.TAU,
pi_update_freq: int = config.PI_UPDATE_FREQ,
act_noise: float = 0,
*args,
**kwargs,
):
f"""Soft Actor-Critic implementation
Args:
alpha_lr (float, optional): Learning rate of the alpha.
Defaults to { config.ALPHA_LR }.
alpha (float, optional): Initial alpha value. Defaults to { config.ALPHA }.
pi_update_freq (int, optional): Frequency of policy updates
(in SAC updates). Defaults to { config.PI_UPDATE_FREQ }.
act_noise (float, optional): Actions noise multiplier.
Defaults to { 0 }.
actor_lr (float, optional): Learning rate of the actor.
Defaults to { config.DDPG_LR }.
critic_lr (float, optional): Learning rate of the critic.
Defaults to { config.DDPG_LR }.
tau (float, optional): Tau coefficient for polyak averaging.
Defaults to { config.TAU }.
update_batch_size (int, optional): Batch size for gradient step.
Defaults to { config.UPDATE_BATCH_SIZE }.
buffer_size (int, optional): Size of replay buffer.
Defaults to { config.BUFFER_SIZE }.
random_frames (int, optional): Number of frames with random actions at
the beginning. Defaults to { config.RANDOM_FRAMES }.
update_freq (int, optional): Freqency of SAC updates (in frames).
Defaults to { config.UPDATE_FREQ }.
grad_steps (int, optional): Number of SAC updates for one step.
Defaults to { config.GRAD_STEPS }.
env_name (str, optional): Name of the gym environment.
Defaults to { config.ENV_NAME }.
gamma (float, optional): Discount factor. Defaults to { config.GAMMA }.
stats_freq (int, optional): Frequency of logging the progress.
Defaults to { config.STATS_FREQ }.
batch_size (int, optional): Number of frames used for one algorithm step
(could be higher because batch collection stops when rollout ends).
Defaults to { config.BATCH_SIZE }.
iterations (int, optional): Number of algorithm iterations.
Defaults to { config.ITERATIONS }.
max_frames (int, optional): Limit of frames for training.
Defaults to { None }.
return_done (Union[int, None], optional): target return, which will stop
training if reached. Defaults to { config.RETURN_DONE }.
log_dir (str, optional): Path for basic logs which includes final model.
Defaults to { config.LOG_DIR }.
use_gpu (bool, optional): Use CUDA. Defaults to { config.USE_GPU }.
tensorboard_dir (Union[str, None], optional): Path to tensorboard logs.
Defaults to { config.TENSORBOARD_DIR }.
tensorboard_comment (str, optional): Comment for tensorboard files.
Defaults to { config.TENSORBOARD_COMMENT }.
verbose (int, optional): Verbose level. Defaults to { config.VERBOSE }.
render (bool, optional): Render rollouts to tensorboard.
Defaults to { config.RENDER }.
"""
super().__init__(*args, **kwargs)
self._actor = None
self.actor_optimizer = None
self._critic_1 = None
self.critic_1_optimizer = None
self.critic_1_targ = None
self._critic_2 = None
self.critic_2_optimizer = None
self.critic_2_targ = None
self.alpha_lr = alpha_lr
self.alpha = alpha
self.pi_update_freq = pi_update_freq
self.actor = SAC_Actor(self.ob_dim, self.ac_lim, self.ac_dim, self.discrete)
self.critic_1 = SAC_Critic(self.ob_dim, self.ac_dim, self.discrete)
self.critic_2 = SAC_Critic(self.ob_dim, self.ac_dim, self.discrete)
self.loss = {"actor": 0.0, "critic_1": 0.0, "critic_2": 0.0}
new_hparams = {
"hparams/alpha_lr": self.alpha_lr,
"hparams/alpha": self.alpha,
"hparams/pi_update_freq": self.pi_update_freq,
}
self.hparams.update(new_hparams)
self.target_entropy = -torch.prod(
torch.tensor(self.ac_dim, dtype=torch.float32)
).item()
self.log_alpha = torch.tensor(
np.log(self.alpha), requires_grad=True, device=self.device
)
self.alpha_opt = self.opt([self.log_alpha], lr=alpha_lr)
@property
def actor(self):
return self._actor
@actor.setter
def actor(self, model: torch.nn.Module):
self._actor, self.actor_optimizer = self.set_model(model, self.actor_lr)
@property
def critic_1(self):
return self._critic_1
@critic_1.setter
def critic_1(self, model: torch.nn.Module):
self._critic_1, self.critic_1_optimizer = self.set_model(model, self.critic_lr)
self.critic_1_targ = copy.deepcopy(self._critic_1)
@property
def critic_2(self):
return self._critic_2
@critic_2.setter
def critic_2(self, model: torch.nn.Module):
self._critic_2, self.critic_2_optimizer = self.set_model(model, self.critic_lr)
self.critic_2_targ = copy.deepcopy(self._critic_2)
def compute_qfunc_targ(
self, reward: torch.Tensor, next_obs: torch.Tensor, done: torch.Tensor
):
"""Compute targets for Q-functions
Args:
reward (torch.Tensor): batch of rewards
next_obs (torch.Tensor): batch of next observations
done (torch.Tensor): batch of done
Returns:
torch.Tensor: Q-function targets for the batch
"""
with torch.no_grad():
sampled_next_action, sampled_next_logprob = self._actor(next_obs)
q1_target = self.critic_1_targ(next_obs, sampled_next_action)
q2_target = self.critic_2_targ(next_obs, sampled_next_action)
q_target = torch.min(q1_target, q2_target)
qfunc_target = reward + self.gamma * (1 - done) * (
q_target - self.alpha * sampled_next_logprob
)
return qfunc_target
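# In symbols (a sketch of the computation above): with a' sampled from the
# current policy pi(.|s'),
#     y = r + gamma * (1 - done) * (min(Q1_targ(s', a'), Q2_targ(s', a')) - alpha * log pi(a'|s'))
# which is the soft Bellman backup used by SAC.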
def compute_pi_loss(
self,
obs: torch.Tensor,
sampled_action: torch.Tensor,
sampled_logprob: torch.Tensor,
):
"""Loss for the policy
Args:
obs (torch.Tensor): batch of observations
sampled_action (torch.Tensor): actions sampled from policy
sampled_logprob (torch.Tensor): log-probabilities of actions
Returns:
torch.Tensor: policy loss
"""
q1 = self._critic_1(obs, sampled_action)
q2 = self._critic_2(obs, sampled_action)
q = torch.min(q1, q2)
loss = (self.alpha * sampled_logprob - q).mean()
return loss
def update_target_q(self):
"""Update target networks with Polyak averaging
"""
with torch.no_grad():
# Polyak averaging:
critics_params = chain(
self._critic_1.parameters(), self._critic_2.parameters()
)
targets_params = chain(
self.critic_1_targ.parameters(), self.critic_2_targ.parameters()
)
for q_params, targ_params in zip(critics_params, targets_params):
targ_params.data.mul_(1 - self.tau)
targ_params.data.add_((self.tau) * q_params.data)
def compute_alpha_loss(self, sampled_logprob: torch.Tensor):
"""Compute loss for temperature update
Args:
sampled_logprob (torch.Tensor): batch of sampled log-probabilities
from the actor
Returns:
torch.Tensor: loss for temperature (alpha)
"""
# alpha_loss = (
# self.log_alpha * (-sampled_logprob.detach() - self.target_entropy)
# ).mean()
sampled_logprob = sampled_logprob.detach()
alpha_loss = self.log_alpha.exp() * (-sampled_logprob - self.target_entropy)
return alpha_loss.mean()
def update(
self,
obs: torch.Tensor,
next_obs: torch.Tensor,
action: torch.Tensor,
reward: torch.Tensor,
done: torch.Tensor,
):
"""Soft Actor-Critic update:
Args:
obs (torch.Tensor): observations tensor
next_obs (torch.Tensor): next observations tensor
action (torch.Tensor): actions tensor
reward (torch.Tensor): rewards tensor
done (torch.Tensor): dones tensor
"""
y = self.compute_qfunc_targ(reward, next_obs, done)
# Update Q-functions by one step
y_q1 = self._critic_1(obs, action)
loss_q1 = F.mse_loss(y_q1, y)
y_q2 = self._critic_2(obs, action)
loss_q2 = F.mse_loss(y_q2, y)
self.loss["critic_1"] = loss_q1.item()
self.loss["critic_2"] = loss_q2.item()
self.critic_1_optimizer.zero_grad()
loss_q1.backward()
self.critic_1_optimizer.step()
self.critic_2_optimizer.zero_grad()
loss_q2.backward()
self.critic_2_optimizer.step()
# Update policy by one step
self._critic_1.eval()
self._critic_2.eval()
sampled_action, sampled_logprob = self._actor(obs)
# if self.stats_logger.frames % (self.update_freq * self.pi_update_freq) == 0:
loss = self.compute_pi_loss(obs, sampled_action, sampled_logprob)
self.loss["actor"] = loss.item()
self.actor_optimizer.zero_grad()
loss.backward()
self.actor_optimizer.step()
# Update target networks
self.update_target_q()
self._critic_1.train()
self._critic_2.train()
# Update temperature
alpha_loss = self.compute_alpha_loss(sampled_logprob)
self.alpha_opt.zero_grad()
alpha_loss.backward()
self.alpha_opt.step()
self.alpha = self.log_alpha.exp().item()
def add_tensorboard_logs(self, *args, **kwargs):
super().add_tensorboard_logs(*args, **kwargs)
if self.debug_mode:
self.tensorboard_writer.log_sac_alpha(self.iteration, self.alpha)
def collect_params_dict(self):
params_dict = {}
params_dict["actor"] = self.actor.state_dict()
params_dict["critic_1"] = self.critic_1.state_dict()
params_dict["critic_2"] = self.critic_2.state_dict()
params_dict["obs_mean"] = self.replay_buffer.obs_mean
params_dict["obs_std"] = self.replay_buffer.obs_std
return params_dict
def apply_params_dict(self, params_dict):
self.actor.load_state_dict(params_dict["actor"])
self.critic_1.load_state_dict(params_dict["critic_1"])
self.critic_2.load_state_dict(params_dict["critic_2"])
self.replay_buffer.obs_mean = params_dict["obs_mean"]
self.replay_buffer.obs_std = params_dict["obs_std"]
def save_model(self, save_path=None) -> str:
if self.filename is None and save_path is None:
raise AttributeError
elif save_path is None:
save_path = str(self.log_path)
torch.save(self._actor.state_dict(), save_path + "_actor_model.pt")
torch.save(self._critic_1.state_dict(), save_path + "_critic_1_model.pt")
torch.save(self._critic_2.state_dict(), save_path + "_critic_2_model.pt")
return save_path
if __name__ == "__main__":
with torch.cuda.device(1):
model = SAC(
env_name="HalfCheetah-v2",
iterations=200,
gamma=0.99,
batch_size=1000,
stats_freq=5,
test_episodes=2,
update_batch_size=100,
update_freq=50,
grad_steps=50,
# random_frames=10000,
use_gpu=True,
obs_norm=False,
tensorboard_dir="logs_norm",
tensorboard_comment="",
)
model.train()
|
the-stack_0_19760 | from typing import List
class Solution:
def maxArea(self, h: int, w: int, horizontalCuts: List[int], verticalCuts: List[int]) -> int:
horizontalCuts.sort()
verticalCuts.sort()
horizontalCuts = [0] + horizontalCuts + [h]
verticalCuts = [0] + verticalCuts + [w]
maxH = 0
for i in range(len(horizontalCuts)-1):
maxH = max(maxH, horizontalCuts[i+1]-horizontalCuts[i])
maxW = 0
for i in range(len(verticalCuts)-1):
maxW = max(maxW, verticalCuts[i+1]-verticalCuts[i])
return maxH * maxW % 1000000007
if __name__ == "__main__":
h = 5
w = 4
horizontalCuts = [3]
verticalCuts = [3]
s = Solution()
result = s.maxArea(h, w, horizontalCuts, verticalCuts)
print(result)
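# Worked check of the example above: the horizontal gaps are [3, 2] (max 3)
# and the vertical gaps are [3, 1] (max 3), so the script prints 3 * 3 % 1000000007 == 9.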
|
the-stack_0_19761 | from __future__ import print_function, unicode_literals
import io
import os
# Paths
SURICATA_DIR = '/opt/suricata/'
SURICATA_CONFIG_FILE = os.path.join(SURICATA_DIR, 'suricata.yaml')
IFACE_CONFIG_FILE = os.path.join(SURICATA_DIR, 'af-packet.yaml')
ADDRESS_CONFIG_FILE = os.path.join(SURICATA_DIR, 'address-groups.yaml')
# Interface configuration template
IFACE_CONFIG_TEMPLATE = """\
- interface: {iface:}
threads: 1
cluster-id: {cluster_id:}
cluster-type: cluster_flow
defrag: yes
use-mmap: yes
"""
ADDRESS_CONFIG_TEMPLATE = """\
HOME_NET: "{home_net:}"
EXTERNAL_NET: "!$HOME_NET"
HTTP_SERVERS: "$HOME_NET"
SMTP_SERVERS: "$HOME_NET"
SQL_SERVERS: "$HOME_NET"
DNS_SERVERS: "$HOME_NET"
TELNET_SERVERS: "$HOME_NET"
AIM_SERVERS: "$EXTERNAL_NET"
DNP3_SERVER: "$HOME_NET"
DNP3_CLIENT: "$HOME_NET"
MODBUS_CLIENT: "$HOME_NET"
MODBUS_SERVER: "$HOME_NET"
ENIP_CLIENT: "$HOME_NET"
ENIP_SERVER: "$HOME_NET"
"""
# Set the interfaces
env_ifaces = os.getenv('OBSRVBL_PNA_IFACES')
if env_ifaces:
all_ifaces = sorted(env_ifaces.split())
else:
all_ifaces = sorted(os.listdir('/sys/class/net/'))
with io.open(IFACE_CONFIG_FILE, 'wt') as outfile:
print('%YAML 1.1', file=outfile)
print('---', file=outfile)
print('af-packet:', file=outfile)
for i, iface in enumerate(all_ifaces):
if iface.startswith('lo'):
continue
iface_config = IFACE_CONFIG_TEMPLATE.format(
iface=iface,
cluster_id=99 - i,
)
print(iface_config, file=outfile)
# Set the home networks
env_networks = os.getenv(
'OBSRVBL_NETWORKS', '10.0.0.0/8 172.16.0.0/12 192.168.0.0/16'
)
home_net = '[{}]'.format(','.join(env_networks.split()))
with io.open(ADDRESS_CONFIG_FILE, 'wt') as outfile:
print('%YAML 1.1', file=outfile)
print('---', file=outfile)
address_config = ADDRESS_CONFIG_TEMPLATE.format(home_net=home_net)
print(address_config, file=outfile)
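# For illustration (not part of the original script): with
# OBSRVBL_PNA_IFACES="eth0" and the default OBSRVBL_NETWORKS, the loop above
# would write roughly the following af-packet.yaml:
#
#     %YAML 1.1
#     ---
#     af-packet:
#     - interface: eth0
#       threads: 1
#       cluster-id: 99
#       cluster-type: cluster_flow
#       defrag: yes
#       use-mmap: yes
#
# and address-groups.yaml would begin with
# HOME_NET: "[10.0.0.0/8,172.16.0.0/12,192.168.0.0/16]".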
|
the-stack_0_19762 | from typing import Tuple
from starlette.datastructures import Scope
from starlette.routing import Match, Route, URLPath
from .locale import get_locale
class LocaleRoute(Route):
def matches(self, scope: Scope) -> Tuple[Match, Scope]:
if scope["type"] != "http": # pragma: no cover
# Eg hot-reload WebSocket.
return super().matches(scope)
path = scope["path"]
assert "language" in scope, "LocaleMiddleware is not installed"
language = scope["language"]
# Let route match regardless of language prefix.
# Example: a request to "/fr/xyz" should match `LocaleRoute("/xyz")`.
language_prefix = f"/{language}"
if not path.startswith(language_prefix):
# Unknown languages are passed through and result
# in a "no-match", as expected.
return super().matches(scope)
# Don't normalize in-place to avoid interfering with
# matching of other routes.
normalized_scope = {**scope, "path": path[len(language_prefix) :]}
return super().matches(normalized_scope)
def url_path_for(self, name: str, **path_params: str) -> URLPath:
# Allow `url_for(..., language=...)`
language = path_params.pop("language", None)
url_path = super().url_path_for(name, **path_params)
# Default to active language if not passed as path param.
if language is None:
locale = get_locale()
language = locale.language
# Prepend language prefix
# Eg given LocaleRoute("/", name="home") and language="fr",
# url_for('home') would return "/fr/" (rather than "/").
language_prefix = f"/{language}"
assert not url_path.startswith(language_prefix)
return URLPath(f"{language_prefix}{url_path}")
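# Minimal usage sketch (illustrative; assumes a LocaleMiddleware that sets
# scope["language"] and an active locale via get_locale(), as required above;
# `home` is a placeholder endpoint):
#
#     from starlette.applications import Starlette
#
#     app = Starlette(routes=[LocaleRoute("/", endpoint=home, name="home")])
#
# A request to "/fr/" is then matched against the plain "/" pattern, and
# app.url_path_for("home", language="fr") returns "/fr/".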
|
the-stack_0_19764 | import unittest
import os
from pyiron.base.project.generic import Project
class TestGenericJob(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.file_location = os.path.dirname(os.path.abspath(__file__))
cls.project = Project(os.path.join(cls.file_location, 'jobs_testing'))
@classmethod
def tearDownClass(cls):
file_location = os.path.dirname(os.path.abspath(__file__))
project = Project(os.path.join(file_location, 'jobs_testing'))
project.remove(enforce=True)
# def test_generic_jobs(self):
# ham = self.project.create_job("ExampleJob", "job_single")
# job_ser = self.project.create_job("GenericMaster", "job_list")
# job_ser.append(ham)
# job_ser.to_hdf()
# job_ser_reload = self.project.create_job("GenericMaster", "job_list")
# job_ser_reload.from_hdf()
# self.assertTrue(job_ser_reload['job_single/input/input_inp'])
# job_ser.remove()
# ham.remove()
#
# def test_generic_jobs_ex(self):
# ham = self.project.create_job("ExampleJob", "job_single_ex")
# ham.to_hdf()
# job_ser = self.project.create_job("GenericMaster", "job_list_ex")
# job_ser.append(ham)
# job_ser.to_hdf()
# self.assertTrue(job_ser['job_single_ex/input/input_inp'])
# job_ser_reload = self.project.create_job("GenericMaster", "job_list_ex")
# job_ser_reload.from_hdf()
# self.assertTrue(job_ser_reload['job_single_ex/input/input_inp'])
# job_ser.remove()
# ham.remove()
if __name__ == '__main__':
unittest.main()
|
the-stack_0_19765 | import datetime
import io
import json
import logging
import os
import sys
import threading
import apscheduler.schedulers.background
import bs4
import colorama
import flask
import flask_cors
import googleapiclient
import requests
import src.functions.config
import src.functions.credentials
import src.functions.metadata
import src.functions.tests
colorama.init()
print(
"====================================================\n\033[96m libDrive - v1.4.7\033[94m\n @eliasbenb\033[0m\n====================================================\n"
)
print("\033[32mREADING CONFIG...\033[0m")
if os.getenv("LIBDRIVE_CONFIG"):
config_str = os.getenv("LIBDRIVE_CONFIG")
with open("config.json", "w+") as w:
json.dump(obj=json.loads(config_str), fp=w, sort_keys=True, indent=4)
config = src.functions.config.readConfig()
print("DONE.\n")
print("\033[32mREADING METADATA...\033[0m")
metadata = src.functions.metadata.readMetadata(config)
if os.getenv("LIBDRIVE_CLOUD") and config.get("refresh_token"):
config, drive = src.functions.credentials.refreshCredentials(config)
params = {
"supportsAllDrives": True,
"includeItemsFromAllDrives": True,
"fields": "files(id,name)",
"q": "'%s' in parents and trashed = false and mimeType = 'application/json'"
% (os.getenv("LIBDRIVE_CLOUD")),
}
files = drive.files().list(**params).execute()["files"]
config_file = next((i for i in files if i["name"] == "config.json"), None)
metadata_file = next((i for i in files if i["name"] == "metadata.json"), None)
if config_file:
request = drive.files().get_media(fileId=config_file["id"])
fh = io.BytesIO()
downloader = googleapiclient.http.MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
config = json.loads(fh.getvalue())
config, drive = src.functions.credentials.refreshCredentials(config)
src.functions.config.updateConfig(config)
if metadata_file:
request = drive.files().get_media(fileId=metadata_file["id"])
fh = io.BytesIO()
downloader = googleapiclient.http.MediaIoBaseDownload(fh, request)
done = False
while done is False:
status, done = downloader.next_chunk()
metadata = json.loads(fh.getvalue())
with open("metadata.json", "w+") as w:
json.dump(metadata, w)
print("DONE.\n")
if not config.get("account_list"):
config["account_list"] = []
if config.get("account_list") == [] and config.get("signup") == False:
config["auth"] = False
if not config.get("auth"):
config["auth"] = False
if not config.get("build_interval"):
config["build_interval"] = 360
if not config.get("build_type"):
config["build_type"] = "hybrid"
if not config.get("category_list"):
config["category_list"] = []
if not config.get("cloudflare"):
config["cloudflare"] = ""
if not config.get("prefer_mkv"):
config["prefer_mkv"] = False
if not config.get("prefer_mp4"):
config["prefer_mp4"] = True
if not config.get("service_accounts"):
config["service_accounts"] = []
if not config.get("signup"):
config["signup"] = False
if not config.get("subtitles"):
config["subtitles"] = False
if not config.get("transcoded"):
config["transcoded"] = False
if not config.get("ui_config"):
config["ui_config"] = {}
with open("config.json", "w+") as w:
json.dump(obj=config, fp=w, sort_keys=True, indent=4)
print("\033[32mTESTING YOUR CONFIG...\033[0m")
src.functions.tests.tmdb_test(config)
src.functions.tests.category_list_test(config)
src.functions.tests.account_list_test(config)
src.functions.tests.cloudflare_test(config)
print("DONE.\n")
def threaded_metadata():
for thread in threading.enumerate():
if thread.name == "metadata_thread":
print("DONE.\n")
return (
{
"code": 500,
"content": None,
"message": "libDrive is already building metadata, please wait.",
"success": False,
},
500,
)
config = src.functions.config.readConfig()
if len(config.get("category_list")) > 0:
metadata_thread = threading.Thread(
target=src.functions.metadata.writeMetadata,
args=(config,),
daemon=True,
name="metadata_thread",
)
metadata_thread.start()
else:
with open("./metadata.json", "w+") as w:
w.write(json.dumps([]))
return (
{
"code": 200,
"content": None,
"message": "libDrive is building your new metadata.",
"success": True,
},
200,
)
def create_app():
if os.path.exists("./build"):
LIBDRIVE_DEBUG = os.getenv("LIBDRIVE_DEBUG")
if LIBDRIVE_DEBUG:
if LIBDRIVE_DEBUG.lower() == "true":
LIBDRIVE_DEBUG = True
else:
LIBDRIVE_DEBUG = False
else:
LIBDRIVE_DEBUG = False
r = open("./build/index.html", "r")
soup = bs4.BeautifulSoup(r.read(), features="html.parser")
if config.get("ui_config", {}).get("icon"):
try:
soup.find("meta", {"id": "@ld-meta-og-image"})["content"] = config.get(
"ui_config", {}
).get("icon")
except:
pass
try:
soup.find("link", {"id": "@ld-link-icon"})["href"] = config.get(
"ui_config", {}
).get("icon")
except:
pass
else:
try:
soup.find("meta", {"id": "@ld-meta-og-image"})[
"content"
] = "/images/icons/icon-512x512.png"
except:
pass
try:
soup.find("link", {"id": "@ld-link-icon"})["href"] = "/favicon.ico"
except:
pass
if config.get("ui_config", {}).get("title"):
try:
soup.find("meta", {"id": "@ld-meta-og-title"})["content"] = config.get(
"ui_config", {}
).get("title")
except:
pass
try:
soup.find("meta", {"id": "@ld-meta-og-site_name"})[
"content"
] = config.get("ui_config", {}).get("title")
except:
pass
try:
soup.find("title", {"id": "@ld-title"}).string = config.get(
"ui_config", {}
).get("title")
except:
pass
else:
try:
soup.find("meta", {"id": "@ld-meta-og-title"})["content"] = "libDrive"
except:
pass
try:
soup.find("meta", {"id": "@ld-meta-og-site_name"})[
"content"
] = "libDrive"
except:
pass
try:
soup.find("title", {"id": "@ld-title"}).string = "libDrive"
except:
pass
if (
config.get("arcio")
and config.get("arcio") != ""
and LIBDRIVE_DEBUG == False
):
req = requests.get("https://arc.io/arc-sw.js")
with open("./build/arc-sw.js", "wb") as wb:
wb.write(req.content)
code = config.get("arcio")
if code == "dev":
code = "tUUqUjhw"
soup.find("script", {"id": "@ld-script-arcio"})[
"src"
] = "//arc.io/widget.min.js#%s" % (code)
else:
if os.path.exists("./build/arc-sw.js"):
os.remove("./build/arc-sw.js")
soup.find("script", {"id": "@ld-script-arcio"})["src"] = ""
with open("./build/index.html", "w+") as w:
w.write(str(soup))
r.close()
app = flask.Flask(__name__, static_folder="build")
build_interval = config.get("build_interval")
if not build_interval:
build_interval = 360
if build_interval != 0:
print("\033[32mCREATING CRON JOB...\033[0m")
sched = apscheduler.schedulers.background.BackgroundScheduler(daemon=True)
sched.add_job(
threaded_metadata,
"interval",
minutes=build_interval,
)
sched.start()
print("DONE.\n")
config_categories = [d["id"] for d in config["category_list"]]
metadata_categories = [d["id"] for d in metadata]
if len(metadata) > 0 and sorted(config_categories) == sorted(metadata_categories):
if build_interval == 0:
return app
elif datetime.datetime.utcnow() <= datetime.datetime.strptime(
metadata[-1]["buildTime"], "%Y-%m-%d %H:%M:%S.%f"
) + datetime.timedelta(minutes=build_interval):
return app
else:
threaded_metadata()
else:
threaded_metadata()
return app
app = create_app()
flask_cors.CORS(app)
app.secret_key = config.get("secret_key")
from src.routes.auth import authBP
from src.routes.config import configBP
from src.routes.debug import debugBP
from src.routes.download import downloadBP
from src.routes.environment import environmentBP
from src.routes.image import imageBP
from src.routes.metadata import metadataBP
from src.routes.ping import pingBP
from src.routes.rebuild import rebuildBP
from src.routes.redirectdownload import redirectdownloadBP
from src.routes.restart import restartBP
from src.routes.signup import signupBP
from src.routes.streammap import streammapBP
from src.routes.subtitledownload import subtitledownloadBP
from src.routes.trailer import trailerBP
app.register_blueprint(authBP)
app.register_blueprint(configBP)
app.register_blueprint(debugBP)
app.register_blueprint(downloadBP)
app.register_blueprint(environmentBP)
app.register_blueprint(imageBP)
app.register_blueprint(metadataBP)
app.register_blueprint(pingBP)
app.register_blueprint(rebuildBP)
app.register_blueprint(redirectdownloadBP)
app.register_blueprint(restartBP)
app.register_blueprint(signupBP)
app.register_blueprint(streammapBP)
app.register_blueprint(subtitledownloadBP)
app.register_blueprint(trailerBP)
@app.route("/", defaults={"path": ""})
@app.route("/<path:path>")
async def serve(path):
if (path != "") and os.path.exists("%s/%s" % (app.static_folder, path)):
return flask.send_from_directory(app.static_folder, path)
else:
return flask.send_from_directory(app.static_folder, "index.html")
if __name__ == "__main__":
print("\033[32mSERVING SERVER...\033[0m")
LIBDRIVE_DEBUG = os.getenv("LIBDRIVE_DEBUG")
if LIBDRIVE_DEBUG:
if LIBDRIVE_DEBUG.lower() == "true":
LIBDRIVE_DEBUG = True
else:
LIBDRIVE_DEBUG = False
else:
LIBDRIVE_DEBUG = False
print("DONE.\n")
app.run(
host="0.0.0.0",
port=9999,
threaded=True,
debug=LIBDRIVE_DEBUG,
)
else:
print("\033[32mINITIALIZING LOGGER...\033[0m")
if not os.path.exists("./logs"):
os.mkdir("./logs")
logs_path = os.path.abspath("./logs")
logs_max_files = 5
def sorted_ls(path):
mtime = lambda f: os.stat(os.path.join(path, f)).st_mtime
return list(sorted(os.listdir(path), key=mtime))
del_list = sorted_ls(logs_path)[0 : (len(sorted_ls(logs_path)) - logs_max_files)]
for del_file in del_list:
try:
os.remove(os.path.join(logs_path, del_file))
except:
pass
logging.getLogger("googleapiclient").setLevel(logging.WARNING)
logging.getLogger("oauth2client").setLevel(logging.WARNING)
logging.getLogger("waitress").setLevel(logging.INFO)
logging.basicConfig(
filename="./logs/%s.log"
% (datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S")),
level=logging.INFO,
)
console_logger = logging.getLogger()
console_logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
console_logger.addHandler(console_handler)
print("DONE.\n")
|
the-stack_0_19767 | # -*-coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import os
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
from libs.networks import resnet, resnet_gluoncv, mobilenet_v2, xception
from libs.box_utils import anchor_utils, generate_anchors, generate_rotate_anchors
from libs.configs import cfgs
from libs.losses import losses_dcl, losses
from libs.box_utils import show_box_in_tensor
from libs.detection_oprations.proposal_opr_dcl import postprocess_detctions
from libs.detection_oprations.anchor_target_layer_without_boxweight_dcl import anchor_target_layer
from help_utils.densely_coded_label import get_code_len
class DetectionNetwork(object):
def __init__(self, base_network_name, is_training):
self.base_network_name = base_network_name
self.is_training = is_training
if cfgs.METHOD == 'H':
self.num_anchors_per_location = len(cfgs.ANCHOR_SCALES) * len(cfgs.ANCHOR_RATIOS)
else:
self.num_anchors_per_location = len(cfgs.ANCHOR_SCALES) * len(cfgs.ANCHOR_RATIOS) * len(cfgs.ANCHOR_ANGLES)
self.method = cfgs.METHOD
self.losses_dict = {}
self.coding_len = get_code_len(int(cfgs.ANGLE_RANGE / cfgs.OMEGA), mode=cfgs.ANGLE_MODE)
def build_base_network(self, input_img_batch):
if self.base_network_name.startswith('resnet_v1'):
return resnet.resnet_base(input_img_batch, scope_name=self.base_network_name, is_training=self.is_training)
elif self.base_network_name in ['resnet152_v1d', 'resnet101_v1d', 'resnet50_v1d']:
return resnet_gluoncv.resnet_base(input_img_batch, scope_name=self.base_network_name,
is_training=self.is_training)
elif self.base_network_name.startswith('MobilenetV2'):
return mobilenet_v2.mobilenetv2_base(input_img_batch, is_training=self.is_training)
elif self.base_network_name.startswith('xception'):
return xception.xception_base(input_img_batch, is_training=self.is_training)
else:
raise ValueError('Sorry, we only support resnet, mobilenet_v2 and xception')
def rpn_cls_net(self, inputs, scope_list, reuse_flag, level):
rpn_conv2d_3x3 = inputs
for i in range(4):
rpn_conv2d_3x3 = slim.conv2d(inputs=rpn_conv2d_3x3,
num_outputs=256,
kernel_size=[3, 3],
stride=1,
activation_fn=tf.nn.relu,
weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,
biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER,
scope='{}_{}'.format(scope_list[0], i),
reuse=reuse_flag)
rpn_box_scores = slim.conv2d(rpn_conv2d_3x3,
num_outputs=cfgs.CLASS_NUM * self.num_anchors_per_location,
kernel_size=[3, 3],
stride=1,
weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,
biases_initializer=cfgs.FINAL_CONV_BIAS_INITIALIZER,
scope=scope_list[2],
activation_fn=None,
reuse=reuse_flag)
rpn_box_scores = tf.reshape(rpn_box_scores, [-1, cfgs.CLASS_NUM],
name='rpn_{}_classification_reshape'.format(level))
rpn_box_probs = tf.sigmoid(rpn_box_scores, name='rpn_{}_classification_sigmoid'.format(level))
return rpn_box_scores, rpn_box_probs
def rpn_reg_net(self, inputs, scope_list, reuse_flag, level):
rpn_conv2d_3x3 = inputs
for i in range(4):
rpn_conv2d_3x3 = slim.conv2d(inputs=rpn_conv2d_3x3,
num_outputs=256,
kernel_size=[3, 3],
weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,
biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER,
stride=1,
activation_fn=tf.nn.relu,
scope='{}_{}'.format(scope_list[1], i),
reuse=reuse_flag)
rpn_delta_boxes = slim.conv2d(rpn_conv2d_3x3,
num_outputs=4 * self.num_anchors_per_location,
kernel_size=[3, 3],
stride=1,
weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,
biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER,
scope=scope_list[3],
activation_fn=None,
reuse=reuse_flag)
rpn_angle_cls = slim.conv2d(rpn_conv2d_3x3,
num_outputs=self.coding_len * self.num_anchors_per_location,
kernel_size=[3, 3],
stride=1,
weights_initializer=cfgs.SUBNETS_WEIGHTS_INITIALIZER,
biases_initializer=cfgs.SUBNETS_BIAS_INITIALIZER,
scope=scope_list[4],
activation_fn=None,
reuse=reuse_flag)
rpn_delta_boxes = tf.reshape(rpn_delta_boxes, [-1, 4],
name='rpn_{}_regression_reshape'.format(level))
rpn_angle_cls = tf.reshape(rpn_angle_cls, [-1, self.coding_len],
name='rpn_{}_angle_cls_reshape'.format(level))
return rpn_delta_boxes, rpn_angle_cls
def rpn_net(self, feature_pyramid):
rpn_delta_boxes_list = []
rpn_scores_list = []
rpn_probs_list = []
rpn_angle_cls_list = []
with tf.variable_scope('rpn_net'):
with slim.arg_scope([slim.conv2d], weights_regularizer=slim.l2_regularizer(cfgs.WEIGHT_DECAY)):
for level in cfgs.LEVEL:
if cfgs.SHARE_NET:
reuse_flag = None if level == cfgs.LEVEL[0] else True
scope_list = ['conv2d_3x3_cls', 'conv2d_3x3_reg', 'rpn_classification',
'rpn_regression', 'rpn_angle_cls']
else:
reuse_flag = None
scope_list = ['conv2d_3x3_cls_' + level, 'conv2d_3x3_reg_' + level,
'rpn_classification_' + level, 'rpn_regression_' + level,
'rpn_angle_cls_' + level]
rpn_box_scores, rpn_box_probs = self.rpn_cls_net(feature_pyramid[level], scope_list, reuse_flag, level)
rpn_delta_boxes, rpn_angle_cls = self.rpn_reg_net(feature_pyramid[level], scope_list, reuse_flag,
level)
rpn_scores_list.append(rpn_box_scores)
rpn_probs_list.append(rpn_box_probs)
rpn_delta_boxes_list.append(rpn_delta_boxes)
rpn_angle_cls_list.append(rpn_angle_cls)
rpn_all_delta_boxes = tf.concat(rpn_delta_boxes_list, axis=0)
rpn_all_boxes_scores = tf.concat(rpn_scores_list, axis=0)
rpn_all_boxes_probs = tf.concat(rpn_probs_list, axis=0)
rpn_angle_cls = tf.concat(rpn_angle_cls_list, axis=0)
return rpn_all_delta_boxes, rpn_all_boxes_scores, rpn_all_boxes_probs, rpn_angle_cls
def make_anchors(self, feature_pyramid):
with tf.variable_scope('make_anchors'):
anchor_list = []
level_list = cfgs.LEVEL
with tf.name_scope('make_anchors_all_level'):
for level, base_anchor_size, stride in zip(level_list, cfgs.BASE_ANCHOR_SIZE_LIST, cfgs.ANCHOR_STRIDE):
'''
(level, base_anchor_size) tuple:
(P3, 32), (P4, 64), (P5, 128), (P6, 256), (P7, 512)
'''
featuremap_height, featuremap_width = tf.shape(feature_pyramid[level])[1], \
tf.shape(feature_pyramid[level])[2]
featuremap_height = tf.cast(featuremap_height, tf.float32)
featuremap_width = tf.cast(featuremap_width, tf.float32)
# tmp_anchors = anchor_utils.make_anchors(base_anchor_size=base_anchor_size,
# anchor_scales=cfgs.ANCHOR_SCALES,
# anchor_ratios=cfgs.ANCHOR_RATIOS,
# featuremap_height=featuremap_height,
# featuremap_width=featuremap_width,
# stride=stride,
# name='make_anchors_{}'.format(level))
if self.method == 'H':
tmp_anchors = tf.py_func(generate_anchors.generate_anchors_pre,
inp=[featuremap_height, featuremap_width, stride,
np.array(cfgs.ANCHOR_SCALES) * stride, cfgs.ANCHOR_RATIOS, 4.0],
Tout=[tf.float32])
tmp_anchors = tf.reshape(tmp_anchors, [-1, 4])
else:
tmp_anchors = generate_rotate_anchors.make_anchors(base_anchor_size=base_anchor_size,
anchor_scales=cfgs.ANCHOR_SCALES,
anchor_ratios=cfgs.ANCHOR_RATIOS,
anchor_angles=cfgs.ANCHOR_ANGLES,
featuremap_height=featuremap_height,
featuremap_width=featuremap_width,
stride=stride)
tmp_anchors = tf.reshape(tmp_anchors, [-1, 5])
anchor_list.append(tmp_anchors)
all_level_anchors = tf.concat(anchor_list, axis=0)
return all_level_anchors
def add_anchor_img_smry(self, img, anchors, labels, method):
positive_anchor_indices = tf.reshape(tf.where(tf.greater_equal(labels, 1)), [-1])
# negative_anchor_indices = tf.reshape(tf.where(tf.equal(labels, 0)), [-1])
positive_anchor = tf.gather(anchors, positive_anchor_indices)
# negative_anchor = tf.gather(anchors, negative_anchor_indices)
pos_in_img = show_box_in_tensor.only_draw_boxes(img_batch=img,
boxes=positive_anchor,
method=method)
# neg_in_img = show_box_in_tensor.only_draw_boxes(img_batch=img,
# boxes=negative_anchor)
tf.summary.image('positive_anchor', pos_in_img)
# tf.summary.image('negative_anchors', neg_in_img)
def build_whole_detection_network(self, input_img_batch, gtboxes_batch_h, gtboxes_batch_r, gt_encode_label, gpu_id=0):
if self.is_training:
gtboxes_batch_h = tf.reshape(gtboxes_batch_h, [-1, 5])
gtboxes_batch_h = tf.cast(gtboxes_batch_h, tf.float32)
gtboxes_batch_r = tf.reshape(gtboxes_batch_r, [-1, 6])
gtboxes_batch_r = tf.cast(gtboxes_batch_r, tf.float32)
gt_encode_label = tf.reshape(gt_encode_label, [-1, self.coding_len])
gt_encode_label = tf.cast(gt_encode_label, tf.float32)
# 1. build base network
feature_pyramid = self.build_base_network(input_img_batch)
# 2. build rpn
rpn_box_pred, rpn_cls_score, rpn_cls_prob, rpn_angle_cls = self.rpn_net(feature_pyramid)
# 3. generate_anchors
anchors = self.make_anchors(feature_pyramid)
# 4. postprocess rpn proposals. such as: decode, clip, filter
if self.is_training:
with tf.variable_scope('build_loss'):
labels, target_delta, anchor_states, target_boxes, target_encode_label = tf.py_func(
func=anchor_target_layer,
inp=[gtboxes_batch_h, gtboxes_batch_r,
gt_encode_label, anchors, gpu_id],
Tout=[tf.float32, tf.float32, tf.float32,
tf.float32, tf.float32])
if self.method == 'H':
self.add_anchor_img_smry(input_img_batch, anchors, anchor_states, 0)
else:
self.add_anchor_img_smry(input_img_batch, anchors, anchor_states, 1)
cls_loss = losses.focal_loss(labels, rpn_cls_score, anchor_states)
reg_loss = losses.smooth_l1_loss(target_delta, rpn_box_pred, anchor_states)
# angle_cls_loss = losses_dcl.angle_cls_focal_loss(target_encode_label, rpn_angle_cls,
# anchor_states, decimal_weight=None)
angle_cls_loss = losses_dcl.angle_cls_period_focal_loss(target_encode_label, rpn_angle_cls,
anchor_states, target_boxes,
decimal_weight=cfgs.DATASET_NAME.startswith('DOTA'))
self.losses_dict = {'cls_loss': cls_loss * cfgs.CLS_WEIGHT,
'reg_loss': reg_loss * cfgs.REG_WEIGHT,
'angle_cls_loss': angle_cls_loss * cfgs.ANGLE_WEIGHT}
with tf.variable_scope('postprocess_detctions'):
scores, category, boxes_angle = postprocess_detctions(rpn_bbox_pred=rpn_box_pred,
rpn_cls_prob=rpn_cls_prob,
rpn_angle_prob=tf.sigmoid(rpn_angle_cls),
anchors=anchors,
is_training=self.is_training,
gpu_id=gpu_id)
# boxes = tf.stop_gradient(boxes)
scores = tf.stop_gradient(scores)
category = tf.stop_gradient(category)
boxes_angle = tf.stop_gradient(boxes_angle)
if self.is_training:
return scores, category, boxes_angle, self.losses_dict
else:
return scores, category, boxes_angle
def get_restorer(self):
checkpoint_path = tf.train.latest_checkpoint(os.path.join(cfgs.TRAINED_CKPT, cfgs.VERSION))
if checkpoint_path != None:
if cfgs.RESTORE_FROM_RPN:
print('___restore from rpn___')
model_variables = slim.get_model_variables()
restore_variables = [var for var in model_variables if not var.name.startswith('FastRCNN_Head')] + \
[slim.get_or_create_global_step()]
for var in restore_variables:
print(var.name)
restorer = tf.train.Saver(restore_variables)
else:
restorer = tf.train.Saver()
print("model restore from :", checkpoint_path)
else:
checkpoint_path = cfgs.PRETRAINED_CKPT
print("model restore from pretrained mode, path is :", checkpoint_path)
model_variables = slim.get_model_variables()
# for var in model_variables:
# print(var.name)
# print(20*"__++__++__")
def name_in_ckpt_rpn(var):
return var.op.name
def name_in_ckpt_fastrcnn_head(var):
'''
Fast-RCNN/resnet_v1_50/block4 --> resnet_v1_50/block4
Fast-RCNN/MobilenetV2/** --> MobilenetV2/**
:param var:
:return:
'''
return '/'.join(var.op.name.split('/')[1:])
nameInCkpt_Var_dict = {}
for var in model_variables:
if var.name.startswith('Fast-RCNN/'+self.base_network_name): # +'/block4'
var_name_in_ckpt = name_in_ckpt_fastrcnn_head(var)
nameInCkpt_Var_dict[var_name_in_ckpt] = var
else:
if var.name.startswith(self.base_network_name):
var_name_in_ckpt = name_in_ckpt_rpn(var)
nameInCkpt_Var_dict[var_name_in_ckpt] = var
else:
continue
restore_variables = nameInCkpt_Var_dict
for key, item in restore_variables.items():
print("var_in_graph: ", item.name)
print("var_in_ckpt: ", key)
print(20*"___")
restorer = tf.train.Saver(restore_variables)
print(20 * "****")
print("restore from pretrained_weighs in IMAGE_NET")
return restorer, checkpoint_path
def get_gradients(self, optimizer, loss):
'''
:param optimizer:
:param loss:
:return:
return vars and grads that not be fixed
'''
# if cfgs.FIXED_BLOCKS > 0:
# trainable_vars = tf.trainable_variables()
# # trained_vars = slim.get_trainable_variables()
# start_names = [cfgs.NET_NAME + '/block%d'%i for i in range(1, cfgs.FIXED_BLOCKS+1)] + \
# [cfgs.NET_NAME + '/conv1']
# start_names = tuple(start_names)
# trained_var_list = []
# for var in trainable_vars:
# if not var.name.startswith(start_names):
# trained_var_list.append(var)
# # slim.learning.train()
# grads = optimizer.compute_gradients(loss, var_list=trained_var_list)
# return grads
# else:
# return optimizer.compute_gradients(loss)
return optimizer.compute_gradients(loss)
def enlarge_gradients_for_bias(self, gradients):
final_gradients = []
with tf.variable_scope("Gradient_Mult") as scope:
for grad, var in gradients:
scale = 1.0
if cfgs.MUTILPY_BIAS_GRADIENT and './biases' in var.name:
scale = scale * cfgs.MUTILPY_BIAS_GRADIENT
if not np.allclose(scale, 1.0):
grad = tf.multiply(grad, scale)
final_gradients.append((grad, var))
return final_gradients
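# Illustrative inference-time sketch (not from the original repo; `img_batch`
# is a placeholder image tensor from the data pipeline, and base_network_name
# is assumed to be one of the backbones accepted by build_base_network):
#
#     net = DetectionNetwork(base_network_name=cfgs.NET_NAME, is_training=False)
#     scores, category, boxes_angle = net.build_whole_detection_network(
#         img_batch, gtboxes_batch_h=None, gtboxes_batch_r=None,
#         gt_encode_label=None)
#
# With is_training=True the same call also returns a losses dict containing
# 'cls_loss', 'reg_loss' and 'angle_cls_loss', as built above.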
|
the-stack_0_19768 | # -*- coding: utf-8 -*-
# _______ _______ _______ _______ _
# |\ /|( ___ )( ____ )( )( ___ )( ( /||\ /|
# | ) ( || ( ) || ( )|| () () || ( ) || \ ( |( \ / )
# | (___) || (___) || (____)|| || || || | | || \ | | \ (_) /
# | ___ || ___ || __)| |(_)| || | | || (\ \) | \ /
# | ( ) || ( ) || (\ ( | | | || | | || | \ | ) (
# | ) ( || ) ( || ) \ \__| ) ( || (___) || ) \ | | |
# |/ \||/ \||/ \__/|/ \|(_______)|/ )_) \_/
#
# Copyright (C) 2016 Laurynas Riliskis
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Created on 2/29/16.
import json
import os
from .utils.tools import get_file_names
from .app import Application
from .utils.logger import logger as Log
from .model import JsonRepresentation as Json, Models, DataModel
from .model import Action, ForeignKey, __ALL__FIELDS__, Constraint
from .builder.base import FileObject
class Generator:
def __init__(self, args):
self.args = args
self._files = get_file_names(args.path)
self.config_file_name = self._files[0] # fist one is config
self.models_file_names = self._files[1:] # the rest are models
Log.debug("Model to load: " + str(self._files))
self.app = Application(args.path, args.output, self.config_file_name)
self.tmpl_path = os.path.dirname(os.path.realpath(__file__)) + \
"/templates/"
self.output_dir = self.args.output
self.models = []
self.config = dict()
self.config['header'] = Application.__DEF__HEADER__
def load_models(self):
# load files
for mFile in self.models_file_names:
Log.debug("Model path: " + self.args.path + mFile)
model_name = mFile.split('.json')[0]
Log.debug("Model name: " + model_name)
with open(mFile, encoding='utf-8') as data_file:
json_model = json.loads(data_file.read())
Log.debug("json_model=" + str(json_model))
if not json_model:
Log.error("Can't load Json model")
raise ValueError("json error")
model_documentation = json_model.get(Json.DOCUMENTATION)
model = DataModel(name=model_name, documentation=model_documentation)
self.models.append(model)
json_fields = json_model.get(Json.FIELDS)
# create fields and add to the model
for field in json_fields:
try:
name = field[Json.NAME]
field_type = field[Json.TYPE]
except KeyError:
# we deliberately fail here if mandatory fields are missing
raise KeyError("Mandatory fields are not defines")
documentation = field.get(Json.DOCUMENTATION)
isIndex = field.get(Json.INDEX)
if not isIndex:
isIndex = False
isNullable = field.get(Json.NULLABLE)
if not isNullable:
isNullable = True
# TODO: supported for autoincreament
isAutoIncrement = False
if not isIndex:
isIndex = False
defaultValue = field.get(Json.DEFAULT_VALUE)
defaultValueLegacy = field.get(Json.DEFAULT_VALUE_LEGACY)
enumName = field.get(Json.ENUM_NAME)
enumValuesJson = field.get(Json.ENUM_VALUES)
# TODO: fix enums
enumValues = []
if enumValuesJson:
enumValues = []
for eVal in enumValuesJson:
pass
foreign_key_json = field.get(Json.FOREIGN_KEY)
foreign_key = None
isId = False
if foreign_key_json:
table = foreign_key_json.get(Json.FOREIGN_KEY_TABLE)
on_delete = Action.from_json_name(
Json.FOREIGN_KEY_ON_DELETE_ACTION)
foreign_key = ForeignKey(table, on_delete)
model.add_field(__ALL__FIELDS__.get(field_type)(
model, name, documentation, isId, isIndex,
isNullable, isAutoIncrement, defaultValue, enumName,
enumValues, foreign_key
)
)
# end field loop
# TODO: check id field creation
id_fields = json_model.get(Json.ID_FIELD)
id_field_name = "_id"
if id_fields:
if len(id_fields) != 1:
raise ValueError("Invalid number of idField ")
id_field_name = id_fields[0]
if "_id" == id_field_name:
name = id_field_name
id_field_obj = __ALL__FIELDS__.get("Long")(
model, name, "Primary key.", True, False,
False, True, None, None, None, None
)
model.add_id_field(id_field_obj)
else:
id_field_obj = model.get_field_by_name(id_field_name)
if not id_field_obj:
raise ValueError("No just ID field %s" % id_field_name)
if id_field_obj.type not in ["Integer", "Long", "Date", "Enum"]:
raise ValueError(
"ID field must be of type Integer, Long, Date or Enum")
if id_field_obj.is_nullable:
raise ValueError(
"ID Field %s can not be nullable" % id_field_name)
if not id_field_obj.is_index:
raise ValueError(
"ID Field %s must be index" % id_field_name)
id_field_obj.set_is_id()
# Constraints
constraints_json = json_model.get(Json.CONSTRAINTS)
if constraints_json:
for constrain in constraints_json:
Log.debug("constraintJson=" + str(constrain))
name = constrain.get(Constraint.Json.NAME)
definition = constrain.get(Constraint.Json.DEFINITION)
model.add_constrain(Constraint(name, definition))
Models.add_model(model)
Log.debug("Model created")
Log.debug(model)
Log.debug('*' * 80)
def make_manifest(self):
tmpl_data = dict()
tmpl_data['config'] = self.app
tmpl_data['models'] = Models.ALL_DATA_MODELS
file_name = "provider_manifest_data.txt"
Log.info("Provider declaration to paste in the AndroidManifest.xml "
"file is located in the file: %s" % file_name)
template = FileObject(build_path=self.app.output_path,
file_name=file_name,
tmpl_path=self.tmpl_path,
tmpl_name='manifest.tmpl',
tmpl_data=tmpl_data
)
template.render_file()
def make_table_columns(self):
tmpl_data = dict()
tmpl_data['config'] = self.app
tmpl_data['all_models'] = Models.get_models()
for model in Models.get_models():
tmpl_data['model'] = model
template = FileObject(build_path=self.app.provider_dir +
model.name_lower_case + "/",
file_name=model.name_camel_case + "Columns.java",
tmpl_path=self.tmpl_path,
tmpl_name='columns.tmpl',
tmpl_data=tmpl_data
)
template.render_file()
def make_models(self):
tmpl_data = dict()
tmpl_data['config'] = self.app
tmpl_data['all_models'] = Models.get_models()
for model in Models.get_models():
tmpl_data['model'] = model
template = FileObject(build_path=self.app.provider_dir +
model.name_lower_case + "/",
file_name=model.name_camel_case +
"Model.java",
tmpl_path=self.tmpl_path,
tmpl_name='model.tmpl',
tmpl_data=tmpl_data
)
template.render_file()
def make_wrappers(self):
tmpl_data = dict()
tmpl_data['config'] = self.app
tmpl_data['all_models'] = Models.get_models()
out_dir = self.app.provider_dir + "base/"
# AbstractCursor
template = FileObject(build_path=out_dir,
file_name="AbstractCursor.java",
tmpl_path=self.tmpl_path,
tmpl_name='abstractcursor.tmpl',
tmpl_data=tmpl_data
)
template.render_file()
# AbstractContentValuesWrapper
template = FileObject(build_path=out_dir,
file_name="AbstractContentValues.java",
tmpl_path=self.tmpl_path,
tmpl_name='abstractcontentvalues.tmpl',
tmpl_data=tmpl_data
)
template.render_file()
# AbstractSelection
template = FileObject(build_path=out_dir,
file_name="AbstractSelection.java",
tmpl_path=self.tmpl_path,
tmpl_name='abstractselection.tmpl',
tmpl_data=tmpl_data
)
template.render_file()
# BaseContentProvider
template = FileObject(build_path=out_dir,
file_name="BaseContentProvider.java",
tmpl_path=self.tmpl_path,
tmpl_name='basecontentprovider.tmpl',
tmpl_data=tmpl_data
)
template.render_file()
# BaseModel
template = FileObject(build_path=out_dir,
file_name="BaseModel.java",
tmpl_path=self.tmpl_path,
tmpl_name='abstractmodel.tmpl',
tmpl_data=tmpl_data
)
template.render_file()
# models
for model in self.models:
# Cursor wrapper
model_name = model.name_camel_case
tmpl_data['model'] = model
out_dir = self.app.provider_dir + model.package_name + "/"
template = FileObject(build_path=out_dir,
file_name=model_name + "Cursor.java",
tmpl_path=self.tmpl_path,
tmpl_name='cursor.tmpl',
tmpl_data=tmpl_data
)
template.render_file()
# ContentValues wrapper
template = FileObject(build_path=out_dir,
file_name=model_name + "ContentValues.java",
tmpl_path=self.tmpl_path,
tmpl_name='contentvalues.tmpl',
tmpl_data=tmpl_data
)
template.render_file()
# Selection builder
template = FileObject(build_path=out_dir,
file_name=model_name + "Selection.java",
tmpl_path=self.tmpl_path,
tmpl_name='selection.tmpl',
tmpl_data=tmpl_data
)
template.render_file()
# enums appending to one file
for field in model.fields:
if field.is_enum:
tmpl_data['field'] = field
template = FileObject(build_path=out_dir,
file_name=field.enum_name + ".java",
tmpl_path=self.tmpl_path,
tmpl_name='enum.tmpl',
tmpl_data=tmpl_data
)
template.render_file()
def make_content_provider(self):
tmpl_data = dict()
tmpl_data['config'] = self.app
tmpl_data['models'] = Models.ALL_DATA_MODELS
template = FileObject(build_path=self.app.provider_dir,
file_name=self.app.PROVIDER_CLASS_NAME + ".java",
tmpl_path=self.tmpl_path,
tmpl_name='contentprovider.tmpl',
tmpl_data=tmpl_data
)
template.render_file()
def make_sqlite_open_helper(self):
tmpl_data = dict()
tmpl_data['config'] = self.app
tmpl_data['models'] = Models.ALL_DATA_MODELS
template = FileObject(build_path=self.app.provider_dir,
file_name=self.app.SQLITE_OPEN_HELPER_CLASS_NAME + ".java",
tmpl_path=self.tmpl_path,
tmpl_name='sqliteopenhelper.tmpl',
tmpl_data=tmpl_data
)
template.render_file()
def make_generate_sqlite_open_helper_callbacks(self):
tmpl_data = dict()
tmpl_data['config'] = self.app
tmpl_data['models'] = Models.ALL_DATA_MODELS
template = FileObject(build_path=self.app.provider_dir,
file_name=self.app.SQLITE_OPEN_HELPER_CALLBACKS_CLASS_NAME + ".java",
tmpl_path=self.tmpl_path,
tmpl_name='sqliteopenhelpercallbacks.tmpl',
tmpl_data=tmpl_data
)
template.render_file()
def make_model_representations(self):
pass
def make_model_representer(self):
pass
def make_model_change_listener(self):
pass
def go(self):
self.load_models()
self.make_table_columns()
self.make_models()
self.make_wrappers()
self.make_content_provider()
self.make_sqlite_open_helper()
self.make_generate_sqlite_open_helper_callbacks()
self.make_manifest()
self.make_model_representations()
self.make_model_representer()
self.make_model_change_listener()
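# For illustration only: a model file consumed by load_models() might look
# like the sketch below. The literal key strings are assumptions (the Json
# constants such as Json.NAME / Json.TYPE are defined in .model, not shown
# here); the field types used are ones the id-field check above explicitly
# allows ("Integer", "Long", "Date", "Enum").
#
#     person.json:
#     {
#         "documentation": "A person record.",
#         "fields": [
#             {"name": "age", "type": "Integer", "index": true},
#             {"name": "birth_date", "type": "Date", "nullable": true}
#         ]
#     }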
|