max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars) |
---|---|---|---|---|
lldb/packages/Python/lldbsuite/test/commands/command/script/callables.py | medismailben/llvm-project | 2,338 | 12621457 |
import lldb
# bunch of different kinds of python callables that should
# all work as commands.
def check(debugger, command, context, result, internal_dict):
if (not isinstance(debugger, lldb.SBDebugger) or
not isinstance(command, str) or
not isinstance(result, lldb.SBCommandReturnObject) or
not isinstance(internal_dict, dict) or
(not context is None and
not isinstance(context, lldb.SBExecutionContext))):
raise Exception()
result.AppendMessage("All good.")
def vfoobar(*args):
check(*args)
def v5foobar(debugger, command, context, result, internal_dict, *args):
check(debugger, command, context, result, internal_dict)
def foobar(debugger, command, context, result, internal_dict):
check(debugger, command, context, result, internal_dict)
def foobar4(debugger, command, result, internal_dict):
check(debugger, command, None, result, internal_dict)
class FooBar:
@staticmethod
def sfoobar(debugger, command, context, result, internal_dict):
check(debugger, command, context, result, internal_dict)
@classmethod
def cfoobar(cls, debugger, command, context, result, internal_dict):
check(debugger, command, context, result, internal_dict)
def ifoobar(self, debugger, command, context, result, internal_dict):
check(debugger, command, context, result, internal_dict)
def __call__(self, debugger, command, context, result, internal_dict):
check(debugger, command, context, result, internal_dict)
@staticmethod
def sfoobar4(debugger, command, result, internal_dict):
check(debugger, command, None, result, internal_dict)
@classmethod
def cfoobar4(cls, debugger, command, result, internal_dict):
check(debugger, command, None, result, internal_dict)
def ifoobar4(self, debugger, command, result, internal_dict):
check(debugger, command, None, result, internal_dict)
class FooBar4:
def __call__(self, debugger, command, result, internal_dict):
check(debugger, command, None, result, internal_dict)
FooBarObj = FooBar()
FooBar4Obj = FooBar4()
|
Packs/HealthCheck/Scripts/HealthCheckServerLog/HealthCheckServerLog.py | diCagri/content | 799 | 12621476 |
import demistomock as demisto  # noqa: F401
from CommonServerPython import * # noqa: F401
import re
def findOldestDate(incidentDate, newDate):
incidentDate = datetime.strptime(incidentDate, "%Y-%m-%d %H:%M:%S")
newDate = datetime.strptime(newDate, "%Y-%m-%d %H:%M:%S")
return min([incidentDate, newDate])
def findNewestDate(incidentDate, newDate):
incidentDate = datetime.strptime(incidentDate, "%Y-%m-%d %H:%M:%S")
newDate = datetime.strptime(newDate, "%Y-%m-%d %H:%M:%S")
return max([incidentDate, newDate])
context = demisto.context()
suggestions = []
knownerrors = [
{
"Got permission denied while trying to connect to the Docker daemon socket at unix:///var/run/docker.sock: Get": [
"Error Found: `Got permission denied while trying to connect to the Docker daemon socket at unix`",
"Please refer to https://knowledgebase.paloaltonetworks.com/KCSArticleDetail?id=kA14u000000HB4oCAG"
]
},
{
'[Errno 13] Permission denied:': [
'Error Found: `[Errno 13] Permission denied`',
"Please refer to https://knowledgebase.paloaltonetworks.com/KCSArticleDetail?id=kA14u000000HB4ZCAW"
]
},
{
'config.json: permission denied': [
'Error Found: `config.json: permission denied`',
"Please refer to https://knowledgebase.paloaltonetworks.com/KCSArticleDetail?id=kA14u000000HB4tCAG"
]
},
{
'Error response from daemon: OCI runtime create failed:': [
'Error Found: `Error response from daemon: OCI runtime create failed`',
"Please refer to https://knowledgebase.paloaltonetworks.com/KCSArticleDetail?id=kA14u000000HB4eCAG"
]
},
{
'proxyconnect tcp: tls: oversized record received with length 20527': [
'Error Found: `proxyconnect tcp: tls: oversized record received with length 20527`',
"Please refer to https://knowledgebase.paloaltonetworks.com/KCSArticleDetail?id=kA10g000000PNhpCAG"
]
},
{
"error: websocket: not a websocket handshake: 'upgrade' token not found in 'Connection' header": [
'Error Found: `websocket: not a websocket handshake: upgrade token not found in Connection header`',
"Please refer to https://knowledgebase.paloaltonetworks.com/KCSArticleDetail?id=kA10g000000PNiOCAW"
]
},
{
"Create more free space in thin pool or use dm.min_free_space": [
'Error Found: `Create more free space in thin pool or use dm.min_free_space`',
"Please refer to https://knowledgebase.paloaltonetworks.com/KCSArticleDetail?id=kA10g000000PNhQCAW"
]
},
{
"in pool reached high watermark": [
"Error Found: `amount of active containers in pool reached high watermark`",
"Check and increase high watermark for docker: https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-0/"
"cortex-xsoar-admin/cortex-xsoar-overview/performance-tuning-of-cortex-xsoar-server"
]
},
{
"no space left on device": [
"Error Found: `no space left on device`",
"Free up Disk Space with Data Archiving: https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-0/"
"cortex-xsoar-admin/manage-data/free-up-disc-space-with-data-archiving"
]
},
{
"ImportError: No module named": [
"Error Found: `ImportError: No module named`",
"Python environment missing dependency or docker image outdated."
]
},
{
"(error: websocket: close 1006 (abnormal closure): unexpected EOF)": [
" Error Found: `error: websocket: close 1006 (abnormal closure): unexpected EOF`",
"WebSocket Configuration: https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-1/cortex-xsoar-admin/installation/"
"post-installation-checklist/websocket-configuration.html#idee004eaa-34d9-41a1-a8d0-aba3bf9f91bb"
]
},
{
"fatal error: runtime: out of memory": [
"Error Found: `fatal error: runtime: out of memory.`",
"Performance Tuning of Cortex XSOAR Server: https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-0/"
"cortex-xsoar-admin/cortex-xsoar-overview/performance-tuning-of-cortex-xsoar-server"
]
},
{
"error Wrong schedule format": [
"Error Found: `error Wrong schedule format`",
"Change jobs.serverSiemIncidents.schedule=<time in minutes> to Xm. for example 5 minuets should be 5m"
]
},
{
"error Failed on ensure function for": [
"Error Found: `error Failed on ensure function for`",
"Reindex the Entire Database: "
"https://docs.paloaltonetworks.com/cortex/cortex-xsoar/6-0/cortex-xsoar-admin/manage-data/reindex-the-database"
]
},
{
"Version didnt change": [
"Error Found: `Version didnt change`",
"Upgrade used an older version, Re-run the upgrade with the latest version."
]
},
{
"layout-edit-.json: invalid argument": [
"Error Found: `layout-edit-.json: invalid argument`",
"Please contact customer support"
]
},
{
"error: unsupported mode": [
"Error Found: `error: unsupported mode`",
"Remove old index files under /usr/local/demisto/dist. and do a hard refresh in the browser. "
"No service restart needed"
]
}
]
res = []
context_since = context.get('LogServer', {}).get('since')
since = log_until = restartcount = None
context_log_until = context.get('LogServer', {}).get('logUntil')
context_restartcount = context.get('LogServer', {}).get('restartCount')
path = demisto.executeCommand('getFilePath', {'id': demisto.args()['entryID']})
if path[0]['Type'] == entryTypes['error']:
demisto.results('File not found')
else:
try:
with open(path[0]['Contents']['path'], 'r') as f:
data_line = f.readlines()
# find Since and find knownErrors
for line in data_line:
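# each 'good luck' line in the log marks a server (re)start, so count them,
# carrying over any restart count already persisted in the incident context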
if 'good luck' in line:
if (context_restartcount is None) and (restartcount is None):
restartcount = 1
elif (context_restartcount is not None) and (restartcount is None):
restartcount = int(context_restartcount)
restartcount += 1
elif (context_restartcount is not None) and (restartcount is not None):
restartcount += 1
for item in knownerrors:
for (err, suggest) in item.items():
if err in line:
if suggest not in suggestions:
suggestions.append(suggest)
if (context_since is None) and (since is None):
since = re.findall(r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})', line)
oldestDate = since[0]
continue
elif (context_since is not None) and (since is None):
since = re.findall(r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})', line)
oldestDate = findOldestDate(since[0], context_since)
continue
else:
continue
# find Last Log
for line in reversed(data_line):
if (context_log_until is None) and (log_until is None):
log_until = re.findall(r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})', line)
if not log_until:
log_until = None
continue
newestDate = log_until[0]
break
elif (context_since is not None) and (log_until is None):
log_until = re.findall(r'(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2})', line)
if not log_until:
continue
newestDate = log_until[0]
newestDate = findNewestDate(log_until[0], context_log_until)
break
else:
oldestDate = context_since
newestDate = context_log_until
break
demisto.setContext("LogServer.since", str(oldestDate))
demisto.setContext("LogServer.logUntil", str(newestDate))
demisto.setContext("LogServer.restartCount", restartcount)
demisto.executeCommand("setIncident", {"healthcheckrestartcount": restartcount,
"healthchecklogsince": str(oldestDate),
"healthcheckloguntil": str(newestDate)})
if suggestions:
for entry in suggestions:
res.append({"category": "Log Analysis", "severity": "High", "description": entry[0], "resolution": entry[1]})
results = CommandResults(
readable_output="HealthCheckServerLog Done",
outputs_prefix="HealthCheck.ActionableItems",
outputs=res)
return_results(results)
except UnicodeDecodeError:
demisto.results("Could not read file")
|
extra_tests/snippets/os_info.py | dbrgn/RustPython | 11,058 | 12621483 |
import os
assert os.name == 'posix' or os.name == 'nt'
|
release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/RebarContainerParameterManager.py | htlcnn/ironpython-stubs | 182 | 12621494 | <filename>release/stubs.min/Autodesk/Revit/DB/Structure/__init___parts/RebarContainerParameterManager.py
class RebarContainerParameterManager(object,IDisposable):
""" Provides implementation of RebarContainer parameters overrides. """
def AddOverride(self,paramId,value):
"""
AddOverride(self: RebarContainerParameterManager,paramId: ElementId,value: int)
Adds an override for the given parameter as its value will be displayed for the
Rebar Container element.
paramId: The id of the parameter
value: The override value of the parameter.
AddOverride(self: RebarContainerParameterManager,paramId: ElementId,value: float)
Adds an override for the given parameter as its value will be displayed for the
Rebar Container element.
paramId: The id of the parameter
value: The override value of the parameter.
AddOverride(self: RebarContainerParameterManager,paramId: ElementId,value: ElementId)
Adds an override for the given parameter as its value will be displayed for the
Rebar Container element.
paramId: The id of the parameter
value: The override value of the parameter.
AddOverride(self: RebarContainerParameterManager,paramId: ElementId,value: str)
Adds an override for the given parameter as its value will be displayed for the
Rebar Container element.
paramId: The id of the parameter
value: The override value of the parameter.
"""
pass
def AddSharedParameterAsOverride(self,paramId):
"""
AddSharedParameterAsOverride(self: RebarContainerParameterManager,paramId: ElementId)
Adds a shared parameter as one of the parameter overrides stored by this Rebar
Container element.
paramId: The id of the shared parameter element
"""
pass
def ClearOverrides(self):
"""
ClearOverrides(self: RebarContainerParameterManager)
Clears any overridden values from all parameters of the associated
RebarContainer element.
"""
pass
def Dispose(self):
""" Dispose(self: RebarContainerParameterManager) """
pass
def IsOverriddenParameterModifiable(self,paramId):
"""
IsOverriddenParameterModifiable(self: RebarContainerParameterManager,paramId: ElementId) -> bool
Checks if overridden parameter is modifiable.
paramId: Overridden parameter id
Returns: True if the parameter is modifiable,false if the parameter is readonly.
"""
pass
def IsParameterOverridden(self,paramId):
"""
IsParameterOverridden(self: RebarContainerParameterManager,paramId: ElementId) -> bool
Checks if the parameter has an override
paramId: The id of the parameter element
Returns: True if the parameter has an override
"""
pass
def IsRebarContainerParameter(self,paramId):
"""
IsRebarContainerParameter(self: RebarContainerParameterManager,paramId: ElementId) -> bool
Checks if the parameter is a Rebar Container parameter
paramId: The id of the parameter element
Returns: True if the parameter is a Rebar Container parameter
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: RebarContainerParameterManager,disposing: bool) """
pass
def RemoveOverride(self,paramId):
"""
RemoveOverride(self: RebarContainerParameterManager,paramId: ElementId)
Removes an overridden value from the given parameter.
paramId: The id of the parameter
"""
pass
def SetOverriddenParameterModifiable(self,paramId):
"""
SetOverriddenParameterModifiable(self: RebarContainerParameterManager,paramId: ElementId)
Sets this overridden parameter to be modifiable.
paramId: Overridden parameter id
"""
pass
def SetOverriddenParameterReadonly(self,paramId):
"""
SetOverriddenParameterReadonly(self: RebarContainerParameterManager,paramId: ElementId)
Sets this overridden parameter to be readonly.
paramId: Overridden parameter id
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: RebarContainerParameterManager) -> bool
"""
|
src/dialogs/folder.py | Sammmasqw/Facepager | 430 | 12621496 | <reponame>Sammmasqw/Facepager
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import QFileDialog, QCheckBox, QHBoxLayout, QLabel
class SelectFolderDialog(QFileDialog):
"""
Create a custom Folder Dialog with an option to import files as nodes
"""
def __init__(self,*args,**kwargs):
super(SelectFolderDialog,self).__init__(*args,**kwargs)
self.setOption(QFileDialog.DontUseNativeDialog)
self.setFileMode(QFileDialog.Directory)
#QFileDialog.getExistingDirectory(self, 'Select Download Folder', datadir)) #, QFileDialog.ShowDirsOnly
#self.mainWindow = self.parent()
self.optionNodes = QCheckBox("Add selected files as nodes",self)
self.optionNodes.clicked.connect(self.optionNodesClick)
#self.optionNodes.setCheckState(Qt.CheckState.Checked)
layout = self.layout()
row = layout.rowCount()
layout.addWidget(QLabel('Options'),row,0)
options = QHBoxLayout()
options.addWidget(self.optionNodes)
options.addStretch(1)
layout.addLayout(options,row,1,1,2)
self.setLayout(layout)
#if self.exec_():
#if os.path.isfile(self.selectedFiles()[0]):
def optionNodesClick(self):
if self.optionNodes.isChecked():
self.setFileMode(QFileDialog.ExistingFiles)
else:
self.setFileMode(QFileDialog.Directory)
|
python-package/lightgbm/callback.py | PyVCEchecker/LightGBM | 5,753 | 12621514 | <reponame>PyVCEchecker/LightGBM
# coding: utf-8
"""Callbacks library."""
import collections
from functools import partial
from typing import Any, Callable, Dict, List, Tuple, Union
from .basic import _ConfigAliases, _log_info, _log_warning
_EvalResultTuple = Union[
List[Tuple[str, str, float, bool]],
List[Tuple[str, str, float, bool, float]]
]
def _gt_delta(curr_score: float, best_score: float, delta: float) -> bool:
return curr_score > best_score + delta
def _lt_delta(curr_score: float, best_score: float, delta: float) -> bool:
return curr_score < best_score - delta
class EarlyStopException(Exception):
"""Exception of early stopping."""
def __init__(self, best_iteration: int, best_score: _EvalResultTuple) -> None:
"""Create early stopping exception.
Parameters
----------
best_iteration : int
The best iteration stopped.
best_score : list of (eval_name, metric_name, eval_result, is_higher_better) tuple or (eval_name, metric_name, eval_result, is_higher_better, stdv) tuple
Scores for each metric, on each validation set, as of the best iteration.
"""
super().__init__()
self.best_iteration = best_iteration
self.best_score = best_score
# Callback environment used by callbacks
CallbackEnv = collections.namedtuple(
"CallbackEnv",
["model",
"params",
"iteration",
"begin_iteration",
"end_iteration",
"evaluation_result_list"])
def _format_eval_result(value: _EvalResultTuple, show_stdv: bool = True) -> str:
"""Format metric string."""
if len(value) == 4:
return f"{value[0]}'s {value[1]}: {value[2]:g}"
elif len(value) == 5:
if show_stdv:
return f"{value[0]}'s {value[1]}: {value[2]:g} + {value[4]:g}"
else:
return f"{value[0]}'s {value[1]}: {value[2]:g}"
else:
raise ValueError("Wrong metric value")
def log_evaluation(period: int = 1, show_stdv: bool = True) -> Callable:
"""Create a callback that logs the evaluation results.
By default, standard output resource is used.
Use ``register_logger()`` function to register a custom logger.
Note
----
Requires at least one validation data.
Parameters
----------
period : int, optional (default=1)
The period to log the evaluation results.
The last boosting stage or the boosting stage found by using ``early_stopping`` callback is also logged.
show_stdv : bool, optional (default=True)
Whether to log stdv (if provided).
Returns
-------
callback : callable
The callback that logs the evaluation results every ``period`` boosting iteration(s).
"""
def _callback(env: CallbackEnv) -> None:
if period > 0 and env.evaluation_result_list and (env.iteration + 1) % period == 0:
result = '\t'.join([_format_eval_result(x, show_stdv) for x in env.evaluation_result_list])
_log_info(f'[{env.iteration + 1}]\t{result}')
_callback.order = 10 # type: ignore
return _callback
def record_evaluation(eval_result: Dict[str, Dict[str, List[Any]]]) -> Callable:
"""Create a callback that records the evaluation history into ``eval_result``.
Parameters
----------
eval_result : dict
Dictionary used to store all evaluation results of all validation sets.
This should be initialized outside of your call to ``record_evaluation()`` and should be empty.
Any initial contents of the dictionary will be deleted.
.. rubric:: Example
With two validation sets named 'eval' and 'train', and one evaluation metric named 'logloss'
this dictionary after finishing a model training process will have the following structure:
.. code-block::
{
'train':
{
'logloss': [0.48253, 0.35953, ...]
},
'eval':
{
'logloss': [0.480385, 0.357756, ...]
}
}
Returns
-------
callback : callable
The callback that records the evaluation history into the passed dictionary.
"""
if not isinstance(eval_result, dict):
raise TypeError('eval_result should be a dictionary')
def _init(env: CallbackEnv) -> None:
eval_result.clear()
for data_name, eval_name, _, _ in env.evaluation_result_list:
eval_result.setdefault(data_name, collections.OrderedDict())
eval_result[data_name].setdefault(eval_name, [])
def _callback(env: CallbackEnv) -> None:
if env.iteration == env.begin_iteration:
_init(env)
for data_name, eval_name, result, _ in env.evaluation_result_list:
eval_result[data_name][eval_name].append(result)
_callback.order = 20 # type: ignore
return _callback
def reset_parameter(**kwargs: Union[list, Callable]) -> Callable:
"""Create a callback that resets the parameter after the first iteration.
.. note::
The initial parameter will still take in-effect on first iteration.
Parameters
----------
**kwargs : value should be list or callable
List of parameters for each boosting round
or a callable that calculates the parameter in terms of
current number of round (e.g. yields learning rate decay).
If list lst, parameter = lst[current_round].
If callable func, parameter = func(current_round).
Returns
-------
callback : callable
The callback that resets the parameter after the first iteration.
"""
def _callback(env: CallbackEnv) -> None:
new_parameters = {}
for key, value in kwargs.items():
if isinstance(value, list):
if len(value) != env.end_iteration - env.begin_iteration:
raise ValueError(f"Length of list {key!r} has to equal to 'num_boost_round'.")
new_param = value[env.iteration - env.begin_iteration]
else:
new_param = value(env.iteration - env.begin_iteration)
if new_param != env.params.get(key, None):
new_parameters[key] = new_param
if new_parameters:
env.model.reset_parameter(new_parameters)
env.params.update(new_parameters)
_callback.before_iteration = True # type: ignore
_callback.order = 10 # type: ignore
return _callback
def early_stopping(stopping_rounds: int, first_metric_only: bool = False, verbose: bool = True, min_delta: Union[float, List[float]] = 0.0) -> Callable:
"""Create a callback that activates early stopping.
Activates early stopping.
The model will train until the validation score doesn't improve by at least ``min_delta``.
Validation score needs to improve at least every ``stopping_rounds`` round(s)
to continue training.
Requires at least one validation data and one metric.
If there's more than one, will check all of them. But the training data is ignored anyway.
To check only the first metric set ``first_metric_only`` to True.
The index of iteration that has the best performance will be saved in the ``best_iteration`` attribute of a model.
Parameters
----------
stopping_rounds : int
The possible number of rounds without the trend occurrence.
first_metric_only : bool, optional (default=False)
Whether to use only the first metric for early stopping.
verbose : bool, optional (default=True)
Whether to log message with early stopping information.
By default, standard output resource is used.
Use ``register_logger()`` function to register a custom logger.
min_delta : float or list of float, optional (default=0.0)
Minimum improvement in score to keep training.
If float, this single value is used for all metrics.
If list, its length should match the total number of metrics.
Returns
-------
callback : callable
The callback that activates early stopping.
"""
best_score = []
best_iter = []
best_score_list: list = []
cmp_op = []
enabled = True
first_metric = ''
def _init(env: CallbackEnv) -> None:
nonlocal best_score
nonlocal best_iter
nonlocal best_score_list
nonlocal cmp_op
nonlocal enabled
nonlocal first_metric
enabled = not any(env.params.get(boost_alias, "") == 'dart' for boost_alias
in _ConfigAliases.get("boosting"))
if not enabled:
_log_warning('Early stopping is not available in dart mode')
return
if not env.evaluation_result_list:
raise ValueError('For early stopping, '
'at least one dataset and eval metric is required for evaluation')
if stopping_rounds <= 0:
raise ValueError("stopping_rounds should be greater than zero.")
if verbose:
_log_info(f"Training until validation scores don't improve for {stopping_rounds} rounds")
# reset storages
best_score = []
best_iter = []
best_score_list = []
cmp_op = []
first_metric = ''
n_metrics = len(set(m[1] for m in env.evaluation_result_list))
n_datasets = len(env.evaluation_result_list) // n_metrics
if isinstance(min_delta, list):
if not all(t >= 0 for t in min_delta):
raise ValueError('Values for early stopping min_delta must be non-negative.')
if len(min_delta) == 0:
if verbose:
_log_info('Disabling min_delta for early stopping.')
deltas = [0.0] * n_datasets * n_metrics
elif len(min_delta) == 1:
if verbose:
_log_info(f'Using {min_delta[0]} as min_delta for all metrics.')
deltas = min_delta * n_datasets * n_metrics
else:
if len(min_delta) != n_metrics:
raise ValueError('Must provide a single value for min_delta or as many as metrics.')
if first_metric_only and verbose:
_log_info(f'Using only {min_delta[0]} as early stopping min_delta.')
deltas = min_delta * n_datasets
else:
if min_delta < 0:
raise ValueError('Early stopping min_delta must be non-negative.')
if min_delta > 0 and n_metrics > 1 and not first_metric_only and verbose:
_log_info(f'Using {min_delta} as min_delta for all metrics.')
deltas = [min_delta] * n_datasets * n_metrics
# split is needed for "<dataset type> <metric>" case (e.g. "train l1")
first_metric = env.evaluation_result_list[0][1].split(" ")[-1]
for eval_ret, delta in zip(env.evaluation_result_list, deltas):
best_iter.append(0)
best_score_list.append(None)
if eval_ret[3]: # greater is better
best_score.append(float('-inf'))
cmp_op.append(partial(_gt_delta, delta=delta))
else:
best_score.append(float('inf'))
cmp_op.append(partial(_lt_delta, delta=delta))
def _final_iteration_check(env: CallbackEnv, eval_name_splitted: List[str], i: int) -> None:
nonlocal best_iter
nonlocal best_score_list
if env.iteration == env.end_iteration - 1:
if verbose:
best_score_str = '\t'.join([_format_eval_result(x) for x in best_score_list[i]])
_log_info('Did not meet early stopping. '
f'Best iteration is:\n[{best_iter[i] + 1}]\t{best_score_str}')
if first_metric_only:
_log_info(f"Evaluated only: {eval_name_splitted[-1]}")
raise EarlyStopException(best_iter[i], best_score_list[i])
def _callback(env: CallbackEnv) -> None:
nonlocal best_score
nonlocal best_iter
nonlocal best_score_list
nonlocal cmp_op
nonlocal enabled
nonlocal first_metric
if env.iteration == env.begin_iteration:
_init(env)
if not enabled:
return
for i in range(len(env.evaluation_result_list)):
score = env.evaluation_result_list[i][2]
if best_score_list[i] is None or cmp_op[i](score, best_score[i]):
best_score[i] = score
best_iter[i] = env.iteration
best_score_list[i] = env.evaluation_result_list
# split is needed for "<dataset type> <metric>" case (e.g. "train l1")
eval_name_splitted = env.evaluation_result_list[i][1].split(" ")
if first_metric_only and first_metric != eval_name_splitted[-1]:
continue # use only the first metric for early stopping
if ((env.evaluation_result_list[i][0] == "cv_agg" and eval_name_splitted[0] == "train"
or env.evaluation_result_list[i][0] == env.model._train_data_name)):
_final_iteration_check(env, eval_name_splitted, i)
continue # train data for lgb.cv or sklearn wrapper (underlying lgb.train)
elif env.iteration - best_iter[i] >= stopping_rounds:
if verbose:
eval_result_str = '\t'.join([_format_eval_result(x) for x in best_score_list[i]])
_log_info(f"Early stopping, best iteration is:\n[{best_iter[i] + 1}]\t{eval_result_str}")
if first_metric_only:
_log_info(f"Evaluated only: {eval_name_splitted[-1]}")
raise EarlyStopException(best_iter[i], best_score_list[i])
_final_iteration_check(env, eval_name_splitted, i)
_callback.order = 30 # type: ignore
return _callback
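# --- Illustrative usage (not part of the original module): a minimal sketch of how
# --- the callbacks defined above are typically combined, assuming the released
# --- `lightgbm` and `numpy` packages; the synthetic data and parameters are made up.
if __name__ == "__main__":  # pragma: no cover
    import numpy as np
    import lightgbm as lgb

    rng = np.random.default_rng(0)
    X, y = rng.normal(size=(200, 5)), rng.integers(0, 2, size=200)
    train = lgb.Dataset(X[:150], label=y[:150])
    valid = lgb.Dataset(X[150:], label=y[150:], reference=train)
    history: dict = {}
    lgb.train(
        {"objective": "binary", "verbose": -1},
        train,
        num_boost_round=50,
        valid_sets=[valid],
        callbacks=[
            log_evaluation(period=10),            # log scores every 10 rounds
            record_evaluation(history),           # keep the full eval history
            reset_parameter(learning_rate=lambda i: 0.1 * (0.99 ** i)),
            early_stopping(stopping_rounds=5, min_delta=1e-4),
        ],
    )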
|
my_projects/mechanism/Linkages_mechanism.py | cigar666/my_manim_projects | 159 | 12621525 | <gh_stars>100-1000
from manimlib.imports import *
from my_manim_projects.my_projects.mechanism.basic_component import *
class Four_bar_linkage(Scene):
def construct(self):
O1, O2 = LEFT * 2 + DOWN, RIGHT * 2. + DOWN
dot_O1 = Dot(O1, color=PINK)
dot_O2 = Dot(O2, color=PINK)
bar_1 = Bar(O1, 140 * DEGREES, 1., color=ORANGE)
bar_2 = Bar(O2, 80 * DEGREES, 2.7, color=BLUE)
bar_3 = Rod(bar_1.get_end(), bar_2.get_end(), end_type=[2, 2], color=BLUE)
bars = VGroup(bar_1, bar_2, bar_3)
t = ValueTracker(140 * DEGREES)
w = 1
# ## Neither updater works very well; they often fail to converge when the rocker gets close to a dead point ##
# def update_bars(b):
# err = 1e-3
# b[0].reposition_by_angle(t.get_value() * w)
# print(t.get_value() * w / PI * 180)
#
# # b[2].reposition_by_angle(angle=None, start=b[0].get_end())
# b[2].reposition(b[0].get_end(), b[2].get_end())
# print('theta=%.2f, error=%.5f' % (t.get_value() / PI * 180, get_norm(b[1].get_end() - b[2].get_end())))
# while get_norm(b[1].get_end() - b[2].get_end()) > err:
# b[1].reposition(b[1].get_start(), b[2].get_end())
# b[2].reposition(b[0].get_end(), b[1].get_end())
# print('theta=%.2f, error=%.5f' % (t.get_value() / PI * 180, get_norm(b[1].get_end() - b[2].get_end())))
def update_bars(b):
def l_new():
return get_norm(b[1].get_end() - b[0].get_end())
err = 1e-3
delta_theta = 2 * DEGREES
a0 = 0.8
l = b[2].get_rod_length()
b[0].reposition_by_angle(t.get_value() * w)
d_l = abs(l_new() - l)
print('theta=%.2f, error=%.5f' % (t.get_value() / PI * 180, d_l))
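# swing the rocker b[1] in small angular steps: shrink the step (by factor a0)
# while the coupler-length error keeps dropping, flip its sign when the error
# grows, and stop once the linkage closes to within err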
while d_l > err:
if d_l < err * 50:
a0 = 0.6
# elif d_l < err * 15:
# a0 = 0.5
elif d_l < err * 5:
a0 = 0.4
b[1].rotate_about_start(delta_theta)
d_l_new = abs(l_new() - l)
if d_l_new < d_l:
delta_theta *= a0
d_l = d_l_new
else:
delta_theta *= -1
d_l = d_l_new
print('theta=%.2f, error=%.5f' % (t.get_value() / PI * 180, d_l))
b[2].reposition(b[0].get_end(), b[1].get_end())
bars.add_updater(update_bars)
self.add(dot_O1, dot_O2, bars, t)
self.wait()
# self.play(t.set_value, 4 * 500 * DEGREES, rate_func=linear, run_time=20)
t.add_updater(lambda t, dt: t.increment_value(2 * DEGREES))
self.wait(20)
class Four_bar_linkage_draw(Scene):
def construct(self):
grid_n = 4
dots = VGroup(*[Dot(RIGHT * i + UP * j, color=BLUE).scale(0.5) for i in range(grid_n + 1) for j in range(grid_n + 1)])
lines = VGroup(*([Line(ORIGIN, UP * grid_n, stroke_color=BLUE_B, stroke_width=1).shift(i * RIGHT) for i in range(grid_n + 1)] +
[Line(ORIGIN, RIGHT * grid_n, stroke_color=BLUE_B, stroke_width=1).shift(i * UP) for i in range(grid_n + 1)]))
grid = VGroup(dots, lines)
O1, O2 = LEFT * 2 + DOWN * 2.5, RIGHT * 2. + DOWN * 2.5
dot_O1 = Dot(O1, color=PINK)
dot_O2 = Dot(O2, color=PINK)
bar_1 = Bar(O1, 140 * DEGREES, 1., color=YELLOW)
bar_2 = Bar(O2, 80 * DEGREES, 2.7, color=BLUE)
bar_3 = Rod(bar_1.get_end(), bar_2.get_end(), end_type=[2, 2], color=BLUE)
grid.rotate_about_origin(bar_3.get_angle()).scale(bar_3.get_rod_length()/grid_n, about_point=ORIGIN).shift(bar_3.get_start())
bars = VGroup(bar_1, bar_2, bar_3)
t = ValueTracker(140 * DEGREES)
w = 1
def update_bars(b):
err = 1e-3
b[0].reposition_by_angle(t.get_value() * w)
print(t.get_value() * w / PI * 180)
# b[2].reposition_by_angle(angle=None, start=b[0].get_end())
b[2].reposition(b[0].get_end(), b[2].get_end())
print('theta=%.2f, error=%.5f' % (t.get_value() / PI * 180, get_norm(b[1].get_end() - b[2].get_end())))
while get_norm(b[1].get_end() - b[2].get_end()) > err:
b[1].reposition(b[1].get_start(), b[2].get_end())
b[2].reposition(b[0].get_end(), b[1].get_end())
print('theta=%.2f, error=%.5f' % (t.get_value() / PI * 180, get_norm(b[1].get_end() - b[2].get_end())))
bars.add_updater(update_bars)
paths = VGroup(*[TracedPath(dot.get_center, stroke_color=BLUE_B, stroke_width=1.5) for dot in dots])
self.add(dot_O1, dot_O2, bars, t, paths)
self.wait()
bar_3.add(grid)
self.play(ShowCreation(grid), run_time=1.5)
self.wait(0.5)
self.play(t.set_value, 1 * 500 * DEGREES, rate_func=linear, run_time=25)
# t.add_updater(lambda t, dt: t.increment_value(2 * DEGREES))
# self.wait(30)
self.wait(2)
class Five_bar_linkage(Scene):
def construct(self):
O1, O2 = LEFT * 2 + DOWN * 2.5, RIGHT * 2. + DOWN * 2.5
dot_O1 = Dot(O1, color=PINK)
dot_O2 = Dot(O2, color=PINK)
bar_1 = Bar(O1, 140 * DEGREES, 1., color=YELLOW)
bar_2 = Bar(O2, 80 * DEGREES, 2.7, color=BLUE)
bar_3 = Rod(bar_1.get_end(), bar_2.get_end(), end_type=[2, 2], color=BLUE)
|
var/spack/repos/builtin/packages/cmockery/package.py | kkauder/spack | 2,360 | 12621533 | <reponame>kkauder/spack<gh_stars>1000+
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Cmockery(AutotoolsPackage):
"""A lightweight library to simplify and generalize the process of
writing unit tests for C applications."""
homepage = "https://github.com/google/cmockery"
url = "https://github.com/google/cmockery/archive/v0.1.2.tar.gz"
version('0.1.2', sha256='d40135ae9179201c01bde725fa64fc32d86b5899972e0ce4ad51668d261edbae')
version('0.1.1', sha256='a801d17976f781fff6dc49042ff109e55ca4ebe8efb13757fa1a511ca52316be')
version('0.1.0', sha256='9e017d48e56ab9d2ebcf5286fa54e37d42fe308d3c01fbc367793da2b9ad95e7')
depends_on('m4', type='build')
depends_on('autoconf', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
def autoreconf(self, spec, prefix):
bash = which('bash')
bash('./autogen.sh')
|
examples/ocr/v20181119/general_fast_ocr.py | snowxmas/tencentcloud-sdk-python | 465 | 12621534 | <gh_stars>100-1000
import os
from tencentcloud.common import credential
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.ocr.v20181119 import ocr_client, models
try:
cred = credential.Credential(
os.environ.get("TENCENTCLOUD_SECRET_ID"),
os.environ.get("TENCENTCLOUD_SECRET_KEY"))
httpProfile = HttpProfile()
httpProfile.endpoint = "ocr.tencentcloudapi.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = ocr_client.OcrClient(cred, "ap-guangzhou", clientProfile)
req = models.GeneralFastOCRRequest()
req.ImageUrl = "https://mc.qcloudimg.com/static/img/6d4f1676deba26377d4303a462ca5074/image.png"
resp = client.GeneralFastOCR(req)
print(resp.to_json_string())
except TencentCloudSDKException as err:
print(err)
|
deep_architect/contrib/communicators/mpi_communicator.py | dapatil211/deep_architect | 272 | 12621548 | <filename>deep_architect/contrib/communicators/mpi_communicator.py
from mpi4py import MPI
from deep_architect.contrib.communicators.communicator import Communicator
"""
Tags for the requests used by the communicator
"""
READY_REQ = 0
MODEL_REQ = 1
RESULTS_REQ = 2
"""
Contains implementation for MPI based Communicator. All requests used are
non-blocking unless mentioned otherwise in comments.
"""
class MPICommunicator(Communicator):
def __init__(self):
self.comm = MPI.COMM_WORLD
super(MPICommunicator, self).__init__(self.comm.Get_size() - 1,
self.comm.Get_rank())
self.done = False
if self.is_master():
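# the master posts one non-blocking receive per worker for each of the READY and
# RESULTS tags; each request is re-posted after its message has been consumed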
self.ready_requests = [
self.comm.irecv(source=i + 1, tag=READY_REQ)
for i in range(self.num_workers)
]
self.eval_requests = [
self.comm.irecv(source=i + 1, tag=RESULTS_REQ)
for i in range(self.num_workers)
]
self.next_worker = -1
def _publish_results_to_master(self, results, evaluation_id,
searcher_eval_token):
"""
Called in worker process only.
Synchronously sends the results from worker to master. Returns nothing.
"""
self.comm.ssend((results, evaluation_id, searcher_eval_token),
dest=0,
tag=RESULTS_REQ)
def _receive_architecture_in_worker(self):
"""
Called in worker process only.
Receives architecture from master. Synchronously sends a ready request to
master signalling that worker is ready to receive new architecture. Then
does a blocking receive of the architecture sent by master and returns the
architecure. If master instead sends a kill signal, returns None for that
and any future invocations of _receive_architecture_in_worker.
"""
if self.done:
return None
self.comm.ssend([self.rank], dest=0, tag=READY_REQ)
(vs, evaluation_id, searcher_eval_token,
kill) = self.comm.recv(source=0, tag=MODEL_REQ)
if kill:
self.done = True
return None
return vs, evaluation_id, searcher_eval_token
def _is_ready_to_publish_architecture(self):
"""
Called in master process only.
Iterates through ready requests and checks if any of them have been
returned. If so, set the next worker corresponding to the ready request that
returned, and return True. If none of the workers have sent back a ready
request, return False.
"""
for idx, req in enumerate(self.ready_requests):
if req:
test, msg = req.test()
if test:
self.next_worker = idx + 1
return True
return False
def _publish_architecture_to_worker(self, vs, current_evaluation_id,
searcher_eval_token):
"""
Called in master process only.
Sends architecture to the worker that was designated as ready in
_is_ready_to_publish_architecture. Then resets the ready request for that
worker. Returns nothing.
"""
self.comm.isend((vs, current_evaluation_id, searcher_eval_token, False),
dest=self.next_worker,
tag=MODEL_REQ)
self.ready_requests[self.next_worker - 1] = (self.comm.irecv(
source=self.next_worker, tag=READY_REQ))
def _receive_results_in_master(self, src):
"""
Called in master process only.
Checks if the src worker has sent back results. If so, returns the result
and resets the request to get results in the future. Else, returns None.
"""
test, msg = self.eval_requests[src].test()
if test:
self.eval_requests[src] = self.comm.irecv(source=src + 1,
tag=RESULTS_REQ)
return msg if test else None
def _kill_worker(self):
"""
Called in master process only.
Sends a kill signal to given worker. Doesn't return anything.
"""
self.comm.isend((0, 0, 0, True), dest=self.next_worker, tag=MODEL_REQ)
self.ready_requests[self.next_worker - 1] = None
|
Grand_Central_Robot_Xylophone/code.py | gamblor21/Adafruit_Learning_System_Guides | 665 | 12621552 | <filename>Grand_Central_Robot_Xylophone/code.py
# Adafruit Grand Central Robot Xylophone Demo Program
# <NAME> and <NAME> for Adafruit Industries
# MIT License
import time
import board
from digitalio import DigitalInOut, Direction
solenoid_count = 8 # Set the total number of solenoids used
start_pin = 2 # Start at pin D2
# Create the input objects list for solenoids
solenoid = []
for k in range(start_pin, solenoid_count + start_pin + 1):
# get pin # attribute, use string formatting
this_solenoid = DigitalInOut(getattr(board, "D{}".format(k)))
this_solenoid.direction = Direction.OUTPUT
solenoid.append(this_solenoid)
STRIKE_TIME = 0.01 # Time between initiating a strike and turning it off
TIME_BETWEEN = 0.5 # Time between actions in seconds
song = [3, 4, 5, 4, 3, 3, 3, 4, 4, 4, 3, 3, 3, 3, 4, 5, 4, 3, 3, 3, 2, 2, 3, 4, 5]
def play(key, time_to_strike):
solenoid[key].value = True
time.sleep(time_to_strike)
solenoid[key].value = False
def rest(time_to_wait):
time.sleep(time_to_wait)
while True:
# Play each of the bars
for bar in range(solenoid_count):
play(bar, STRIKE_TIME)
rest(TIME_BETWEEN)
time.sleep(1.0) # Wait a bit before playing the song
# Play the notes defined in song
# simple example does not vary time between notes
for bar in range(len(song)):
play(song[bar], STRIKE_TIME)
rest(TIME_BETWEEN)
time.sleep(1.0)
|
.venv/lib/python3.8/site-packages/pandas/tests/frame/test_period.py | acrucetta/Chicago_COVI_WebApp | 115 | 12621567 |
import numpy as np
from pandas import DataFrame, Index, PeriodIndex, period_range
import pandas._testing as tm
class TestPeriodIndex:
def test_as_frame_columns(self):
rng = period_range("1/1/2000", periods=5)
df = DataFrame(np.random.randn(10, 5), columns=rng)
ts = df[rng[0]]
tm.assert_series_equal(ts, df.iloc[:, 0])
# GH # 1211
repr(df)
ts = df["1/1/2000"]
tm.assert_series_equal(ts, df.iloc[:, 0])
def test_frame_setitem(self):
rng = period_range("1/1/2000", periods=5, name="index")
df = DataFrame(np.random.randn(5, 3), index=rng)
df["Index"] = rng
rs = Index(df["Index"])
tm.assert_index_equal(rs, rng, check_names=False)
assert rs.name == "Index"
assert rng.name == "index"
rs = df.reset_index().set_index("index")
assert isinstance(rs.index, PeriodIndex)
tm.assert_index_equal(rs.index, rng)
def test_frame_index_to_string(self):
index = PeriodIndex(["2011-1", "2011-2", "2011-3"], freq="M")
frame = DataFrame(np.random.randn(3, 4), index=index)
# it works!
frame.to_string()
|
selfdrive/dragonpilot/gpxd.py | kansakitw/dragonpilotamd | 251 | 12621577 | <reponame>kansakitw/dragonpilotamd
#!/usr/bin/env python3
# The MIT License
#
# Copyright (c) 2019-, <NAME>, dragonpilot community, and a number of other of contributors.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import cereal.messaging as messaging
import os
import datetime
import signal
import threading
from common.realtime import Ratekeeper
# customisable values
GPX_LOG_PATH = '/data/media/0/gpx_logs/'
LOG_HERTZ = 10 # 10 hz = 0.1 sec, higher for higher accuracy, 10hz seems fine
LOG_LENGTH = 10 # mins, higher means it keeps more data in the memory, will take more time to write into a file too.
LOST_SIGNAL_COUNT_LENGTH = 30 # secs, output log file if we lost signal for this long
# do not change
LOST_SIGNAL_COUNT_MAX = LOST_SIGNAL_COUNT_LENGTH * LOG_HERTZ # secs,
LOGS_PER_FILE = LOG_LENGTH * 60 * LOG_HERTZ # e.g. 10 * 60 * 10 = 6000 points per file
class WaitTimeHelper:
ready_event = threading.Event()
shutdown = False
def __init__(self):
signal.signal(signal.SIGTERM, self.graceful_shutdown)
signal.signal(signal.SIGINT, self.graceful_shutdown)
signal.signal(signal.SIGHUP, self.graceful_shutdown)
def graceful_shutdown(self, signum, frame):
self.shutdown = True
self.ready_event.set()
class GpxD():
def __init__(self):
self.log_count = 0
self.logs = list()
self.lost_signal_count = 0
self.wait_helper = WaitTimeHelper()
self.started_time = datetime.datetime.utcnow().isoformat()
self.v_ego_prev = 0.
self.pause = False
def log(self, sm):
gps = sm['gpsLocationExternal']
v_ego = sm['carState'].vEgo
if abs(v_ego) > 0.:
self.pause = False
# do not log when no fix or accuracy is too low, add lost_signal_count
if gps.flags % 2 == 0 or gps.accuracy > 5.:
if self.log_count > 0:
self.lost_signal_count += 1
elif self.pause:
pass
else:
self.logs.append([datetime.datetime.utcfromtimestamp(gps.timestamp*0.001).isoformat(), str(gps.latitude), str(gps.longitude), str(gps.altitude)])
self.log_count += 1
self.lost_signal_count = 0
if v_ego == 0. and abs(self.v_ego_prev) > 0.:
self.pause = True
self.v_ego_prev = v_ego
def write_log(self, force = False):
if self.log_count == 0:
return
if force or (self.log_count >= LOGS_PER_FILE or self.lost_signal_count >= LOST_SIGNAL_COUNT_MAX):
self._write_gpx()
self.lost_signal_count = 0
self.log_count = 0
self.logs.clear()
self.started_time = datetime.datetime.utcnow().isoformat()
def _write_gpx(self):
if len(self.logs) > 1:
if not os.path.exists(GPX_LOG_PATH):
os.makedirs(GPX_LOG_PATH)
filename = self.started_time.replace(':','-')
str = ''
str += "<?xml version=\"1.0\" encoding=\"utf-8\" standalone=\"yes\"?>\n"
str += "<gpx version=\"1.1\" creator=\"dragonpilot https://github.com/dragonpilot-community/dragonpilot\" xmlns=\"http://www.topografix.com/GPX/1/1\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd\">\n"
str += "<trk>\n"
str += " <name>" + self.started_time + "</name>"
str += " <trkseg>\n"
for trkpt in self.logs:
str += self._trkpt_template(trkpt[1], trkpt[2], trkpt[3], trkpt[0])
str += " </trkseg>\n"
str += "</trk>\n"
str += "</gpx>\n"
try:
f = open('%s%sZ.gpx' % (GPX_LOG_PATH, filename), 'w')
f.write(str)
f.close()
except:
pass
def _trkpt_template(self, lat, lon, ele, time):
str = ""
str += " <trkpt lat=\"" + lat + "\" lon=\"" + lon + "\">\n"
str += " <ele>" + ele + "</ele>\n"
str += " <time>" + time + "</time>\n"
str += " </trkpt>\n"
return str
def gpxd_thread(sm=None, pm=None):
if sm is None:
sm = messaging.SubMaster(['gpsLocationExternal', 'carState'])
wait_helper = WaitTimeHelper()
gpxd = GpxD()
rk = Ratekeeper(LOG_HERTZ, print_delay_threshold=None)
while True:
sm.update(0)
gpxd.log(sm)
gpxd.write_log()
if wait_helper.shutdown:
gpxd.write_log(True)
break
rk.keep_time()
def main(sm=None, pm=None):
gpxd_thread(sm, pm)
if __name__ == "__main__":
main()
|
tests/conftest.py | sdispater/poet | 367 | 12621579 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
import os
import pytest
import tempfile
import shutil
from cleo.inputs.list_input import ListInput
from cleo.outputs.console_output import ConsoleOutput
from cleo.styles import CleoStyle
from poet.console import Application
from poet.console.commands.command import Command
class DummyCommand(Command):
"""
Dummy Command.
dummy
"""
def __init__(self):
super(DummyCommand, self).__init__()
self.input = ListInput([])
self.output = CleoStyle(self.input, ConsoleOutput())
@pytest.fixture
def app():
return Application()
@pytest.fixture
def command():
return DummyCommand()
@pytest.fixture
def check_output(mocker):
outputs = {
('python', '-V'): b'Python 3.6.0'
}
patched = mocker.patch(
'subprocess.check_output',
side_effect=lambda cmd, *args, **kwargs: outputs.get(tuple(cmd), b'')
)
return patched
@pytest.fixture
def tmp_dir():
dir_ = tempfile.mkdtemp(prefix='poet_')
yield dir_
shutil.rmtree(dir_)
@pytest.fixture
def tmp_file():
fd, file_ = tempfile.mkstemp(prefix='poet_')
os.close(fd)
yield file_
os.unlink(file_)
|
tests/test_server.py | pyGrowler/Growler | 806 | 12621609 |
#
# tests/test_server.py
#
import pytest
import asyncio
import growler
@pytest.fixture
def app(event_loop):
app = growler.App(loop=event_loop)
return app
@pytest.fixture
def growler_server(app, event_loop, unused_tcp_port):
return app.create_server(host='127.0.0.1',
port=unused_tcp_port,
as_coroutine=True)
@pytest.mark.asyncio
async def test_post_request(app, growler_server, event_loop, unused_tcp_port):
body_data = None
response_data = None
did_send = False
did_receive = False
server = await growler_server
@app.post('/data')
async def post_test(req, res):
nonlocal body_data, did_receive
body_data = await req.body()
did_receive = True
res.send_text("OK")
async def http_request():
nonlocal did_send, response_data
did_send = True
r, w = await asyncio.open_connection(host='127.0.0.1',
port=unused_tcp_port)
data = b'{"somekey": "somevalue"}'
request_headers = '\r\n'.join([
'POST /data HTTP/1.1',
'HOST: localhost',
'Content-Type: application/json',
'ConTent-LENGTH: %d' % len(data),
'\r\n',
]).encode()
w.write(request_headers)
w.write(data)
w.write_eof()
response_data = await r.read()
server.close()
await http_request()
server.close()
assert did_send
# assert did_receive
assert body_data == b'{"somekey": "somevalue"}'
assert response_data.endswith(b'\r\n\r\nOK')
|
src/timeago/locales/pl.py | nmb10/timeago | 220 | 12621619 | <reponame>nmb10/timeago
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2018-11-11
@author: marcel-odya
'''
base = [
["właśnie teraz", "za chwilę"],
["%s sekund temu", "za %s sekund", "%s sekundy temu", "za %s sekundy"],
["minutę temu", "za minutę"],
["%s minut temu", "za %s minut", "%s minuty temu", "za %s minuty"],
["godzinę temu", "za godzinę"],
["%s godzin temu", "za %s godzin", "%s godziny temu", "za %s godziny"],
["1 dzień temu", "za 1 dzień"],
["%s dni temu", "za %s dni", "%s dni temu", "za %s dni"],
["tydzień temu", "za tydzień"],
["%s tygodni temu", "za %s tygodni", "%s tygodnie temu", "za %s tygodnie"],
["miesiąc temu", "za miesiąc"],
["%s miesięcy temu", "za %s miesięcy", "%s miesiące temu", "za %s miesiące"],
["rok temu", "za rok"],
["%s lat temu", "za %s lat", "%s lata temu", "za %s lata"]
]
def generate(row, y):
def formatting(time):
'''
Uses the 3rd and 4th fields of every second entry in the list (the ones
containing %s) when the diff ends with 2, 3 or 4 but not with 12, 13 or 14.
'''
if row % 2 == 0:
return base[row][y]
last_number = time % 10
last_two_numbers = time % 100
if last_number in range(2, 5) and last_two_numbers not in range(12, 15):
return base[row][y + 2]
return base[row][y]
return formatting
LOCALE = generate
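# Illustrative only (not part of the original locale): how the rule above resolves
# for the "minutes ago" row (row 3, y 0):
#   LOCALE(3, 0)(5)  -> "%s minut temu"   (5 does not end in 2-4)
#   LOCALE(3, 0)(22) -> "%s minuty temu"  (ends in 2 and is not 12-14)
#   LOCALE(3, 0)(12) -> "%s minut temu"   (12-14 fall back to the first form)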
|
RecoTracker/DeDx/python/dedxEstimatorsFromRefitter_cff.py | ckamtsikis/cmssw | 852 | 12621658 | <reponame>ckamtsikis/cmssw
import FWCore.ParameterSet.Config as cms
from RecoTracker.TrackProducer.TrackRefitter_cfi import *
RefitterForDeDx = TrackRefitter.clone(
TrajectoryInEvent = True
)
from RecoTracker.DeDx.dedxEstimators_cff import *
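# point every dE/dx producer imported above at the refitted tracks and their
# trajectory-track association instead of the module defaults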
dedxHitInfo.tracks="RefitterForDeDx"
dedxHitInfo.trajectoryTrackAssociation = "RefitterForDeDx"
dedxHarmonic2.tracks="RefitterForDeDx"
dedxHarmonic2.trajectoryTrackAssociation = "RefitterForDeDx"
dedxTruncated40.tracks="RefitterForDeDx"
dedxTruncated40.trajectoryTrackAssociation = "RefitterForDeDx"
dedxMedian.tracks="RefitterForDeDx"
dedxMedian.trajectoryTrackAssociation = "RefitterForDeDx"
dedxUnbinned.tracks="RefitterForDeDx"
dedxUnbinned.trajectoryTrackAssociation = "RefitterForDeDx"
dedxDiscrimProd.tracks="RefitterForDeDx"
dedxDiscrimProd.trajectoryTrackAssociation = "RefitterForDeDx"
dedxDiscrimBTag.tracks="RefitterForDeDx"
dedxDiscrimBTag.trajectoryTrackAssociation = "RefitterForDeDx"
dedxDiscrimSmi.tracks="RefitterForDeDx"
dedxDiscrimSmi.trajectoryTrackAssociation = "RefitterForDeDx"
dedxDiscrimASmi.tracks="RefitterForDeDx"
dedxDiscrimASmi.trajectoryTrackAssociation = "RefitterForDeDx"
doAlldEdXEstimatorsTask = cms.Task(RefitterForDeDx, dedxTruncated40, dedxHarmonic2, dedxHitInfo )
doAlldEdXEstimators = cms.Sequence(doAlldEdXEstimatorsTask)
|
tools/sandbox/c7n_index/setup.py | al3pht/cloud-custodian | 2,415 | 12621687 |
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from setuptools import setup, find_packages
setup(
name="c7n_indexer",
version='0.0.2',
description="Cloud Custodian - Metrics/Resource Indexer",
classifiers=[
"Topic :: System :: Systems Administration",
"Topic :: System :: Distributed Computing"
],
url="https://github.com/cloud-custodian/cloud-custodian",
license="Apache-2.0",
packages=find_packages(),
entry_points={
'console_scripts': [
'c7n-indexer = c7n_index.metrics:cli']},
install_requires=["c7n", "click", "influxdb", "elasticsearch"],
)
|
ocs_ci/utility/perf_dash/dashboard_api.py | srivickynesh/ocs-ci | 130 | 12621709 | <filename>ocs_ci/utility/perf_dash/dashboard_api.py
"""
API module to interact with the Performance Dashboard
In order to push results into the dashboard, and pulling data
from it to compare between results.
The DB structure is :
some tables which use for indexing values and all in the same structure:
versions : list of all OCS versions which tested
platform : list of tested platform (e.g. AWS / VSphere etc.)
az_topology : Topology of the tested environment (e.g. 3-AZ)
tests : Tests name which ran (e.g. FIO / SmallFiles etc.)
table name :
ID (int) : the index - unique number
Name (str) : the name to index
the `builds` table contain the builds of a particular OCS version
table name : builds
ID (int) : the index - unique number
version (int) : version ID - must be exists in the versions table
Name (str) : the name (or number) of the build (e.g. 254 / RC1-312 / GA)
the `results` table contain the complete data about individual test
table name : results
ID (int): the index - unique number
sample (int): number of the sample - each test can be run more the one time
version (int): version ID - must be exists in the versions table
build (int): build ID - must be exists in the builds table
platform (int): platform ID - must be exists in the platform table
az_topology (int): topology ID - must be exists in the az_topology table
test_name (int): test ID - must be exists in the tests table
es_link (str): the elasticsearch links to the individual test separated by comma
e.g. http://<es_server>:<port>/<sub-test_1>, http://<es_server>:<port>/<sub-test_2>
log_file (str): link to full test log
A single test (e.g. FIO) can be split into a few sub-tests (by parametrize),
e.g. : [CEPHBLOCKPOOL, sequential], [CEPHFILESYSTEM, sequential],
[CEPHBLOCKPOOL, random], [CEPHFILESYSTEM, random]
but in the dashboard all of those sub-tests (4 in the FIO example) are displayed as a single test
"""
# Builtin modules
import logging
# 3ed party modules
import mysql.connector
# Local modules
from ocs_ci.framework import config
from ocs_ci.ocs import exceptions
log = logging.getLogger(__name__)
class PerfDash(object):
"""
The API class to connect and managing the performance dashboard database
"""
def __init__(self):
"""
Initializing the dashboard object and make a connection
Raise:
if credential file can not be open / read
if the connection failed
"""
# Reading connection information and credentials from local file
# which is not stored in the GitHub repository
self.creds = config.AUTH.get("perf_dashboard", None)
if self.creds is None:
log.error("Dashboard credentials are not defined in configuration")
raise exceptions.MissingRequiredConfigKeyError(
"The key AUTH:perf_dashboard is missing in configuration file"
)
self.creds["raise_on_warnings"] = True
self.creds["use_pure"] = True
# Connecting to the Dashboard DB
self.connect()
def connect(self):
"""
Create DB connection
Raise:
in case of connection failed - use the exception that caught
"""
log.info("Creating DB connector and connect to it")
try:
self.cnx = mysql.connector.connect(**self.creds)
self.cursor = self.cnx.cursor()
except Exception as err:
log.error(f"Can not connect to DB - [{err}]")
raise err
def _run_query(self, query=None):
"""
Run an SQL Query
Args:
query (str): sql query string
Returns:
bool: True if succeed, otherwise False
"""
log.debug(f"Try to Execute query : {query}")
try:
self.cursor.execute(query)
return True
except Exception as err:
log.error(f"Can not execute [{query}]\n{err}")
return False
def get_id_by_name(self, table=None, name=None):
"""
Query the ID of specific 'name' value from selected table
Args:
table (str): The table from which data will be query
name (str): The value for the 'name' field in the table to query
Returns:
int : the value of the ID field in the table, otherwise None
"""
query = f"SELECT id FROM {table} WHERE name = '{name}' ;"
if self._run_query(query=query):
for result_id in self.cursor:
# The query return a tuple and we need only the firs element
return result_id[0]
return None
def get_name_by_id(self, table=None, recid=None):
"""
Query the name of specific 'id' value from selected table
Args:
table (str): The table from which data will be query
recid (int): The value for the 'id' field in the table to query
Returns:
str : the value of the Name field in the table, otherwise None
"""
query = f"SELECT name FROM {table} WHERE id = {recid} ;"
if self._run_query(query=query):
for result_name in self.cursor:
# The query returns a tuple and we need only the first element
return result_name[0]
return None
def insert_single_value(self, table=None, value=None):
"""
Insert a value to 'table' and return it's ID
Args:
table (str): The table to insert data into
value (str): The value to insert
Returns:
int : the ID of the value in the table, otherwise None
"""
# prevent duplicate records for the same value
record_id = self.get_id_by_name(table=table, name=value)
if record_id is not None:
return record_id
query = f"INSERT INTO {table} (name) VALUES ('{value}') ;"
if self._run_query(query=query):
try:
rec_id = self.cursor.lastrowid
# Make sure data is committed to the database
self.cnx.commit()
return rec_id
except Exception as err:
log.error(f"Can not insert {value} into {table} - [{err}]")
return None
def get_version_id(self, version=None):
"""
Query of the ID of version number in the DB
Args:
version (str): The version number (e.g. 4.9.0)
Returns:
int : the version ID in the DB
"""
return self.get_id_by_name("versions", version)
def get_test_id(self, test=None):
"""
Query of the ID of test name in the DB
Args:
test (str): The test name (e.g. FIO)
Returns:
int : the Test ID in the DB
"""
return self.get_id_by_name("tests", test)
def get_platform_id(self, platform=None):
"""
Query of the ID of platform name in the DB
Args:
platform (str): The platform name (e.g. AWS)
Returns:
int : the Platform ID in the DB
"""
return self.get_id_by_name("platform", platform)
def get_topology_id(self, topology=None):
"""
Query of the ID of platform name in the DB
Args:
topology (str): The Topology name (e.g. 3-AZ)
Returns:
int : the Topology ID in the DB
"""
return self.get_id_by_name("az_topology", topology)
def get_version_name(self, version=0):
"""
Query of the Name of version ID in the DB
Args:
version (int): The version ID
Returns:
str : the version name in the DB
"""
return self.get_name_by_id("versions", version)
def get_test_name(self, test=0):
"""
Query of the Name of test ID in the DB
Args:
test (int): The test ID
Returns:
str : the Test Name in the DB
"""
return self.get_name_by_id("tests", test)
def get_platform_name(self, platform=0):
"""
        Query of the Name of platform ID in the DB
Args:
platform (int): The platform ID
Returns:
str : the Platform Name in the DB
"""
return self.get_name_by_id("platform", platform)
def get_topology_name(self, topology=0):
"""
        Query of the Name of topology ID in the DB
Args:
topology (int): The Topology ID
Returns:
str : the Topology Name in the DB
"""
return self.get_name_by_id("az_topology", topology)
def get_version_builds(self, version=None):
"""
        Query the list of builds in a specific version
Args:
version (str): The version name (e.g. 4.9.0)
Returns:
dict : dictionary of (Name: ID), None if not exist
"""
ver_id = self.get_version_id(version)
if ver_id is None:
return None
results = {}
query = f"SELECT id, name FROM builds WHERE version = {ver_id} ;"
if self._run_query(query=query):
for (build_id, name) in self.cursor:
results[name] = build_id
return None if results == {} else results
def get_build_id(self, version, build):
"""
        Getting the build ID for a specific build of a version.
        If the build does not exist, return None
Args:
version (str): the version name (e.g. 4.9.0)
build (str): the build name (e.g. GA)
Returns:
int : the build ID
"""
all_builds = self.get_version_builds(version=version)
if all_builds:
return all_builds.get(build)
return None
def insert_build(self, version, build):
"""
        Insert a new build into the DB and return its ID
Args:
version (str): The version number as string (e.g. 4.9.0)
build (str): The build number (e.g. 180 / RC1-200 / GA)
Returns:
int : the ID of the build in the DB, otherwise None
"""
        # prevent duplicate records for the same build
build_id = self.get_build_id(version=version, build=build)
if build_id is not None:
return build_id
        # Try to insert the version into the DB; it will not be inserted twice.
        # If the version already exists in the DB, this just returns its id.
ver_id = self.insert_single_value(table="versions", value=version)
query = f"INSERT INTO builds (version, name) VALUES ({ver_id}, '{build}') ;"
if self._run_query(query=query):
# Insert the data
try:
rec_id = self.cursor.lastrowid
# Make sure data is committed to the database
self.cnx.commit()
return rec_id
except Exception as err:
log.error(f"Can not insert {version}-{build} into builds - [{err}]")
return None
def get_results(self, version, build, platform, topology, test):
"""
Getting the results information (es_link, log_file) for all test samples
for a particular test configuration.
Args:
version (str): The version number (e.g. 4.9.0)
build (str): The build number (e.g. RC5-180)
platform (str): The platform (e.g. Bare-Metal)
topology (str): The topology (e.g. 3-AZ)
test (str): The test name (e.g. SmallFiles)
Returns:
dict : dictionary of all test samples as :
{sample: {es_link, log_file},}
"""
def value_verify(value, msg):
if value is None:
log.error(f"{msg} does not exist in the DB!")
return False
else:
return True
ver_id = self.get_version_id(version=version)
build_id = self.get_build_id(version=version, build=build)
platform_id = self.get_platform_id(platform=platform)
topology_id = self.get_topology_id(topology=topology)
test_id = self.get_test_id(test=test)
if not (
value_verify(ver_id, f"Version : {version}")
and value_verify(build_id, f"Build : {version}-{build}")
and value_verify(platform_id, f"Platform : {platform}")
and value_verify(topology_id, f"Topology : {topology}")
and value_verify(test_id, f"Test : {test}")
):
return None
results = {}
query = (
f"SELECT sample,es_link,log_file FROM results WHERE version = {ver_id} "
f"AND build = {build_id} AND platform = {platform_id} AND "
f"az_topology = {topology_id} AND test_name = {test_id} ;"
)
if self._run_query(query=query):
for (sample, eslink, logfile) in self.cursor:
log.debug(f"{sample}, {eslink}, {logfile}")
results[sample] = {
"eslink": eslink.rstrip("\r\n"),
"log": logfile.rstrip("\r\n"),
}
return results
def get_next_sample(self, version, build, platform, topology, test):
"""
        Getting the number of the next sample for particular test results.
if there are no results in the DB, it will return 0
Args:
version (str): The version number (e.g. 4.9.0)
build (str): The build number (e.g. RC5-180)
platform (str): The platform (e.g. Bare-Metal)
topology (str): The topology (e.g. 3-AZ)
test (str): The test name (e.g. SmallFiles)
Returns:
int : the number of the next sample to insert to the DB
"""
ver_id = self.get_version_id(version=version)
if ver_id is None:
return 0
build_id = self.get_build_id(version=version, build=build)
if build_id is None:
return 0
platform_id = self.get_platform_id(platform)
if platform_id is None:
return 0
topology_id = self.get_topology_id(topology)
if topology_id is None:
return 0
test_id = self.get_test_id(test)
if test_id is None:
return 0
results = []
query = (
f"SELECT sample FROM results WHERE version = {ver_id} AND "
f"build = {build_id} AND platform = {platform_id} AND "
f"az_topology = {topology_id} AND test_name = {test_id} ;"
)
if self._run_query(query=query):
for sample in self.cursor:
results.append(sample[0])
if len(results) == 0:
return 0
else:
return max(results) + 1
def add_results(self, version, build, platform, topology, test, eslink, logfile):
"""
Adding results information into the DB.
Args:
version (str): The version number (e.g. 4.9.0)
build (str): The build number (e.g. RC5-180)
platform (str): The platform (e.g. Bare-Metal)
topology (str): The topology (e.g. 3-AZ)
test (str): The test name (e.g. SmallFiles)
eslink (str): The elasticsearch link(s) to the results
logfile (str): The link to the test log file
Returns:
bool : True if the operation succeed otherwise False
"""
ver_id = self.get_version_id(version=version)
if ver_id is None:
ver_id = self.insert_single_value(table="versions", value=version)
if ver_id is None:
return False
build_id = self.get_build_id(version=version, build=build)
if build_id is None:
build_id = self.insert_build(version=version, build=build)
if build_id is None:
return False
platform_id = self.get_platform_id(platform)
if platform_id is None:
platform_id = self.insert_single_value(table="platform", value=platform)
if platform_id is None:
return False
topology_id = self.get_topology_id(topology)
if topology_id is None:
topology_id = self.insert_single_value(table="az_topology", value=topology)
if topology_id is None:
return False
test_id = self.get_test_id(test)
if test_id is None:
test_id = self.insert_single_value(table="tests", value=test)
if test_id is None:
return False
sample = self.get_next_sample(
version=version,
build=build,
platform=platform,
topology=topology,
test=test,
)
query = (
f"INSERT INTO results "
"(sample, version, build, platform, az_topology, test_name, es_link, log_file) "
f"VALUES ({sample}, {ver_id}, {build_id}, {platform_id}, {topology_id}, "
f"{test_id}, '{eslink}', '{logfile}') ;"
)
if self._run_query(query=query):
try:
# Make sure data is committed to the database
self.cnx.commit()
log.info("Test results pushed to the DB!")
return True
except Exception as err:
log.error(f"Can not insert result into the DB - [{err}]")
return False
def cleanup(self):
"""
Cleanup and close the DB connection
"""
log.info("Closing the DB connection")
self.cursor.close()
self.cnx.close()
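# --- Illustrative usage sketch (added note, not part of the original file) ---
# The class name and the credential keys below are assumptions; only the methods
# shown above (connect, insert_build, add_results, get_results, cleanup) come
# from this file. A minimal flow might look like:
#
#     db = ResultsDB(creds={"host": "db.example.com", "user": "perf",       # hypothetical name/creds
#                           "password": "***", "database": "perf_results"})
#     db.connect()
#     db.add_results("4.9.0", "RC5-180", "AWS", "3-AZ", "FIO",
#                    eslink="http://es.example.com/run/1", logfile="fio.log")
#     print(db.get_results("4.9.0", "RC5-180", "AWS", "3-AZ", "FIO"))
#     db.cleanup()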
|
monero/daemon.py | successor1/wownero-python | 130 | 12621716 | import six
from .backends.jsonrpc import JSONRPCDaemon
class Daemon(object):
"""Monero daemon.
Provides interface to a daemon instance.
:param backend: a daemon backend
:param \\**kwargs: arguments to initialize a :class:`JSONRPCDaemon <monero.backends.jsonrpc.JSONRPCDaemon>`
instance if no backend is given
"""
def __init__(self, backend=None, **kwargs):
if backend and len(kwargs):
raise ValueError('backend already given, other arguments are extraneous')
self._backend = backend if backend else JSONRPCDaemon(**kwargs)
def info(self):
"""
Returns basic information about the daemon.
:rtype: dict
"""
return self._backend.info()
@property
def net(self):
return self._backend.net()
def height(self):
"""
Return daemon's chain height.
:rtype: int
"""
return self._backend.info()['height']
def send_transaction(self, tx, relay=True):
"""
Sends a transaction generated by a :class:`Wallet <monero.wallet.Wallet>`.
:param tx: :class:`Transaction <monero.transaction.Transaction>`
:param relay: whether to relay the transaction to peers. If `False`, the daemon will have
to mine the transaction itself in order to have it included in the blockchain.
"""
return self._backend.send_transaction(tx.blob, relay=relay)
def mempool(self):
"""
Returns current mempool contents.
:rtype: list of :class:`Transaction <monero.transaction.Transaction>`
"""
return self._backend.mempool()
def headers(self, start_height, end_height=None):
"""
Returns block headers for given height range.
        If no `end_height` is given, it is assumed to be equal to `start_height`.
:rtype: list of dict
"""
return self._backend.headers(start_height, end_height)
def block(self, bhash=None, height=None):
"""
Returns a block of specified height or hash.
:param str bhash: block hash, or
:param int height: block height
:rtype: :class:`Block <monero.block.Block>`
"""
if height is None and bhash is None:
raise ValueError("Height or hash must be specified")
return self._backend.block(bhash=bhash, height=height)
def transactions(self, hashes):
"""
Returns transactions matching given hashes. Accepts single hash or a sequence.
:param hashes: str or list of str
"""
if isinstance(hashes, six.string_types):
hashes = [hashes]
return self._backend.transactions(hashes)
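# --- Illustrative usage sketch (added note, not part of the original file) ---
# The JSONRPCDaemon keyword arguments below (host/port) are assumptions about the
# backend's constructor; the Daemon methods themselves are defined above.
#
#     from monero.daemon import Daemon
#     d = Daemon(host="127.0.0.1", port=18081)   # kwargs are passed to JSONRPCDaemon
#     print(d.height())                          # current chain height
#     print(len(d.mempool()))                    # number of pending transactions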
|
tests/common/test_run/ascend/transdata_matmul_run.py | tianjiashuo/akg | 286 | 12621741 |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import akg.tvm
import numpy as np
from akg.utils import kernel_exec as utils
from akg.ops.math.ascend import MatMul
from tests.common.test_run.ascend.matmul_run import *
def get_matmul_fractal_shape(x, format='zN'):
shape = x.shape
m, n = shape[-2], shape[-1]
m1, n1 = m // 16, n // 16
m0, n0 = 16, 16
needPad = m % 16 != 0 or n % 16 != 0
if format == 'zN':
transpose_axis = [2, 0, 1, 3]
new_shape = [n1, m1, m0, n0]
elif format == 'zZ':
transpose_axis = [0, 2, 1, 3]
new_shape = [m1, n1, m0, n0]
elif format == 'nZ':
transpose_axis = [0, 2, 3, 1]
new_shape = [m1, n1, n0, m0]
return new_shape
def transdata_matmul(x, y, b, out_dtype, left_format="zZ", right_format="nZ", out_format="zN", transpose_x=False,
transpose_y=False, attrs={}, target="cce"):
x_fractal_shape = get_matmul_fractal_shape(x, 'zN')
y_fractal_shape = get_matmul_fractal_shape(y, 'zN')
func = akg.tvm.get_global_func("TransData")
x = func([x], {"src_format" : "DefaultFormat", "dst_format" : "FRACTAL_NZ", "output_shape": x_fractal_shape})
y = func([y], {"src_format" : "DefaultFormat", "dst_format" : "FRACTAL_NZ", "output_shape": y_fractal_shape})
res, attrs = MatMul(x, y, b, out_dtype, left_format, right_format, out_format, transpose_x, transpose_y, attrs=attrs)
return res, attrs
def transdata_matmul_compile(shape_x, shape_y, bias, left_format, right_format, output_format, adj_x, adj_y, dtype, bias_dtype, out_dtype, kernel_name, attrs, tuning=False):
batch_tuple, m, k, n = extract_dim(shape_x, shape_y, adj_x, adj_y)
m = (m + 15) // 16 * 16
n = (n + 15) // 16 * 16
k = (k + 15) // 16 * 16
shape_xx, shape_yy, bias_shape, out_shape, k = get_converted_shapes(m, n, k, batch_tuple, adj_x, adj_y, bias,
left_format, right_format, output_format)
input_shapes = [shape_x, shape_y, bias_shape]
input_types = [dtype, dtype, bias_dtype]
has_bias = False
if bias == 1:
has_bias = True
op_attrs = [out_dtype, left_format, right_format, output_format, adj_x, adj_y, attrs]
if has_bias == False:
input_shapes = [shape_x, shape_y]
input_types = [dtype, dtype]
op_attrs = [None, out_dtype, left_format, right_format, output_format, adj_x, adj_y, attrs]
return utils.op_build_test(transdata_matmul, input_shapes, input_types, op_attrs, kernel_name, attrs=attrs, tuning=tuning)
def transdata_matmul_execute(shape_x, shape_y, bias, left_format, right_format, out_format, adj_x, adj_y, dtype, bias_dtype, out_dtype, kernel_name, attrs={}):
batch_tuple, m, k, n = extract_dim(shape_x, shape_y, adj_x, adj_y)
m = (m + 15) // 16 * 16
n = (n + 15) // 16 * 16
k = (k + 15) // 16 * 16
shape_xx, shape_yy, bias_shape, out_shape, k = get_converted_shapes(m, n, k, batch_tuple, adj_x, adj_y, bias, left_format, right_format, out_format)
mod = transdata_matmul_compile(shape_x, shape_y, bias, left_format, right_format, out_format, adj_x, adj_y, dtype, bias_dtype, out_dtype, kernel_name, attrs=attrs)
# Generate data
m_x_fractal, m_y_fractal, bench_mark, bias_data, m_x, m_y = gen_data_all(batch_tuple, m, k, n, adj_x, adj_y, dtype, bias_dtype, out_dtype, bias, left_format, right_format, out_format)
# mod launch
output = np.full(bench_mark.shape, np.nan, out_dtype)
if bias == 0:
output = utils.mod_launch(mod, (m_x, m_y, output), expect=bench_mark)
elif bias == 1:
output = utils.mod_launch(mod, (m_x, m_y, bias_data, output), expect=bench_mark)
# compare result
rtol, atol = get_rtol_atol("matmul", dtype)
compare_result = compare_tensor(output, bench_mark, rtol=rtol, atol=atol, equal_nan=True)
return (m_x, m_y), output, bench_mark, compare_result
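# --- Illustrative usage sketch (added note, not part of the original file) ---
# The shapes, formats and dtypes below are assumptions chosen to satisfy the
# 16-alignment applied above; only the function signature comes from this file.
#
#     transdata_matmul_execute((64, 32), (32, 48), 0, "zZ", "nZ", "zN",
#                              False, False, "float16", "float16", "float16",
#                              "transdata_matmul_demo")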
|
ide_server.py | vmr1532/Time2Code | 332 | 12621746 |
from flask import Flask, request, render_template, Markup
import requests
from urllib.parse import urlparse
import markdown
import os
import subprocess
import socket
app = Flask(__name__)
@app.route('/')
def time2code():
text = request.args.get('code')
lang = request.args.get('lang')
straight_text = request.args.get('straight_text')
code_text = ""
if text:
r_text = requests.get(text + "?raw=true")
code_text = r_text.text
elif straight_text:
code_text = straight_text
else:
code_text = ""
if lang:
code_lang = lang
else:
code_lang = "python3"
return render_template('index.html', code_text=code_text, code_lang=code_lang)
@app.route('/tutorial')
def tutorial():
text = request.args.get('code')
lang = request.args.get('lang')
straight_text = request.args.get('straight_text')
get_tut = request.args.get('tut')
code_text = ""
tut_url = ""
mark = ""
if get_tut:
tut_url = get_tut + "?raw=true"
r_tut = requests.get(tut_url)
mark = r_tut.text
else:
tut_url = "https://raw.githubusercontent.com/JockDaRock/Time2Code/master/Sample.md?raw=true"
r_tut = requests.get(tut_url)
mark = r_tut.text
if text:
r_text = requests.get(text + "?raw=true")
code_text = r_text.text
elif straight_text:
code_text = straight_text
if lang:
code_lang = lang
else:
code_lang = "python3"
content = Markup(markdown.markdown(mark, extensions=['pymdownx.github', 'pymdownx.highlight']))
return render_template('index-tutorial.html', markdown=content, code_text=code_text, code_lang=code_lang)
if __name__ == '__main__':
app.run(host="0.0.0.0", port=5555, debug=True)
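# --- Illustrative usage sketch (added note, not part of the original file) ---
# Example requests against the routes defined above; the URLs are assumptions
# based on the default host/port passed to app.run:
#
#     # load the editor pre-filled from a raw code URL, highlighted as python3
#     http://localhost:5555/?code=https://example.com/snippet.py&lang=python3
#
#     # load the tutorial view with a markdown file and inline code
#     http://localhost:5555/tutorial?tut=https://example.com/lesson.md&straight_text=print("hi")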
|
tests/rest_framework/__init__.py | A-Ashiq/django-filter | 2,512 | 12621769 |
default_app_config = 'tests.rest_framework.apps.RestFrameworkTestConfig'
|
corehq/sql_proxy_accessors/migrations/0047_remove_get_case_models_functions.py | dimagilg/commcare-hq | 471 | 12621790 | # -*- coding: utf-8 -*-
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('sql_proxy_accessors', '0046_get_ledger_values_for_cases_2'),
]
operations = [
migrations.RunSQL('DROP FUNCTION IF EXISTS get_case_transactions(TEXT)'),
migrations.RunSQL('DROP FUNCTION IF EXISTS get_case_attachments(TEXT)'),
migrations.RunSQL('DROP FUNCTION IF EXISTS get_case_by_id(TEXT)'),
migrations.RunSQL('DROP FUNCTION IF EXISTS check_form_exists(TEXT, TEXT)'),
migrations.RunSQL('DROP FUNCTION IF EXISTS get_case_indices(TEXT, TEXT)'),
migrations.RunSQL('DROP FUNCTION IF EXISTS get_form_attachments(TEXT)'),
migrations.RunSQL('DROP FUNCTION IF EXISTS get_form_operations(TEXT);'),
]
|
migrations/versions/2191c871434_add_snapshot_model.py | vault-the/changes | 443 | 12621793 | """Add snapshot model
Revision ID: 2191c871434
Revises: 1<PASSWORD>
Create Date: 2014-07-17 17:21:42.915797
"""
# revision identifiers, used by Alembic.
revision = '2191c871434'
down_revision = '1<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table(
'snapshot',
sa.Column('id', sa.GUID(), nullable=False),
sa.Column('project_id', sa.GUID(), nullable=False),
sa.Column('build_id', sa.GUID(), nullable=True),
sa.Column('status', sa.Enum(), server_default='0', nullable=False),
sa.ForeignKeyConstraint(['build_id'], ['build.id'], ),
sa.ForeignKeyConstraint(['project_id'], ['project.id'], ondelete='CASCADE'),
sa.PrimaryKeyConstraint('id')
)
def downgrade():
op.drop_table('snapshot')
|
Trakttv.bundle/Contents/Libraries/Shared/elapsed/__init__.py | disrupted/Trakttv.bundle | 1,346 | 12621813 | from elapsed.main import setup, reset, clock, format_report, print_report
__version__ = '1.0.0'
|
sonnet/src/parallel_linear_test.py | ScriptBox99/deepmind-sonnet | 10,287 | 12621815 |
# Copyright 2019 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for sonnet.v2.src.parallel_linear."""
from sonnet.src import linear
from sonnet.src import parallel_linear
from sonnet.src import test_utils
import tensorflow as tf
class ParallelLinearTest(test_utils.TestCase):
def test_output_size_correct(self):
layer = parallel_linear.ParallelLinears(3)
outputs = layer(tf.ones([4, 2, 6]))
self.assertEqual(outputs.shape, [4, 2, 3])
def test_behaves_same_as_stacked_linears(self):
w_init = tf.random.normal((3, 5, 7))
b_init = tf.random.normal((3, 1, 7))
inputs = tf.random.normal((3, 2, 5))
parallel = parallel_linear.ParallelLinears(
7, w_init=lambda s, d: w_init, b_init=lambda s, d: b_init)
parallel_outputs = parallel(inputs)
stacked_outputs = []
for i in range(3):
layer = linear.Linear(
7,
w_init=lambda s, d, i=i: w_init[i],
b_init=lambda s, d, i=i: b_init[i])
stacked_outputs.append(layer(inputs[i]))
stacked_outputs = tf.stack(stacked_outputs, axis=0)
self.assertAllClose(parallel_outputs.numpy(), stacked_outputs.numpy())
if __name__ == '__main__':
tf.test.main()
|
src/sage/categories/graded_lie_algebras.py | bopopescu/sage | 1,742 | 12621821 | r"""
Graded Lie Algebras
AUTHORS:
- <NAME> (2018-08-16): initial version
"""
# ****************************************************************************
# Copyright (C) 2018 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.categories.category_with_axiom import CategoryWithAxiom_over_base_ring
from sage.categories.graded_modules import GradedModulesCategory
class GradedLieAlgebras(GradedModulesCategory):
r"""
Category of graded Lie algebras.
TESTS::
sage: C = LieAlgebras(QQ).Graded()
sage: TestSuite(C).run()
"""
class SubcategoryMethods:
def Stratified(self):
r"""
Return the full subcategory of stratified objects of ``self``.
A Lie algebra is stratified if it is graded and generated as a
Lie algebra by its component of degree one.
EXAMPLES::
sage: LieAlgebras(QQ).Graded().Stratified()
Category of stratified Lie algebras over Rational Field
"""
return self._with_axiom("Stratified")
class Stratified(CategoryWithAxiom_over_base_ring):
r"""
Category of stratified Lie algebras.
A graded Lie algebra `L = \bigoplus_{k=1}^M L_k` (where
possibly `M = \infty`) is called *stratified* if it is generated
by `L_1`; in other words, we have `L_{k+1} = [L_1, L_k]`.
TESTS::
sage: C = LieAlgebras(QQ).Graded().Stratified()
sage: TestSuite(C).run()
"""
class FiniteDimensional(CategoryWithAxiom_over_base_ring):
r"""
Category of finite dimensional stratified Lie algebras.
EXAMPLES::
sage: LieAlgebras(QQ).Graded().Stratified().FiniteDimensional()
Category of finite dimensional stratified Lie algebras over Rational Field
TESTS::
sage: C = LieAlgebras(QQ).Graded().Stratified().FiniteDimensional()
sage: TestSuite(C).run()
"""
def extra_super_categories(self):
"""
Implements the fact that a finite dimensional stratified Lie
algebra is nilpotent.
EXAMPLES::
sage: C = LieAlgebras(QQ).Graded().Stratified().FiniteDimensional()
sage: C.extra_super_categories()
[Category of nilpotent Lie algebras over Rational Field]
sage: C is C.Nilpotent()
True
sage: C.is_subcategory(LieAlgebras(QQ).Nilpotent())
True
"""
from sage.categories.lie_algebras import LieAlgebras
return [LieAlgebras(self.base_ring()).Nilpotent()]
|
Python/count_digit.py | OluSure/Hacktoberfest2021-1 | 215 | 12621822 | # Python program to find how many digits a given positive integer has, e.g. 123 -> 3, 737327 -> 6: take the base-10 log, then add 1.
from math import log,floor
print(floor(log(int(input()),10)+1))
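# Worked check of the identity used above (added note): floor(log10(n)) + 1 equals
# the digit count for positive integers, e.g. floor(log(737327, 10)) + 1 == 5 + 1 == 6.
# Note this assumes n > 0; log(0) is undefined and negative inputs are not handled.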
|
tests/test_cfg.py | mohamedbakrey12/prpjectINDeepLearning | 122 | 12621846 | import os
import pytest
from hydra import compose, initialize
from omegaconf import DictConfig
config_files = [f.split('.')[0] for f in os.listdir('conf') if 'yaml' in f]
@pytest.mark.parametrize('config_name', config_files)
def test_cfg(config_name: str) -> None:
with initialize(config_path='../conf'):
cfg = compose(config_name=config_name, overrides=['private=default'])
assert isinstance(cfg, DictConfig)
|
tensorflow/contrib/mpi_collectives/mpi_ops_test.py | tianyapiaozi/tensorflow | 848 | 12621855 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tensorflow.contrib.mpi_collectives.mpi_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import itertools
import tensorflow as tf
import tensorflow.contrib.mpi_collectives as mpi
def mpi_env_rank_and_size():
"""Get MPI rank and size from environment variables and return them as a
tuple of integers.
Most MPI implementations have an `mpirun` or `mpiexec` command that will
run an MPI executable and set up all communication necessary between the
different processors. As part of that set up, they will set environment
variables that contain the rank and size of the MPI_COMM_WORLD
communicator. We can read those environment variables from Python in order
to ensure that `mpi.rank()` and `mpi.size()` return the expected values.
Since MPI is just a standard, not an implementation, implementations
typically choose their own environment variable names. This function tries
  to support several different implementations, but really it only needs to
support whatever implementation we want to use for the TensorFlow test
suite.
If this is not running under MPI, then defaults of rank zero and size one
are returned. (This is appropriate because when you call MPI_Init in an
application not started with mpirun, it will create a new independent
communicator with only one process in it.)
"""
rank_env = "PMI_RANK OMPI_COMM_WORLD_RANK".split()
size_env = "PMI_SIZE OMPI_COMM_WORLD_SIZE".split()
for rank_var, size_var in zip(rank_env, size_env):
rank = os.environ.get(rank_var)
size = os.environ.get(size_var)
if rank is not None and size is not None:
return int(rank), int(size)
# Default to rank zero and size one if there are no environment variables
return 0, 1
class MPITests(tf.test.TestCase):
"""
Tests for MPI ops in tensorflow.contrib.mpi_collectives.
"""
def test_mpi_rank(self):
"""Test that the rank returned by mpi.rank() is correct."""
true_rank, _ = mpi_env_rank_and_size()
with self.test_session() as session:
rank = session.run(mpi.rank())
self.assertEqual(true_rank, rank)
def test_mpi_size(self):
"""Test that the size returned by mpi.size() is correct."""
_, true_size = mpi_env_rank_and_size()
with self.test_session() as session:
size = session.run(mpi.size())
self.assertEqual(true_size, size)
def test_mpi_allreduce_cpu(self):
"""Test on CPU that the allreduce correctly sums 1D, 2D, 3D tensors."""
with self.test_session() as session:
size = session.run(mpi.size())
dtypes = [tf.int32, tf.float32]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
tf.set_random_seed(1234)
tensor = tf.random_uniform([17] * dim, -100, 100, dtype=dtype)
summed = mpi.allreduce(tensor, average=False)
multiplied = tensor * size
max_difference = tf.reduce_max(tf.abs(summed - multiplied))
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
diff = session.run(max_difference)
self.assertTrue(diff <= threshold,
"mpi.allreduce produces incorrect results")
def test_mpi_allreduce_gpu(self):
"""Test that the allreduce works on GPUs.
This test will crash badly if used with an MPI implementation that does
not support GPU memory transfers directly, as it will call MPI_Send on
a GPU data pointer."""
# Only do this test if there are GPUs available.
if not tf.test.is_gpu_available(cuda_only=True):
return
no_gpus = tf.GPUOptions(visible_device_list="")
cpu_config = tf.ConfigProto(gpu_options=no_gpus)
with self.test_session(config=cpu_config) as session:
local_rank = session.run(mpi.local_rank())
one_gpu = tf.GPUOptions(visible_device_list=str(local_rank))
gpu_config = tf.ConfigProto(gpu_options=one_gpu)
with self.test_session(config=gpu_config) as session:
size = session.run(mpi.size())
dtype = tf.float32
dim = 3
with tf.device("/gpu:0"):
tf.set_random_seed(1234)
tensor = tf.random_uniform([17] * dim, -100, 100, dtype=dtype)
summed = mpi.allreduce(tensor, average=False)
multiplied = tensor * size
max_difference = tf.reduce_max(tf.abs(summed - multiplied))
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
return
diff = session.run(max_difference)
self.assertTrue(diff <= threshold,
"mpi.allreduce on GPU produces incorrect results")
def test_mpi_allreduce_error(self):
"""Test that the allreduce raises an error if different ranks try to
send tensors of different rank or dimension."""
with self.test_session() as session:
rank = session.run(mpi.rank())
size = session.run(mpi.size())
# This test does not apply if there is only one worker.
if size == 1:
return
# Same rank, different dimension
tf.set_random_seed(1234)
dims = [17 + rank] * 3
tensor = tf.random_uniform(dims, -1.0, 1.0)
with self.assertRaises(tf.errors.FailedPreconditionError):
session.run(mpi.allreduce(tensor))
# Same number of elements, different rank
tf.set_random_seed(1234)
if rank == 0:
dims = [17, 23 * 57]
else:
dims = [17, 23, 57]
tensor = tf.random_uniform(dims, -1.0, 1.0)
with self.assertRaises(tf.errors.FailedPreconditionError):
session.run(mpi.allreduce(tensor))
def test_mpi_allreduce_type_error(self):
"""Test that the allreduce raises an error if different ranks try to
send tensors of different type."""
with self.test_session() as session:
rank = session.run(mpi.rank())
size = session.run(mpi.size())
# This test does not apply if there is only one worker.
if size == 1:
return
# Same rank, different dimension
dims = [17] * 3
tensor = tf.ones(dims, dtype=tf.int32 if rank % 2 == 0 else tf.float32)
with self.assertRaises(tf.errors.FailedPreconditionError):
session.run(mpi.allreduce(tensor))
def test_mpi_allgather(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors."""
with self.test_session() as session:
size = session.run(mpi.size())
rank = session.run(mpi.rank())
dtypes = tf.int32, tf.float32
dims = 1, 2, 3
for dtype, dim in itertools.product(dtypes, dims):
tensor = tf.ones([17] * dim, dtype=dtype) * rank
gathered = mpi.allgather(tensor)
gathered_tensor = session.run(gathered)
self.assertEqual(list(gathered_tensor.shape),
[17 * size] + [17] * (dim - 1))
for i in range(size):
rank_tensor = tf.slice(gathered_tensor, [i * 17] + [0] * (dim - 1),
[17] + [-1] * (dim - 1))
self.assertEqual(list(rank_tensor.shape), [17] * dim)
self.assertTrue(session.run(tf.reduce_all(tf.equal(rank_tensor, i))),
"mpi.allgather produces incorrect gathered tensor")
def test_mpi_allgather_variable_size(self):
"""Test that the allgather correctly gathers 1D, 2D, 3D tensors,
even if those tensors have different sizes along the first dim."""
with self.test_session() as session:
size = session.run(mpi.size())
rank = session.run(mpi.rank())
dtypes = tf.int32, tf.float32
dims = 1, 2, 3
for dtype, dim in itertools.product(dtypes, dims):
# Support tests up to MPI Size of 35
if size > 35:
break
tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5
tensor_sizes = tensor_sizes[:size]
tensor = tf.ones([tensor_sizes[rank]] + [17] * (dim - 1),
dtype=dtype) * rank
gathered = mpi.allgather(tensor)
gathered_tensor = session.run(gathered)
expected_size = sum(tensor_sizes)
self.assertEqual(list(gathered_tensor.shape),
[expected_size] + [17] * (dim - 1))
for i in range(size):
rank_size = [tensor_sizes[i]] + [17] * (dim - 1)
rank_tensor = tf.slice(gathered,
[sum(tensor_sizes[:i])] + [0] * (dim - 1),
rank_size)
self.assertEqual(list(rank_tensor.shape), rank_size)
self.assertTrue(session.run(tf.reduce_all(tf.equal(rank_tensor, i))),
"mpi.allgather produces incorrect gathered tensor")
def test_mpi_allgather_error(self):
"""Test that the allgather returns an error if any dimension besides
the first is different among the tensors being gathered."""
with self.test_session() as session:
rank = session.run(mpi.rank())
size = session.run(mpi.size())
# This test does not apply if there is only one worker.
if size == 1:
return
tensor_size = [17] * 3
tensor_size[1] = 10 * (rank + 1)
tensor = tf.ones(tensor_size, dtype=tf.float32) * rank
with self.assertRaises(tf.errors.FailedPreconditionError):
session.run(mpi.allgather(tensor))
def test_mpi_allgather_type_error(self):
"""Test that the allgather returns an error if the types being gathered
differ among the processes"""
with self.test_session() as session:
rank = session.run(mpi.rank())
size = session.run(mpi.size())
# This test does not apply if there is only one worker.
if size == 1:
return
tensor_size = [17] * 3
dtype = tf.int32 if rank % 2 == 0 else tf.float32
tensor = tf.ones(tensor_size, dtype=dtype) * rank
with self.assertRaises(tf.errors.FailedPreconditionError):
session.run(mpi.allgather(tensor))
if __name__ == '__main__':
tf.test.main()
|
testing/example_scripts/acceptance/fixture_mock_integration.py | markshao/pytest | 9,225 | 12621880 |
"""Reproduces issue #3774"""
from unittest import mock
import pytest
config = {"mykey": "ORIGINAL"}
@pytest.fixture(scope="function")
@mock.patch.dict(config, {"mykey": "MOCKED"})
def my_fixture():
return config["mykey"]
def test_foobar(my_fixture):
assert my_fixture == "MOCKED"
|
core/git_mixins/branches.py | timfjord/GitSavvy | 2,058 | 12621934 | from collections import namedtuple
import re
from GitSavvy.core.git_command import mixin_base
MYPY = False
if MYPY:
from typing import Dict, Iterable, Optional, Sequence
BRANCH_DESCRIPTION_RE = re.compile(r"^branch\.(.*?)\.description (.*)$")
Branch = namedtuple("Branch", (
"name",
"remote",
"name_with_remote",
"commit_hash",
"commit_msg",
"tracking",
"tracking_status",
"active",
"description"
))
class BranchesMixin(mixin_base):
def get_current_branch(self):
# type: () -> Optional[Branch]
for branch in self.get_local_branches():
if branch.active:
return branch
return None
def get_current_branch_name(self):
# type: () -> Optional[str]
"""
Return the name of the current branch.
"""
branch = self.get_current_branch()
if branch:
return branch.name
return None
def get_upstream_for_active_branch(self):
# type: () -> Optional[str]
"""
Return ref for remote tracking branch.
"""
return self.git(
"rev-parse",
"--abbrev-ref",
"--symbolic-full-name",
"@{u}",
throw_on_error=False
).strip() or None
def get_remote_for_branch(self, branch_name):
# type: (str) -> Optional[str]
return self.git(
"config",
"--get",
"branch.{}.remote".format(branch_name),
throw_on_error=False
).strip() or None
def get_local_branch_by_name(self, branch_name):
# type: (str) -> Optional[Branch]
"""
Get a local Branch tuple from branch name.
"""
for branch in self.get_local_branches():
if branch.name == branch_name:
return branch
return None
def get_local_branches(self):
# type: () -> Iterable[Branch]
return self.get_branches(refs=["refs/heads"])
def get_branches(
self, *,
sort_by_recent=False,
fetch_descriptions=False,
refs=["refs/heads", "refs/remotes"]
):
# type: (bool, bool, Sequence[str]) -> Iterable[Branch]
"""
Return a list of all local and remote branches.
"""
stdout = self.git(
"for-each-ref",
"--format=%(HEAD)%00%(refname)%00%(upstream)%00%(upstream:track)%00%(objectname)%00%(contents:subject)",
"--sort=-committerdate" if sort_by_recent else None,
*refs
)
branches = (
branch
for branch in (
self._parse_branch_line(line)
for line in stdout.split("\n")
)
if branch and branch.name != "HEAD"
)
if not fetch_descriptions:
return branches
descriptions = self.fetch_branch_description_subjects()
return (
branch._replace(description=descriptions.get(branch.name_with_remote, ""))
for branch in branches
)
def fetch_branch_description_subjects(self):
# type: () -> Dict[str, str]
rv = {}
for line in self.git(
"config",
"--get-regex",
r"branch\..*\.description",
throw_on_error=False
).strip("\n").splitlines():
match = BRANCH_DESCRIPTION_RE.match(line)
if match is None:
continue
branch_name, description = match.group(1), match.group(2)
rv[branch_name] = description
return rv
def _parse_branch_line(self, line):
# type: (str) -> Optional[Branch]
line = line.strip()
if not line:
return None
head, ref, tracking_branch, tracking_status, commit_hash, commit_msg = line.split("\x00")
active = head == "*"
is_remote = ref.startswith("refs/remotes/")
branch_name = ref[13:] if is_remote else ref[11:]
remote = ref[13:].split("/", 1)[0] if is_remote else None
tracking_branch = tracking_branch[13:]
if tracking_status:
# remove brackets
tracking_status = tracking_status[1:len(tracking_status) - 1]
return Branch(
"/".join(branch_name.split("/")[1:]) if is_remote else branch_name,
remote,
branch_name,
commit_hash,
commit_msg,
tracking_branch,
tracking_status,
active,
description=""
)
def merge(self, branch_names):
"""
Merge `branch_names` into active branch.
"""
self.git("merge", *branch_names)
def branches_containing_commit(self, commit_hash, local_only=True, remote_only=False):
"""
Return a list of branches which contain a particular commit.
"""
branches = self.git(
"branch",
"-a" if not local_only and not remote_only else None,
"-r" if remote_only else None,
"--contains",
commit_hash
).strip().split("\n")
return [branch.strip() for branch in branches]
def validate_branch_name(self, branch):
ref = "refs/heads/{}".format(branch)
return self.git("check-ref-format", "--branch", ref, throw_on_error=False).strip()
|
packages/engine/src/worker/runner/python/fbs/TaskID.py | mschrader15/hash | 219 | 12621939 |
# automatically generated by the FlatBuffers compiler, do not modify
# namespace:
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class TaskId(object):
__slots__ = ['_tab']
@classmethod
def SizeOf(cls):
return 16
# TaskId
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# TaskId
def Inner(self): return [self._tab.Get(flatbuffers.number_types.Int8Flags, self._tab.Pos + flatbuffers.number_types.UOffsetTFlags.py_type(0 + i * 1)) for i in range(16)]
# TaskId
def InnerLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(0))
if o != 0:
return self._tab.VectorLen(o)
return 0
# TaskId
def InnerIsNone(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(0))
return o == 0
def CreateTaskId(builder, inner):
builder.Prep(1, 16)
for _idx0 in range(16 , 0, -1):
builder.PrependInt8(inner[_idx0-1])
return builder.Offset()
|
tda/orders/__init__.py | zhangted/tda-api | 986 | 12621969 | from enum import Enum
from . import common
from . import equities
from . import generic
from . import options
import sys
assert sys.version_info[0] >= 3
__error_message = (
'EquityOrderBuilder has been deleted from the library. Please use ' +
'OrderBuilder and its associated templates instead. See here for ' +
'details: https://tda-api.readthedocs.io/en/latest/' +
'order-templates.html#what-happened-to-equityorderbuilder')
if sys.version_info[1] >= 7:
def __getattr__(name):
if name == 'EquityOrderBuilder':
raise ImportError(__error_message)
raise AttributeError(name)
else: # pragma: no cover
class EquityOrderBuilder:
def __init__(self, *args, **kwargs):
raise NotImplementedError(globals()['__error_message'])
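# --- Illustrative note (added, not part of the original file) ---
# On Python 3.7+ the module-level __getattr__ above (PEP 562) turns the removed
# name into an import-time error, e.g.:
#
#     from tda.orders import EquityOrderBuilder   # raises ImportError with the message above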
|
release/stubs.min/Autodesk/Revit/DB/__init___parts/WorksharingUtils.py | htlcnn/ironpython-stubs | 182 | 12622027 | class WorksharingUtils(object,IDisposable):
""" A static class that contains utility functions related to worksharing. """
@staticmethod
def CheckoutElements(document,elementsToCheckout,options=None):
"""
CheckoutElements(document: Document,elementsToCheckout: ICollection[ElementId]) -> ICollection[ElementId]
CheckoutElements(document: Document,elementsToCheckout: ISet[ElementId],options: TransactWithCentralOptions) -> ISet[ElementId]
"""
pass
@staticmethod
def CheckoutWorksets(document,worksetsToCheckout,options=None):
"""
CheckoutWorksets(document: Document,worksetsToCheckout: ICollection[WorksetId]) -> ICollection[WorksetId]
CheckoutWorksets(document: Document,worksetsToCheckout: ISet[WorksetId],options: TransactWithCentralOptions) -> ISet[WorksetId]
"""
pass
@staticmethod
def CreateNewLocal(sourcePath,targetPath):
"""
CreateNewLocal(sourcePath: ModelPath,targetPath: ModelPath)
Takes a path to a central model and copies the model into a new local file for
the current user.
sourcePath: The path to the central model.
targetPath: The path to put the new local file.
"""
pass
def Dispose(self):
""" Dispose(self: WorksharingUtils) """
pass
@staticmethod
def GetCheckoutStatus(document,elementId,owner=None):
"""
GetCheckoutStatus(document: Document,elementId: ElementId) -> (CheckoutStatus,str)
Gets the ownership status and outputs the owner of an element.
document: The document containing the element.
elementId: The id of the element.
Returns: An indication of whether the element is unowned,owned by the current user,or
owned by another user.
GetCheckoutStatus(document: Document,elementId: ElementId) -> CheckoutStatus
Gets the ownership status of an element.
document: The document containing the element.
elementId: The id of the element.
Returns: A summary of whether the element is unowned,owned by the current user,or
owned by another user.
"""
pass
@staticmethod
def GetModelUpdatesStatus(document,elementId):
"""
GetModelUpdatesStatus(document: Document,elementId: ElementId) -> ModelUpdatesStatus
Gets the status of a single element in the central model.
document: The document containing the element.
elementId: The id of the element.
Returns: The status of the element in the local session versus the central model.
"""
pass
@staticmethod
def GetUserWorksetInfo(path):
"""
GetUserWorksetInfo(path: ModelPath) -> IList[WorksetPreview]
Gets information about user worksets in a workshared model file,without fully
opening the file.
path: The path to the workshared model.
Returns: Information about all the user worksets in the model.
The list is sorted by
workset id.
"""
pass
@staticmethod
def GetWorksharingTooltipInfo(document,elementId):
"""
GetWorksharingTooltipInfo(document: Document,elementId: ElementId) -> WorksharingTooltipInfo
Gets worksharing information about an element to display in an in-canvas
tooltip.
document: The document containing the element
elementId: The id of the element in question
Returns: Worksharing information about the specified element.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: WorksharingUtils,disposing: bool) """
pass
@staticmethod
def RelinquishOwnership(document,generalCategories,options):
"""
RelinquishOwnership(document: Document,generalCategories: RelinquishOptions,options: TransactWithCentralOptions) -> RelinquishedItems
Relinquishes ownership by the current user of as many specified elements and
worksets as possible,
and grants element ownership requested by other users
on a first-come,first-served basis.
document: The document containing the elements and worksets.
generalCategories: General categories of items to relinquish. See RelinquishOptions for details.
options: Options to customize access to the central model.
null is allowed and means
no customization.
Returns: The elements and worksets that were relinquished.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: WorksharingUtils) -> bool
"""
|
anchore/anchore-modules/queries/list-content-search-matches.py | berez23/anchore | 401 | 12622032 |
#!/usr/bin/env python
import sys
import os
import re
import json
import traceback
import anchore.anchore_utils
# main routine
try:
config = anchore.anchore_utils.init_query_cmdline(sys.argv, "params: <filename> <filename> ...\nhelp: use 'all' to show all content search match filenames")
except Exception as err:
print str(err)
sys.exit(1)
if not config:
sys.exit(0)
if len(config['params']) <= 0:
print "Query requires input: all ..."
imageId = config['imgid']
outlist = list()
warns = list()
outlist.append(["Image_Id", "Repo_Tags", "File", "Match_Regexp", "Match_Line_Numbers"])
try:
# handle the good case, something is found resulting in data matching the required columns
results = anchore.anchore_utils.load_analysis_output(imageId, 'content_search', 'regexp_matches.all')
for thefile in results.keys():
data = json.loads(results[thefile])
for b64regexp in data:
theregexp = b64regexp.decode('base64')
thelinenos = ','.join([str(x) for x in data[b64regexp]])
outlist.append([config['meta']['shortId'], config['meta']['humanname'], thefile, theregexp, thelinenos])
except Exception as err:
# handle the case where something wrong happened
import traceback
traceback.print_exc()
warns.append("Query failed for image ("+str(config['imgid'])+") with exception: " + str(err))
# handle the no match case
if len(outlist) < 1:
#outlist.append(["NOMATCH", "NOMATCH", "NOMATCH"])
pass
anchore.anchore_utils.write_kvfile_fromlist(config['output'], outlist)
if len(warns) > 0:
anchore.anchore_utils.write_plainfile_fromlist(config['output_warns'], warns)
sys.exit(0)
|
tests/manage/z_cluster/nodes/test_worker_nodes_network_failures.py | annagitel/ocs-ci | 130 | 12622034 |
import logging
import pytest
from concurrent.futures import ThreadPoolExecutor
from time import sleep
from ocs_ci.framework import config
from ocs_ci.framework.pytest_customization.marks import (
skipif_aws_i3,
skipif_vsphere_ipi,
skipif_ibm_power,
)
from ocs_ci.framework.testlib import ignore_leftovers, ManageTest, tier4, tier4c
from ocs_ci.ocs import constants, machine, node
from ocs_ci.ocs.exceptions import ResourceWrongStatusException
from ocs_ci.ocs.resources import pod
from ocs_ci.utility.utils import ceph_health_check
from ocs_ci.helpers import helpers
logger = logging.getLogger(__name__)
@tier4
@tier4c
@skipif_aws_i3
@skipif_vsphere_ipi
@skipif_ibm_power
@ignore_leftovers
class TestWorkerNodesFailure(ManageTest):
"""
Test all worker nodes simultaneous abrupt network failure for ~300 seconds
"""
pvc_size = 10 # size in Gi
short_nw_fail_time = 300 # Duration in seconds for short network failure
@pytest.fixture()
def setup(
self,
request,
scenario,
nodes,
multi_pvc_factory,
service_account_factory,
dc_pod_factory,
):
"""
Identify the nodes and start multiple dc pods for the test
Args:
scenario (str): Scenario of app pods running on OCS or dedicated nodes
(eg., 'colocated', 'dedicated')
nodes: A fixture to get instance of the relevant platform nodes class
multi_pvc_factory: A fixture create a set of new PVCs
service_account_factory: A fixture to create a service account
dc_pod_factory: A fixture to create dc pod
Returns:
list: dc pod objs
"""
worker_nodes = node.get_worker_nodes()
ocs_nodes = machine.get_labeled_nodes(constants.OPERATOR_NODE_LABEL)
non_ocs_nodes = list(set(worker_nodes) - set(ocs_nodes))
def finalizer():
helpers.remove_label_from_worker_node(
node_list=worker_nodes, label_key="nodetype"
)
# Check ceph health
ceph_health_check(tries=80)
request.addfinalizer(finalizer)
if (scenario == "dedicated") and len(non_ocs_nodes) == 0:
if config.ENV_DATA.get("deployment_type").lower() == "ipi":
machines = machine.get_machinesets()
node.add_new_node_and_label_it(
machines[0], num_nodes=1, mark_for_ocs_label=False
)
else:
if (
config.ENV_DATA.get("platform").lower()
== constants.VSPHERE_PLATFORM
):
pytest.skip(
"Skipping add node in VSPHERE due to https://bugzilla.redhat.com/show_bug.cgi?id=1844521"
)
is_rhel = config.ENV_DATA.get("rhel_workers") or config.ENV_DATA.get(
"rhel_user"
)
node_type = constants.RHEL_OS if is_rhel else constants.RHCOS
node.add_new_node_and_label_upi(
node_type=node_type, num_nodes=1, mark_for_ocs_label=False
)
non_ocs_nodes = list(set(node.get_worker_nodes()) - set(ocs_nodes))
app_pod_nodes = ocs_nodes if (scenario == "colocated") else non_ocs_nodes
# Label nodes to be able to run app pods
helpers.label_worker_node(
node_list=app_pod_nodes, label_key="nodetype", label_value="app-pod"
)
access_modes_rbd = [
constants.ACCESS_MODE_RWO,
f"{constants.ACCESS_MODE_RWX}-Block",
]
access_modes_cephfs = [constants.ACCESS_MODE_RWO, constants.ACCESS_MODE_RWX]
pvcs_rbd = multi_pvc_factory(
interface=constants.CEPHBLOCKPOOL,
size=self.pvc_size,
access_modes=access_modes_rbd,
status=constants.STATUS_BOUND,
num_of_pvc=len(access_modes_rbd),
)
project = pvcs_rbd[0].project
pvcs_cephfs = multi_pvc_factory(
interface=constants.CEPHFILESYSTEM,
project=project,
size=self.pvc_size,
access_modes=access_modes_cephfs,
status=constants.STATUS_BOUND,
num_of_pvc=len(access_modes_cephfs),
)
pvcs = pvcs_cephfs + pvcs_rbd
# Set volume mode on PVC objects
for pvc_obj in pvcs:
pvc_info = pvc_obj.get()
setattr(pvc_obj, "volume_mode", pvc_info["spec"]["volumeMode"])
sa_obj = service_account_factory(project=project)
pods = []
# Create pods
for pvc_obj in pvcs:
if constants.CEPHFS_INTERFACE in pvc_obj.storageclass.name:
interface = constants.CEPHFILESYSTEM
else:
interface = constants.CEPHBLOCKPOOL
num_pods = 2 if pvc_obj.access_mode == constants.ACCESS_MODE_RWX else 1
logger.info("Creating app pods")
for _ in range(num_pods):
pods.append(
dc_pod_factory(
interface=interface,
pvc=pvc_obj,
node_selector={"nodetype": "app-pod"},
raw_block_pv=pvc_obj.volume_mode == "Block",
sa_obj=sa_obj,
)
)
logger.info(
f"Created {len(pods)} pods using {len(pvcs_cephfs)} cephfs, {len(pvcs_rbd)} rbd PVCs."
)
return pods
@pytest.mark.parametrize(
argnames=["scenario"],
argvalues=[
pytest.param(*["colocated"], marks=pytest.mark.polarion_id("OCS-1432")),
pytest.param(*["dedicated"], marks=pytest.mark.polarion_id("OCS-1433")),
],
)
def test_all_worker_nodes_short_network_failure(
self, nodes, setup, node_restart_teardown
):
"""
OCS-1432/OCS-1433:
- Start DeploymentConfig based app pods
- Make all the worker nodes unresponsive by doing abrupt network failure
- Reboot the unresponsive node after short duration of ~300 seconds
- When unresponsive node recovers, app pods and ceph cluster should recover
- Again run IOs from app pods
"""
pod_objs = setup
worker_nodes = node.get_worker_nodes()
# Run IO on pods
logger.info(f"Starting IO on {len(pod_objs)} app pods")
with ThreadPoolExecutor() as executor:
for pod_obj in pod_objs:
logger.info(f"Starting IO on pod {pod_obj.name}")
storage_type = "block" if pod_obj.pvc.volume_mode == "Block" else "fs"
executor.submit(
pod_obj.run_io,
storage_type=storage_type,
size="2G",
runtime=30,
fio_filename=f"{pod_obj.name}_io_f1",
)
logger.info(f"IO started on all {len(pod_objs)} app pods")
# Wait for IO results
for pod_obj in pod_objs:
pod.get_fio_rw_iops(pod_obj)
# Induce network failure on all worker nodes
with ThreadPoolExecutor() as executor:
for node_name in worker_nodes:
executor.submit(node.node_network_failure, node_name, False)
node.wait_for_nodes_status(
node_names=worker_nodes, status=constants.NODE_NOT_READY
)
logger.info(f"Waiting for {self.short_nw_fail_time} seconds")
sleep(self.short_nw_fail_time)
# Reboot the worker nodes
logger.info(f"Stop and start the worker nodes: {worker_nodes}")
nodes.restart_nodes_by_stop_and_start(node.get_node_objs(worker_nodes))
try:
node.wait_for_nodes_status(
node_names=worker_nodes, status=constants.NODE_READY
)
logger.info("Verifying StorageCluster pods are in running/completed state")
pod.wait_for_storage_pods(timeout=720)
except ResourceWrongStatusException:
# Restart nodes
nodes.restart_nodes(node.get_node_objs(worker_nodes))
assert ceph_health_check(tries=80), "Ceph cluster health is not OK"
logger.info("Ceph cluster health is OK")
# Get current info of app pods
new_pod_objs = list()
for pod_obj in pod_objs:
pod_label = pod_obj.labels.get("deploymentconfig")
pods_data = pod.get_pods_having_label(
f"deploymentconfig={pod_label}", pod_obj.namespace
)
current_pods = [
pod_data.get("metadata").get("name")
for pod_data in pods_data
if "-deploy" not in pod_data.get("metadata").get("name")
]
logger.info(f"Pods with label {pod_label}: {current_pods}")
# Remove the older pod from the list if pod is rescheduled
if len(current_pods) > 1:
current_pods.remove(pod_obj.name)
new_pod_obj = pod.get_pod_obj(current_pods.pop(), pod_obj.namespace)
new_pod_obj.pvc = pod_obj.pvc
new_pod_objs.append(new_pod_obj)
logger.info("Wait for app pods are in running state")
for pod_obj in new_pod_objs:
pod_obj.ocp.wait_for_resource(
condition=constants.STATUS_RUNNING,
resource_name=pod_obj.name,
timeout=720,
sleep=20,
)
logger.info("All the app pods reached running state")
# Run more IOs on app pods
with ThreadPoolExecutor() as executor:
for pod_obj in new_pod_objs:
logger.info(f"Starting IO on pod {pod_obj.name}")
pod_obj.wl_setup_done = False
storage_type = "block" if pod_obj.pvc.volume_mode == "Block" else "fs"
executor.submit(
pod_obj.run_io,
storage_type=storage_type,
size="1G",
runtime=30,
fio_filename=f"{pod_obj.name}_io_f2",
)
for pod_obj in new_pod_objs:
pod.get_fio_rw_iops(pod_obj)
|
src/gluonts/model/rotbaum/_model.py | Xiaoxiong-Liu/gluon-ts | 2,648 | 12622049 |
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import Dict, List, Optional, Union
import copy
import numpy as np
import pandas as pd
import xgboost
import gc
from collections import defaultdict
from gluonts.core.component import validated
class QRF:
@validated()
def __init__(self, params: Optional[dict] = None):
"""
Implements Quantile Random Forests using skgarden.
"""
from skgarden import RandomForestQuantileRegressor
self.model = RandomForestQuantileRegressor(**params)
def fit(self, x_train, y_train):
self.model.fit(np.array(x_train), np.array(y_train))
def predict(self, x_test, quantile):
return self.model.predict(x_test, quantile=100 * quantile)
class QuantileReg:
@validated()
def __init__(self, quantiles: List, params: Optional[dict] = None):
"""
Implements quantile regression using lightgbm.
"""
from lightgbm import LGBMRegressor
self.quantiles = quantiles
self.models = dict(
(
quantile,
LGBMRegressor(objective="quantile", alpha=quantile, **params),
)
for quantile in quantiles
)
def fit(self, x_train, y_train):
for model in self.models.values():
model.fit(np.array(x_train), np.array(y_train))
def predict(self, x_test, quantile):
return self.models[quantile].predict(x_test)
class QRX:
@validated()
def __init__(
self,
model=None,
xgboost_params: Optional[dict] = None,
min_bin_size: int = 100,
):
"""
QRX is an algorithm that takes a point estimate algorithm and turns it
into a probabilistic forecasting algorithm. By default it uses XGBoost.
You fit it once, and choose the quantile to predict only at
prediction time.
Prediction is done by taking empirical quantiles of *true values*
associated with point estimate predictions close to the point
estimate of the given point. The minimal number of associated true
values is determined by min_bin_size.
The algorithm is (loosely) inspired by quantile regression
        forests, in that it predicts quantiles based on associated true
values, where the association is based on a point estimate algorithm.
Parameters
----------
model
Any point estimate algorithm with .fit and .predict functions.
xgboost_params
If None, then it uses
{"max_depth": 5, "n_jobs": -1, "verbosity": 1,
"objective": "reg:squarederror"}
min_bin_size
Hyperparameter that determines the minimal size of the list of
true values associated with each prediction.
"""
if model:
self.model = copy.deepcopy(model)
else:
self.model = self._create_xgboost_model(xgboost_params)
self.min_bin_size = min_bin_size
self.sorted_train_preds = None
self.x_train_is_dataframe = None
self.id_to_bins = None
self.preds_to_id = None
self.quantile_dicts = defaultdict(dict)
@staticmethod
def _create_xgboost_model(model_params: Optional[dict] = None):
"""
Creates an xgboost model using specified or default parameters.
"""
if model_params is None:
model_params = {
"max_depth": 5,
"n_jobs": -1,
"verbosity": 1,
"objective": "reg:squarederror",
}
return xgboost.sklearn.XGBModel(**model_params)
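    # --- Illustrative usage sketch (added note, not part of the original file) ---
    # fit() is defined below; predict() appears later in this file and is assumed
    # here to take (x_test, quantile) like QRF and QuantileReg above.
    #
    #     model = QRX(min_bin_size=50)
    #     model.fit(x_train, y_train)
    #     p90 = model.predict(x_test, quantile=0.9)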
def fit(
self,
x_train: Union[pd.DataFrame, List],
y_train: Union[pd.Series, List],
max_sample_size: Optional[
int
] = None, # If not None, choose without replacement
        # min(max_sample_size, len(x_train)) many datapoints
# to train on.
seed: int = 1,
x_train_is_dataframe: bool = False, # This should be False for
# XGBoost, but True if one uses lightgbm.
model_is_already_trained: bool = False, # True if there is no need to
# train self.model
**kwargs
):
"""
Fits self.model and partitions R^n into cells. More accurately,
        it creates two dictionaries: self.preds_to_id, whose keys are the
        predictions on the training dataset and whose values are the ids of
        their associated bins, and self.id_to_bins, whose keys are the ids
of the bins and whose values are associated lists of true values.
"""
self.x_train_is_dataframe = x_train_is_dataframe
self.quantile_dicts = defaultdict(dict)
if not x_train_is_dataframe:
x_train, y_train = np.array(x_train), np.array(y_train) # xgboost
            # doesn't like lists
if max_sample_size and x_train_is_dataframe:
assert max_sample_size > 0
sample_size = min(max_sample_size, len(x_train))
x_train = x_train.sample(
n=min(sample_size, len(x_train)),
replace=False,
random_state=seed,
)
y_train = y_train[x_train.index]
elif max_sample_size:
assert max_sample_size > 0
sample_size = min(max_sample_size, len(x_train))
np.random.seed(seed)
idx = np.random.choice(
np.arange(len(x_train)), sample_size, replace=False
)
x_train = x_train[idx]
y_train = y_train[idx]
if not model_is_already_trained:
self.model.fit(x_train, y_train, **kwargs)
y_train_pred = self.model.predict(x_train)
df = pd.DataFrame(
{
"y_true": y_train,
"y_pred": y_train_pred,
}
).reset_index(drop=True)
self.sorted_train_preds = sorted(df["y_pred"].unique())
cell_values_dict = self.preprocess_df(
df, min_bin_size=self.min_bin_size
)
del df
gc.collect()
cell_values_dict_df = pd.DataFrame(
cell_values_dict.items(), columns=["keys", "values"]
)
cell_values_dict_df["id"] = cell_values_dict_df["values"].apply(id)
self.id_to_bins = (
cell_values_dict_df.groupby("id")["values"].first().to_dict()
)
self.preds_to_id = (
cell_values_dict_df.groupby("keys")["id"].first().to_dict()
)
del cell_values_dict_df
del cell_values_dict
gc.collect()
df = pd.DataFrame({"preds": self.sorted_train_preds})
df["bin_ids"] = df["preds"].apply(lambda x: self.preds_to_id[x])
bin_ids = df["bin_ids"].drop_duplicates().values
final_id, penultimate_id = bin_ids[-1], bin_ids[-2]
if len(self.id_to_bins[final_id]) < self.min_bin_size:
self.id_to_bins[final_id] += self.id_to_bins[penultimate_id]
@staticmethod
def clump(
dic: Dict, min_num: int, sorted_keys: Optional[List] = None
) -> Dict:
"""
Returns a new dictionary whose keys are the same as dic's keys.
Runs over dic's keys, from smallest to largest, and every time that
the sum of the lengths of the values goes over min_num, it makes the
new dictionary's values for the associated keys reference a single
list object whose elements are the with-multiplicity union of the
lists that appear as values in dic.
        Note that in the dictionary output by this function, while its keys
        are the same as the keys of dic, the number of distinct list objects
        among its values can be significantly smaller.
Examples:
>>> QRX.clump({0.1: [3, 3], 0.3: [0], 1.5: [-8]}, 0)
{0.1: [3, 3], 0.3: [0], 1.5: [-8]}
>>> QRX.clump({0.1: [3, 3], 0.3: [0], 1.5: [-8]}, 1)
{0.1: [3, 3], 0.3: [0, -8], 1.5: [0, -8]}
>>> QRX.clump({0.1: [3, 3], 0.3: [0], 1.5: [-8]}, 2)
{0.1: [3, 3, 0], 0.3: [3, 3, 0], 1.5: [-8]}
Parameters
----------
dic: dict
float to list
min_num: int
minimal number of clump size.
sorted_keys: list
sorted(dic.keys()) or None
Returns
-------
dict
float to list; with the values often having the same list object
appear multiple times
"""
if sorted_keys is None:
sorted_keys = sorted(dic)
new_dic = {}
iter_length = 0
iter_list = []
for key in sorted_keys:
iter_length += len(dic[key])
iter_list.extend(dic[key])
new_dic[key] = iter_list # Note that iter_list may change in the
# future, and this will change the value of new_dic[key]. This
# is intentional.
if iter_length > min_num:
iter_length = 0
iter_list = [] # This line, of course, doesn't change any
# value of new_dic, as it makes iter_list reference a new
# list object.
return new_dic
def preprocess_df(self, df: pd.DataFrame, min_bin_size: int = 100) -> Dict:
"""
Associates true values to each prediction that appears in train. For
the nature of this association, see details in .clump.
Parameters
----------
df: pd.DataFrame
Dataframe with columns 'y_true' and 'y_pred', of true and
predicted values on the training set.
min_bin_size
Size of clumps to associate to each prediction in the set of
predictions on the training set.
Returns
-------
dict
going from predictions from the set of predictions on the
training set to lists of associated true values, with the length
of each being at least min_bin_size.
"""
dic = dict(df.groupby("y_pred")["y_true"].apply(list))
dic = self.clump(dic, min_bin_size, self.sorted_train_preds)
return dic
@classmethod
    def get_closest_pt(cls, sorted_list: List, num: float) -> float:
"""
Given a sorted list of floats, returns the number closest to num.
Implements a binary search.
"""
assert sorted_list
if len(sorted_list) == 1:
return sorted_list[0]
else:
halfway_indx = (len(sorted_list) - 1) // 2
if sorted_list[halfway_indx] > num:
return cls.get_closest_pt(sorted_list[: halfway_indx + 1], num)
elif sorted_list[halfway_indx + 1] < num:
return cls.get_closest_pt(sorted_list[halfway_indx + 1 :], num)
elif abs(sorted_list[halfway_indx] - num) < abs(
sorted_list[halfway_indx + 1] - num
):
return sorted_list[halfway_indx]
else:
return sorted_list[halfway_indx + 1]
def _get_and_cache_quantile_computation(
self, feature_vector_in_train: List, quantile: float
):
"""
Updates self.quantile_dicts[quantile][feature_vector_in_train] to be the quantile of the associated true value bin.
Parameters
----------
feature_vector_in_train: list
Feature vector that appears in the training data.
quantile: float
Returns
-------
float
The quantile of the associated true value bin.
"""
if feature_vector_in_train not in self.quantile_dicts[quantile]:
self.quantile_dicts[quantile][
feature_vector_in_train
] = np.percentile(
self.id_to_bins[self.preds_to_id[feature_vector_in_train]],
quantile * 100,
)
return self.quantile_dicts[quantile][feature_vector_in_train]
def predict(
self, x_test: Union[pd.DataFrame, List], quantile: float
) -> List:
"""
Quantile prediction.
Parameters
----------
x_test: pd.DataFrame if self.x_train_is_dataframe, else list of
lists
quantile: float
Returns
-------
list
list of floats
"""
if self.x_train_is_dataframe:
preds = self.model.predict(x_test)
predicted_values = [
self._get_and_cache_quantile_computation(
self.get_closest_pt(self.sorted_train_preds, pred),
quantile,
)
for pred in preds
]
else:
predicted_values = []
for pt in x_test:
pred = self.model.predict(np.array([pt]))[
0
] # xgboost doesn't like lists
closest_pred = self.get_closest_pt(
self.sorted_train_preds, pred
)
predicted_values.append(
self._get_and_cache_quantile_computation(
closest_pred, quantile
)
)
return predicted_values
def estimate_dist(self, x_test: List[List[float]]) -> List:
"""
        Get an empirical sample approximating Y|X=x for each x in x_test
Parameters
----------
x_test
Returns
-------
list
list of lists
"""
predicted_samples = []
for pt in x_test:
pred = self.model.predict(np.array([pt]))[0]
closest_pred = self.get_closest_pt(self.sorted_train_preds, pred)
predicted_samples.append(
self.id_to_bins[self.preds_to_id[closest_pred]]
)
return predicted_samples
LSF = QRX # LSF stands for "Level Set Forecaster". This name emphasizes that
# the underlying algorithm can be used with any point forecasting algorithm,
# not just XGBoost.
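# Hedged usage sketch (added for illustration; not part of the original
# gluonts source). It assumes the gluonts/xgboost/pandas imports at the top of
# this file resolve; the toy data, sizes and quantile levels are invented.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    x_train = rng.rand(300, 3).tolist()       # 300 toy feature vectors
    y_train = (rng.rand(300) * 10).tolist()   # 300 toy target values
    qrx = QRX(min_bin_size=50)                # default XGBoost point model
    qrx.fit(x_train, y_train)                 # fit once ...
    x_test = rng.rand(5, 3).tolist()
    print(qrx.predict(x_test, quantile=0.5))  # ... pick quantiles at predict time
    print(qrx.predict(x_test, quantile=0.9))
    print(len(qrx.estimate_dist(x_test)[0]))  # size of one associated true-value bin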
|
src/ralph/__main__.py | DoNnMyTh/ralph | 1,668 | 12622055 | #!/usr/bin/env python
import os
import sys
def main(settings_module='ralph.settings', force=False):
if force:
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
else:
os.environ.setdefault('DJANGO_SETTINGS_MODULE', settings_module)
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
def dev():
main('ralph.settings.dev')
def test():
# test only with test settings, not local (or any set by environment
# variable DJANGO_SETTINGS_MODULE)
main('ralph.settings.test', force=True)
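# Hedged illustration (added here; not part of the original ralph source):
# why test() passes force=True -- os.environ.setdefault keeps a value that is
# already set, while direct assignment overrides it. The 'local' settings
# module name below is only used as an example value.
def _demo_settings_selection():
    os.environ['DJANGO_SETTINGS_MODULE'] = 'ralph.settings.local'
    # setdefault (used when force=False) keeps the pre-existing value
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ralph.settings.prod')
    assert os.environ['DJANGO_SETTINGS_MODULE'] == 'ralph.settings.local'
    # forced assignment (used by test()) always wins
    os.environ['DJANGO_SETTINGS_MODULE'] = 'ralph.settings.test'
    assert os.environ['DJANGO_SETTINGS_MODULE'] == 'ralph.settings.test'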
def prod():
main('ralph.settings.prod')
if __name__ == '__main__':
main('ralph.settings.prod')
|
tests/test_models/test_necks/test_necks.py | gopi231091/mmdetection3d | 217 | 12622071 | import pytest
import torch
from mmdet3d.models.builder import build_backbone, build_neck
def test_centerpoint_fpn():
second_cfg = dict(
type='SECOND',
in_channels=64,
out_channels=[64, 128, 256],
layer_nums=[3, 5, 5],
layer_strides=[2, 2, 2],
norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
conv_cfg=dict(type='Conv2d', bias=False))
second = build_backbone(second_cfg)
# centerpoint usage of fpn
centerpoint_fpn_cfg = dict(
type='SECONDFPN',
in_channels=[64, 128, 256],
out_channels=[128, 128, 128],
upsample_strides=[0.5, 1, 2],
norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
upsample_cfg=dict(type='deconv', bias=False),
use_conv_for_no_stride=True)
# original usage of fpn
fpn_cfg = dict(
type='SECONDFPN',
in_channels=[64, 128, 256],
upsample_strides=[1, 2, 4],
out_channels=[128, 128, 128])
second_fpn = build_neck(fpn_cfg)
centerpoint_second_fpn = build_neck(centerpoint_fpn_cfg)
input = torch.rand([4, 64, 512, 512])
sec_output = second(input)
centerpoint_output = centerpoint_second_fpn(sec_output)
second_output = second_fpn(sec_output)
assert centerpoint_output[0].shape == torch.Size([4, 384, 128, 128])
assert second_output[0].shape == torch.Size([4, 384, 256, 256])
def test_imvoxel_neck():
if not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
neck_cfg = dict(
type='OutdoorImVoxelNeck', in_channels=64, out_channels=256)
neck = build_neck(neck_cfg).cuda()
inputs = torch.rand([1, 64, 216, 248, 12], device='cuda')
outputs = neck(inputs)
assert outputs[0].shape == (1, 256, 248, 216)
|
data structures/stack/python/stack.py | iabhimanyu/Algorithms | 715 | 12622096 | class Stack:
def __init__(self):
self.list = []
def push(self, element):
self.list.append(element)
def pop(self):
assert len(self.list) > 0, "Stack is empty"
return self.list.pop()
def isEmpty(self):
return len(self.list) == 0
|
bookwyrm/tests/models/test_fields.py | mouse-reeve/fedireads | 270 | 12622103 | """ testing models """
from io import BytesIO
from collections import namedtuple
from dataclasses import dataclass
import json
import pathlib
import re
from typing import List
from unittest.mock import patch
from PIL import Image
import responses
from django.core.exceptions import ValidationError
from django.core.files.base import ContentFile
from django.db import models
from django.test import TestCase
from django.utils import timezone
from bookwyrm import activitypub
from bookwyrm.activitypub.base_activity import ActivityObject
from bookwyrm.models import fields, User, Status, Edition
from bookwyrm.models.base_model import BookWyrmModel
from bookwyrm.models.activitypub_mixin import ActivitypubMixin
from bookwyrm.settings import DOMAIN
# pylint: disable=too-many-public-methods
@patch("bookwyrm.suggested_users.rerank_suggestions_task.delay")
@patch("bookwyrm.activitystreams.populate_stream_task.delay")
@patch("bookwyrm.lists_stream.populate_lists_task.delay")
class ModelFields(TestCase):
"""overwrites standard model feilds to work with activitypub"""
def test_validate_remote_id(self, *_):
"""should look like a url"""
self.assertIsNone(fields.validate_remote_id("http://www.example.com"))
self.assertIsNone(fields.validate_remote_id("https://www.example.com"))
self.assertIsNone(fields.validate_remote_id("http://exle.com/dlg-23/x"))
self.assertRaises(
ValidationError, fields.validate_remote_id, "http:/example.com/dlfjg-23/x"
)
self.assertRaises(
ValidationError, fields.validate_remote_id, "www.example.com/dlfjg-23/x"
)
self.assertRaises(
ValidationError,
fields.validate_remote_id,
"http://www.example.com/dlfjg 23/x",
)
def test_activitypub_field_mixin(self, *_):
"""generic mixin with super basic to and from functionality"""
instance = fields.ActivitypubFieldMixin()
self.assertEqual(instance.field_to_activity("fish"), "fish")
self.assertEqual(instance.field_from_activity("fish"), "fish")
self.assertFalse(instance.deduplication_field)
instance = fields.ActivitypubFieldMixin(
activitypub_wrapper="endpoints", activitypub_field="outbox"
)
self.assertEqual(instance.field_to_activity("fish"), {"outbox": "fish"})
self.assertEqual(instance.field_from_activity({"outbox": "fish"}), "fish")
self.assertEqual(instance.get_activitypub_field(), "endpoints")
instance = fields.ActivitypubFieldMixin()
instance.name = "snake_case_name"
self.assertEqual(instance.get_activitypub_field(), "snakeCaseName")
def test_set_field_from_activity(self, *_):
"""setter from entire json blob"""
@dataclass
class TestModel:
"""real simple mock"""
field_name: str
mock_model = TestModel(field_name="bip")
TestActivity = namedtuple("test", ("fieldName", "unrelated"))
data = TestActivity(fieldName="hi", unrelated="bfkjh")
instance = fields.ActivitypubFieldMixin()
instance.name = "field_name"
instance.set_field_from_activity(mock_model, data)
self.assertEqual(mock_model.field_name, "hi")
def test_set_activity_from_field(self, *_):
"""set json field given entire model"""
@dataclass
class TestModel:
"""real simple mock"""
field_name: str
unrelated: str
mock_model = TestModel(field_name="bip", unrelated="field")
instance = fields.ActivitypubFieldMixin()
instance.name = "field_name"
data = {}
instance.set_activity_from_field(data, mock_model)
self.assertEqual(data["fieldName"], "bip")
def test_remote_id_field(self, *_):
"""just sets some defaults on charfield"""
instance = fields.RemoteIdField()
self.assertEqual(instance.max_length, 255)
self.assertTrue(instance.deduplication_field)
with self.assertRaises(ValidationError):
instance.run_validators("http://www.example.com/dlfjg 23/x")
def test_username_field(self, *_):
"""again, just setting defaults on username field"""
instance = fields.UsernameField()
self.assertEqual(instance.activitypub_field, "preferredUsername")
self.assertEqual(instance.max_length, 150)
self.assertEqual(instance.unique, True)
with self.assertRaises(ValidationError):
instance.run_validators("mouse")
instance.run_validators("mouseexample.com")
instance.run_validators("<EMAIL>")
instance.run_validators("@example.com")
instance.run_validators("<EMAIL>")
instance.run_validators("one [email protected]")
instance.run_validators("<EMAIL>")
instance.run_validators("<EMAIL> ")
self.assertIsNone(instance.run_validators("<EMAIL>"))
self.assertIsNone(instance.run_validators("<EMAIL>"))
self.assertIsNone(instance.run_validators("<EMAIL>"))
self.assertEqual(instance.field_to_activity("<EMAIL>"), "test")
def test_privacy_field_defaults(self, *_):
"""post privacy field's many default values"""
instance = fields.PrivacyField()
self.assertEqual(instance.max_length, 255)
self.assertEqual(
[c[0] for c in instance.choices],
["public", "unlisted", "followers", "direct"],
)
self.assertEqual(instance.default, "public")
self.assertEqual(
instance.public, "https://www.w3.org/ns/activitystreams#Public"
)
def test_privacy_field_set_field_from_activity(self, *_):
"""translate between to/cc fields and privacy"""
with patch("bookwyrm.models.user.set_remote_server.delay"):
test_user = User.objects.create_user(
username="<EMAIL>",
local=False,
remote_id="https://example.com/test_user",
inbox="https://example.com/users/test_user/inbox",
followers_url="https://example.com/users/test_user/followers",
)
@dataclass(init=False)
class TestActivity(ActivityObject):
"""real simple mock"""
# pylint: disable=invalid-name
to: List[str]
cc: List[str]
id: str = "http://hi.com"
type: str = "Test"
attributedTo: str = test_user.remote_id
class TestPrivacyModel(ActivitypubMixin, BookWyrmModel):
"""real simple mock model because BookWyrmModel is abstract"""
privacy_field = fields.PrivacyField()
mention_users = fields.TagField(User)
user = fields.ForeignKey(User, on_delete=models.CASCADE)
public = "https://www.w3.org/ns/activitystreams#Public"
data = TestActivity(
to=[public],
cc=["bleh"],
)
model_instance = TestPrivacyModel(privacy_field="direct")
self.assertEqual(model_instance.privacy_field, "direct")
instance = fields.PrivacyField()
instance.name = "privacy_field"
instance.set_field_from_activity(model_instance, data)
self.assertEqual(model_instance.privacy_field, "public")
data.to = ["bleh"]
data.cc = []
instance.set_field_from_activity(model_instance, data)
self.assertEqual(model_instance.privacy_field, "direct")
data.to = ["bleh"]
data.cc = [public, "waah"]
instance.set_field_from_activity(model_instance, data)
self.assertEqual(model_instance.privacy_field, "unlisted")
data.to = [test_user.followers_url]
data.cc = []
instance.set_field_from_activity(model_instance, data)
self.assertEqual(model_instance.privacy_field, "followers")
data.to = ["http://user_remote/followers"]
data.cc = ["http://mentioned_user/remote_id"]
instance.set_field_from_activity(model_instance, data)
self.assertEqual(model_instance.privacy_field, "followers")
@patch("bookwyrm.models.activitypub_mixin.ObjectMixin.broadcast")
@patch("bookwyrm.activitystreams.add_status_task.delay")
def test_privacy_field_set_activity_from_field(self, *_):
"""translate between to/cc fields and privacy"""
user = User.objects.create_user(
"rat", "<EMAIL>", "ratword", local=True, localname="rat"
)
public = "https://www.w3.org/ns/activitystreams#Public"
followers = f"{user.remote_id}/followers"
instance = fields.PrivacyField()
instance.name = "privacy_field"
model_instance = Status.objects.create(user=user, content="hi")
activity = {}
instance.set_activity_from_field(activity, model_instance)
self.assertEqual(activity["to"], [public])
self.assertEqual(activity["cc"], [followers])
model_instance = Status.objects.create(
user=user, content="hi", privacy="unlisted"
)
activity = {}
instance.set_activity_from_field(activity, model_instance)
self.assertEqual(activity["to"], [followers])
self.assertEqual(activity["cc"], [public])
model_instance = Status.objects.create(
user=user, content="hi", privacy="followers"
)
activity = {}
instance.set_activity_from_field(activity, model_instance)
self.assertEqual(activity["to"], [followers])
self.assertEqual(activity["cc"], [])
model_instance = Status.objects.create(
user=user,
content="hi",
privacy="direct",
)
model_instance.mention_users.set([user])
activity = {}
instance.set_activity_from_field(activity, model_instance)
self.assertEqual(activity["to"], [user.remote_id])
self.assertEqual(activity["cc"], [])
def test_foreign_key(self, *_):
"""should be able to format a related model"""
instance = fields.ForeignKey("User", on_delete=models.CASCADE)
Serializable = namedtuple("Serializable", ("to_activity", "remote_id"))
item = Serializable(lambda: {"a": "b"}, "https://e.b/c")
# returns the remote_id field of the related object
self.assertEqual(instance.field_to_activity(item), "https://e.b/c")
@responses.activate
def test_foreign_key_from_activity_str(self, *_):
"""create a new object from a foreign key"""
instance = fields.ForeignKey(User, on_delete=models.CASCADE)
datafile = pathlib.Path(__file__).parent.joinpath("../data/ap_user.json")
userdata = json.loads(datafile.read_bytes())
# don't try to load the user icon
del userdata["icon"]
# it shouldn't match with this unrelated user:
unrelated_user = User.objects.create_user(
"rat", "<EMAIL>", "ratword", local=True, localname="rat"
)
# test receiving an unknown remote id and loading data
responses.add(
responses.GET,
"https://example.com/user/mouse",
json=userdata,
status=200,
)
with patch("bookwyrm.models.user.set_remote_server.delay"):
value = instance.field_from_activity("https://example.com/user/mouse")
self.assertIsInstance(value, User)
self.assertNotEqual(value, unrelated_user)
self.assertEqual(value.remote_id, "https://example.com/user/mouse")
self.assertEqual(value.name, "MOUSE?? MOUSE!!")
def test_foreign_key_from_activity_dict(self, *_):
"""test recieving activity json"""
instance = fields.ForeignKey(User, on_delete=models.CASCADE)
datafile = pathlib.Path(__file__).parent.joinpath("../data/ap_user.json")
userdata = json.loads(datafile.read_bytes())
# don't try to load the user icon
del userdata["icon"]
# it shouldn't match with this unrelated user:
unrelated_user = User.objects.create_user(
"rat", "<EMAIL>", "ratword", local=True, localname="rat"
)
with patch("bookwyrm.models.user.set_remote_server.delay"):
value = instance.field_from_activity(activitypub.Person(**userdata))
self.assertIsInstance(value, User)
self.assertNotEqual(value, unrelated_user)
self.assertEqual(value.remote_id, "https://example.com/user/mouse")
self.assertEqual(value.name, "MOUSE?? MOUSE!!")
# et cetera but we're not testing serializing user json
def test_foreign_key_from_activity_dict_existing(self, *_):
"""test receiving a dict of an existing object in the db"""
instance = fields.ForeignKey(User, on_delete=models.CASCADE)
datafile = pathlib.Path(__file__).parent.joinpath("../data/ap_user.json")
userdata = json.loads(datafile.read_bytes())
user = User.objects.create_user(
"mouse", "<EMAIL>", "mouseword", local=True, localname="mouse"
)
user.remote_id = "https://example.com/user/mouse"
user.save(broadcast=False, update_fields=["remote_id"])
User.objects.create_user(
"rat", "<EMAIL>", "ratword", local=True, localname="rat"
)
with patch("bookwyrm.models.activitypub_mixin.ObjectMixin.broadcast"):
value = instance.field_from_activity(activitypub.Person(**userdata))
self.assertEqual(value, user)
def test_foreign_key_from_activity_str_existing(self, *_):
"""test receiving a remote id of an existing object in the db"""
instance = fields.ForeignKey(User, on_delete=models.CASCADE)
user = User.objects.create_user(
"mouse", "<EMAIL>", "mouseword", local=True, localname="mouse"
)
User.objects.create_user(
"rat", "<EMAIL>", "ratword", local=True, localname="rat"
)
value = instance.field_from_activity(user.remote_id)
self.assertEqual(value, user)
def test_one_to_one_field(self, *_):
"""a gussied up foreign key"""
instance = fields.OneToOneField("User", on_delete=models.CASCADE)
Serializable = namedtuple("Serializable", ("to_activity", "remote_id"))
item = Serializable(lambda: {"a": "b"}, "https://e.b/c")
self.assertEqual(instance.field_to_activity(item), {"a": "b"})
def test_many_to_many_field(self, *_):
"""lists!"""
instance = fields.ManyToManyField("User")
Serializable = namedtuple("Serializable", ("to_activity", "remote_id"))
Queryset = namedtuple("Queryset", ("all", "instance"))
item = Serializable(lambda: {"a": "b"}, "https://e.b/c")
another_item = Serializable(lambda: {}, "example.com")
items = Queryset(lambda: [item], another_item)
self.assertEqual(instance.field_to_activity(items), ["https://e.b/c"])
instance = fields.ManyToManyField("User", link_only=True)
instance.name = "snake_case"
self.assertEqual(instance.field_to_activity(items), "example.com/snake_case")
@responses.activate
def test_many_to_many_field_from_activity(self, *_):
"""resolve related fields for a list, takes a list of remote ids"""
instance = fields.ManyToManyField(User)
datafile = pathlib.Path(__file__).parent.joinpath("../data/ap_user.json")
userdata = json.loads(datafile.read_bytes())
# don't try to load the user icon
del userdata["icon"]
# test receiving an unknown remote id and loading data
responses.add(
responses.GET, "https://example.com/user/mouse", json=userdata, status=200
)
with patch("bookwyrm.models.user.set_remote_server.delay"):
value = instance.field_from_activity(
["https://example.com/user/mouse", "bleh"]
)
self.assertIsInstance(value, list)
self.assertEqual(len(value), 1)
self.assertIsInstance(value[0], User)
def test_tag_field(self, *_):
"""a special type of many to many field"""
instance = fields.TagField("User")
Serializable = namedtuple(
"Serializable", ("to_activity", "remote_id", "name_field", "name")
)
Queryset = namedtuple("Queryset", ("all", "instance"))
item = Serializable(lambda: {"a": "b"}, "https://e.b/c", "name", "Name")
another_item = Serializable(lambda: {}, "example.com", "", "")
items = Queryset(lambda: [item], another_item)
result = instance.field_to_activity(items)
self.assertIsInstance(result, list)
self.assertEqual(len(result), 1)
self.assertEqual(result[0].href, "https://e.b/c")
self.assertEqual(result[0].name, "Name")
self.assertEqual(result[0].type, "Serializable")
def test_tag_field_from_activity(self, *_):
"""loadin' a list of items from Links"""
# TODO
@patch("bookwyrm.models.activitypub_mixin.ObjectMixin.broadcast")
@patch("bookwyrm.suggested_users.remove_user_task.delay")
def test_image_field_to_activity(self, *_):
"""serialize an image field to activitypub"""
user = User.objects.create_user(
"mouse", "<EMAIL>", "mouseword", local=True, localname="mouse"
)
image_file = pathlib.Path(__file__).parent.joinpath(
"../../static/images/default_avi.jpg"
)
image = Image.open(image_file)
output = BytesIO()
image.save(output, format=image.format)
user.avatar.save("test.jpg", ContentFile(output.getvalue()))
instance = fields.ImageField()
output = instance.field_to_activity(user.avatar)
self.assertIsNotNone(
re.match(
rf"https:\/\/{DOMAIN}\/.*\.jpg",
output.url,
)
)
self.assertEqual(output.name, "")
self.assertEqual(output.type, "Document")
@responses.activate
def test_image_field_from_activity(self, *_):
"""load an image from activitypub"""
image_file = pathlib.Path(__file__).parent.joinpath(
"../../static/images/default_avi.jpg"
)
instance = fields.ImageField()
with open(image_file, "rb") as image_data:
responses.add(
responses.GET,
"http://www.example.com/image.jpg",
body=image_data.read(),
status=200,
content_type="image/jpeg",
stream=True,
)
loaded_image = instance.field_from_activity("http://www.example.com/image.jpg")
self.assertIsInstance(loaded_image, list)
self.assertIsInstance(loaded_image[1], ContentFile)
@responses.activate
def test_image_field_set_field_from_activity(self, *_):
"""update a model instance from an activitypub object"""
image_file = pathlib.Path(__file__).parent.joinpath(
"../../static/images/default_avi.jpg"
)
instance = fields.ImageField(activitypub_field="cover", name="cover")
with open(image_file, "rb") as image_data:
responses.add(
responses.GET,
"http://www.example.com/image.jpg",
body=image_data.read(),
content_type="image/jpeg",
status=200,
stream=True,
)
book = Edition.objects.create(title="hello")
MockActivity = namedtuple("MockActivity", ("cover"))
mock_activity = MockActivity("http://www.example.com/image.jpg")
instance.set_field_from_activity(book, mock_activity)
self.assertIsNotNone(book.cover.name)
@responses.activate
def test_image_field_set_field_from_activity_no_overwrite_no_cover(self, *_):
"""update a model instance from an activitypub object"""
image_file = pathlib.Path(__file__).parent.joinpath(
"../../static/images/default_avi.jpg"
)
instance = fields.ImageField(activitypub_field="cover", name="cover")
with open(image_file, "rb") as image_data:
responses.add(
responses.GET,
"http://www.example.com/image.jpg",
body=image_data.read(),
status=200,
content_type="image/jpeg",
stream=True,
)
book = Edition.objects.create(title="hello")
MockActivity = namedtuple("MockActivity", ("cover"))
mock_activity = MockActivity("http://www.example.com/image.jpg")
instance.set_field_from_activity(book, mock_activity, overwrite=False)
self.assertIsNotNone(book.cover.name)
@responses.activate
def test_image_field_set_field_from_activity_no_overwrite_with_cover(self, *_):
"""update a model instance from an activitypub object"""
image_file = pathlib.Path(__file__).parent.joinpath(
"../../static/images/default_avi.jpg"
)
image = Image.open(image_file)
output = BytesIO()
image.save(output, format=image.format)
another_image_file = pathlib.Path(__file__).parent.joinpath(
"../../static/images/logo.png"
)
another_image = Image.open(another_image_file)
another_output = BytesIO()
another_image.save(another_output, format=another_image.format)
instance = fields.ImageField(activitypub_field="cover", name="cover")
responses.add(
responses.GET,
"http://www.example.com/image.jpg",
body=another_image.tobytes(),
status=200,
)
book = Edition.objects.create(title="hello")
book.cover.save("test.jpg", ContentFile(output.getvalue()))
cover_size = book.cover.size
self.assertIsNotNone(cover_size)
MockActivity = namedtuple("MockActivity", ("cover"))
mock_activity = MockActivity("http://www.example.com/image.jpg")
instance.set_field_from_activity(book, mock_activity, overwrite=False)
# same cover as before
self.assertEqual(book.cover.size, cover_size)
@responses.activate
def test_image_field_set_field_from_activity_with_overwrite_with_cover(self, *_):
"""update a model instance from an activitypub object"""
image_file = pathlib.Path(__file__).parent.joinpath(
"../../static/images/default_avi.jpg"
)
image = Image.open(image_file)
output = BytesIO()
image.save(output, format=image.format)
book = Edition.objects.create(title="hello")
book.cover.save("test.jpg", ContentFile(output.getvalue()))
cover_size = book.cover.size
self.assertIsNotNone(cover_size)
another_image_file = pathlib.Path(__file__).parent.joinpath(
"../../static/images/logo.png"
)
instance = fields.ImageField(activitypub_field="cover", name="cover")
with open(another_image_file, "rb") as another_image:
responses.add(
responses.GET,
"http://www.example.com/image.jpg",
body=another_image.read(),
status=200,
content_type="image/jpeg",
stream=True,
)
MockActivity = namedtuple("MockActivity", ("cover"))
mock_activity = MockActivity("http://www.example.com/image.jpg")
instance.set_field_from_activity(book, mock_activity, overwrite=True)
# new cover
self.assertIsNotNone(book.cover.name)
self.assertNotEqual(book.cover.size, cover_size)
def test_datetime_field(self, *_):
"""this one is pretty simple, it just has to use isoformat"""
instance = fields.DateTimeField()
now = timezone.now()
self.assertEqual(instance.field_to_activity(now), now.isoformat())
self.assertEqual(instance.field_from_activity(now.isoformat()), now)
self.assertEqual(instance.field_from_activity("bip"), None)
def test_array_field(self, *_):
"""idk why it makes them strings but probably for a good reason"""
instance = fields.ArrayField(fields.IntegerField)
self.assertEqual(instance.field_to_activity([0, 1]), ["0", "1"])
def test_html_field(self, *_):
"""sanitizes html, the sanitizer has its own tests"""
instance = fields.HtmlField()
self.assertEqual(
instance.field_from_activity("<marquee><p>hi</p></marquee>"), "<p>hi</p>"
)
|
Attacks/AttackMethods/DEEPFOOL.py | jiannanWang/DEEPSEC | 178 | 12622106 | <filename>Attacks/AttackMethods/DEEPFOOL.py
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# References: S.-M. Moosavi-Dezfooli, et al., "Deepfool: A simple and accurate method to fool deep neural networks," in CVPR, 2016
# Reference Implementation from Authors: https://github.com/LTS4/DeepFool/tree/master/Python
# **************************************
# @Time : 2018/10/20 23:31
# @Author : <NAME> and <NAME>
# @Lab : nesa.zju.edu.cn
# @File : DEEPFOOL.py
# **************************************
import numpy as np
import torch
from torch.autograd.gradcheck import zero_gradients
from Attacks.AttackMethods.AttackUtils import tensor2variable
from Attacks.AttackMethods.attacks import Attack
class DeepFoolAttack(Attack):
def __init__(self, model=None, overshoot=0.02, max_iters=50):
"""
:param model:
:param overshoot:
:param max_iters:
"""
super(DeepFoolAttack, self).__init__(model=model)
self.model = model
self.overshoot = overshoot
self.max_iterations = max_iters
def perturbation_single(self, sample, device):
"""
:param sample:
:param device:
:return:
"""
assert sample.shape[0] == 1, 'only perturbing one sample'
copy_sample = np.copy(sample)
var_sample = tensor2variable(torch.from_numpy(copy_sample), device=device, requires_grad=True).float()
self.model.eval()
prediction = self.model(var_sample)
original = torch.max(prediction, 1)[1]
current = original
        # indices of predictions in descending order
I = np.argsort(prediction.data.cpu().numpy() * -1)
perturbation_r_tot = np.zeros(copy_sample.shape, dtype=np.float32)
iteration = 0
while (original == current) and (iteration < self.max_iterations):
            # prediction for the adversarial example in i-th iteration
zero_gradients(var_sample)
self.model.eval()
f_kx = self.model(var_sample)
current = torch.max(f_kx, 1)[1]
# gradient of the original example
f_kx[0, I[0, 0]].backward(retain_graph=True)
grad_original = np.copy(var_sample.grad.data.cpu().numpy())
# calculate the w_k and f_k for every class label
closest_dist = 1e10
for k in range(1, 10):
# gradient of adversarial example for k-th label
zero_gradients(var_sample)
f_kx[0, I[0, k]].backward(retain_graph=True)
grad_current = var_sample.grad.data.cpu().numpy().copy()
# update w_k and f_k
w_k = grad_current - grad_original
f_k = (f_kx[0, I[0, k]] - f_kx[0, I[0, 0]]).detach().data.cpu().numpy()
# find the closest distance and the corresponding w_k
dist_k = np.abs(f_k) / (np.linalg.norm(w_k.flatten()) + 1e-15)
if dist_k < closest_dist:
closest_dist = dist_k
closest_w = w_k
# accumulation of perturbation
r_i = (closest_dist + 1e-4) * closest_w / np.linalg.norm(closest_w)
perturbation_r_tot = perturbation_r_tot + r_i
tmp_sample = np.clip((1 + self.overshoot) * perturbation_r_tot + sample, 0.0, 1.0)
var_sample = tensor2variable(torch.from_numpy(tmp_sample), device=device, requires_grad=True)
iteration += 1
adv = np.clip(sample + (1 + self.overshoot) * perturbation_r_tot, 0.0, 1.0)
return adv, perturbation_r_tot, iteration
def perturbation(self, xs, device):
"""
:param xs: batch of samples
:param device:
:return: batch of adversarial samples
"""
print('\nThe DeepFool attack perturbs the samples one by one ......\n')
adv_samples = []
for i in range(len(xs)):
adv_image, _, _ = self.perturbation_single(sample=xs[i: i + 1], device=device)
adv_samples.extend(adv_image)
return np.array(adv_samples)
|
server/api/blueprints/teacher.py | brayest/testcode | 652 | 12622118 | from datetime import datetime
from functools import wraps
import flask
import requests
from flask import Blueprint
from flask_babel import gettext
from flask_login import current_user, login_required, logout_user
from flask_weasyprint import HTML, render_pdf
from loguru import logger
from sqlalchemy import and_
from server.api.blueprints.login import create_user_from_data
from server.api.database.models import (
Day,
Appointment,
Payment,
PaymentType,
Report,
ReportType,
Student,
Teacher,
User,
WorkDay,
Kilometer,
Car,
CarType,
)
from server.api.push_notifications import FCM
from server.api.utils import jsonify_response, paginate
from server.consts import RECEIPT_URL, RECEIPTS_DEVELOPER_EMAIL, WORKDAY_DATE_FORMAT
from server.error_handling import NotificationError, RouteError
teacher_routes = Blueprint("teacher", __name__, url_prefix="/teacher")
def init_app(app):
app.register_blueprint(teacher_routes)
def teacher_required(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
if not current_user.teacher:
raise RouteError("User is not a teacher.", 401)
return func(*args, **kwargs)
return func_wrapper
def like_filter(model, key, value):
return getattr(model, key).like(f"%{value}%")
@teacher_routes.route("/", methods=["GET"])
@jsonify_response
@paginate
def teachers():
try:
extra_filters = {User: {"name": like_filter}}
query = Teacher.query.filter_by(is_approved=True)
return Teacher.filter_and_sort(
flask.request.args,
extra_filters=extra_filters,
query=query,
with_pagination=True,
)
except ValueError:
raise RouteError("Wrong parameters passed.")
@teacher_routes.route("/work_days", methods=["GET"])
@jsonify_response
@login_required
@teacher_required
def work_days():
""" return work days with filter - only on a specific date,
or with no date at all"""
try:
return {
"data": [
day.to_dict()
for day in current_user.teacher.filter_work_days(flask.request.args)
]
}
except ValueError:
raise RouteError("Wrong parameters passed.")
@teacher_routes.route("/work_days", methods=["POST"])
@jsonify_response
@login_required
@teacher_required
def update_work_days():
data = flask.request.get_json()
""" example data:
0: [{from_hour: 8, from_minutes: 0, to_hour: 14}], 1: {}....
OR
"03-15-2019": [{from_hour: 8}], "03-16-2019": []....
"""
logger.debug(f"WORK DAYS - got the following data")
logger.debug(data)
for day, hours_list in data.items():
# first, let's delete all current data with this date
# TODO better algorithm for that
try:
day = int(day)
params = dict(day=day, teacher=current_user.teacher)
WorkDay.query.filter_by(**params).delete()
except ValueError:
# probably a date
params = dict(
on_date=datetime.strptime(day, WORKDAY_DATE_FORMAT),
teacher=current_user.teacher,
)
WorkDay.query.filter_by(**params).delete()
for hours in hours_list:
from_hour = max(min(int(hours.get("from_hour")), 24), 0)
to_hour = max(min(int(hours.get("to_hour")), 24), 0)
from_minutes = max(min(int(hours.get("from_minutes")), 60), 0)
to_minutes = max(min(int(hours.get("to_minutes")), 60), 0)
car = current_user.teacher.cars.filter_by(id=hours.get("car_id")).first()
if not car:
car = current_user.teacher.cars.first()
if from_hour >= to_hour:
raise RouteError(
"There must be a bigger difference between the two times."
)
current_user.teacher.work_days.append(
WorkDay(
from_hour=from_hour,
from_minutes=from_minutes,
to_hour=to_hour,
to_minutes=to_minutes,
car=car,
**params,
)
)
current_user.save()
return {"message": "Days updated."}
@teacher_routes.route("/work_days/<int:day_id>", methods=["POST"])
@jsonify_response
@login_required
@teacher_required
def edit_work_day(day_id):
day = current_user.teacher.work_days.filter_by(id=day_id).first()
if not day:
raise RouteError("Day does not exist", 404)
data = flask.request.get_json()
from_hour = data.get("from_hour", day.from_hour)
to_hour = data.get("to_hour", day.to_hour)
day.update(from_hour=from_hour, to_hour=to_hour)
return {"message": "Day updated successfully."}
@teacher_routes.route("/work_days/<int:day_id>", methods=["DELETE"])
@jsonify_response
@login_required
@teacher_required
def delete_work_day(day_id):
day = current_user.teacher.work_days.filter_by(id=day_id).first()
if not day:
raise RouteError("Day does not exist", 404)
day.delete()
return {"message": "Day deleted."}
@teacher_routes.route("/<int:teacher_id>/available_hours", methods=["POST"])
@jsonify_response
@login_required
def available_hours(teacher_id):
data = flask.request.get_json()
teacher = Teacher.get_by_id(teacher_id)
duration = data.get("duration")
if duration:
duration = int(duration)
only_approved = False
student = None
if current_user.teacher:
only_approved = True
else:
student = current_user.student
places = (data.get("meetup_place_id", None), data.get("dropoff_place_id", None))
return {
"data": list(
teacher.available_hours(
datetime.strptime(data.get("date"), WORKDAY_DATE_FORMAT),
student=student,
duration=duration,
only_approved=only_approved,
places=places,
)
)
}
@teacher_routes.route("/add_payment", methods=["POST"])
@jsonify_response
@login_required
@teacher_required
def add_payment():
data = flask.request.get_json()
student = Student.get_by_id(data.get("student_id"))
amount = data.get("amount")
details = data.get("details")
if not student:
raise RouteError("Student does not exist.")
if not amount:
raise RouteError("Amount must not be empty.")
if not details:
raise RouteError("Details must not be empty.")
payment = Payment.create(
teacher=current_user.teacher,
student=student,
amount=amount,
payment_type=getattr(PaymentType, data.get("payment_type", ""), 1),
details=details,
crn=int(data.get("crn")) if data.get("crn") else None,
)
# send notification to student
if student.user.firebase_token:
logger.debug(f"sending fcm to {student.user} for new payment")
try:
FCM.notify(
token=student.user.firebase_token,
title=gettext("New Payment"),
body=gettext(
"%(user)s charged you for %(amount)s",
user=current_user.name,
amount=amount,
),
)
except NotificationError:
pass
return {"data": payment.to_dict()}, 201
@teacher_routes.route("/students", methods=["GET"])
@jsonify_response
@login_required
@teacher_required
@paginate
def students():
"""allow filtering by name / area of student, and sort by balance,
lesson number"""
try:
query = current_user.teacher.students
args = flask.request.args
extra_filters = {User: {"name": like_filter, "area": like_filter}}
return Student.filter_and_sort(
args, query, extra_filters=extra_filters, with_pagination=True
)
except ValueError:
raise RouteError("Wrong parameters passed.")
@teacher_routes.route("/edit_data", methods=["POST"])
@jsonify_response
@login_required
@teacher_required
def edit_data():
post_data = flask.request.get_json()
teacher = current_user.teacher
fields = ("price", "lesson_duration")
for field in fields:
if post_data.get(field):
setattr(teacher, field, post_data.get(field))
teacher.save()
return {"data": current_user.to_dict()}
@teacher_routes.route("/<int:teacher_id>/approve", methods=["GET"])
@jsonify_response
@login_required
def approve(teacher_id):
if not current_user.is_admin:
raise RouteError("Not authorized.", 401)
teacher = Teacher.get_by_id(teacher_id)
teacher.update(is_approved=True)
return {"data": teacher.to_dict()}
@teacher_routes.route("/ezcount_user", methods=["GET"])
@jsonify_response
@login_required
@teacher_required
def create_ezcount_user():
# https://docs.google.com/document/d/1me6u9CpJtydTIEdMkY3OH1dresZkrPRCK0_xw5Rn0Do/edit#
teacher = current_user.teacher
if not teacher.crn:
return
if teacher.invoice_api_key:
raise RouteError("Teacher already has an invoice account.")
api_key = flask.current_app.config.get("RECEIPTS_API_KEY")
payload = {
"api_key": api_key,
"api_email": RECEIPTS_DEVELOPER_EMAIL,
"developer_email": RECEIPTS_DEVELOPER_EMAIL,
"create_signature": 1,
"company_crn": teacher.crn,
"company_email": current_user.email,
"user_key": str(current_user.id),
"company_name": current_user.name,
"company_type": 1,
}
resp = requests.post(RECEIPT_URL + "api/user/create", json=payload)
resp_json = resp.json()
if resp_json["success"]:
teacher.update(
invoice_api_key=resp_json["u_api_key"], invoice_api_uid=resp_json["u_uuid"]
)
return {"message": "EZCount user created successfully."}
raise RouteError(resp_json["errMsg"])
@teacher_routes.route("/payments/<int:payment_id>/receipt", methods=["GET"])
@jsonify_response
@login_required
@teacher_required
def add_receipt(payment_id):
# https://docs.google.com/document/d/1_kSH5xViiZi5Y1tZtWpNrkKiq4Htym7V23TuhL7KlSU/edit#
payment = Payment.get_by_id(payment_id)
if not payment or payment.teacher != current_user.teacher:
raise RouteError("Payment not found.", 404)
if not payment.teacher.invoice_api_key:
raise RouteError("Teacher does not have an invoice account.")
api_key = flask.current_app.config.get("RECEIPTS_API_KEY")
payload = {
"api_key": payment.teacher.invoice_api_key,
"developer_email": RECEIPTS_DEVELOPER_EMAIL,
"created_by_api_key": api_key,
"transaction_id": payment.id,
"type": 320,
"customer_name": payment.student.user.name,
"customer_email": payment.student.user.email,
"customer_crn": payment.crn,
"item": {
1: {
"details": payment.details,
"amount": "1",
"price": payment.amount,
"price_inc_vat": 1, # this price include the VAT
}
},
"payment": {
1: {"payment_type": payment.payment_type.value, "payment": payment.amount}
},
"price_total": payment.amount, # /*THIS IS A MUST ONLY IN INVOICE RECIEPT*/
}
resp = requests.post(RECEIPT_URL + "api/createDoc", json=payload)
resp_json = resp.json()
if resp_json["success"]:
payment.update(pdf_link=resp_json["pdf_link"])
return {"pdf_link": resp_json["pdf_link"]}
raise RouteError(resp_json["errMsg"])
@teacher_routes.route("/ezcount", methods=["GET"])
@jsonify_response
@login_required
@teacher_required
def login_to_ezcount():
# https://docs.google.com/document/d/1me6u9CpJtydTIEdMkY3OH1dresZkrPRCK0_xw5Rn0Do/edit#
if not current_user.teacher.invoice_api_key:
raise RouteError("Teacher does not have an invoice account.")
redirect = flask.request.args.get("redirect", "")
resp = requests.post(
RECEIPT_URL + f"api/getClientSafeUrl/login?redirectTo={redirect}",
json={
"api_key": current_user.teacher.invoice_api_key,
"api_email": current_user.email,
"developer_email": RECEIPTS_DEVELOPER_EMAIL,
},
)
return {"url": resp.json()["url"]}
@teacher_routes.route("/reports", methods=["POST"])
@jsonify_response
@login_required
@teacher_required
def create_report():
post_data = flask.request.get_json()
try:
report_type = ReportType[post_data.get("report_type")]
except KeyError:
raise RouteError("Report type was not found.")
car = current_user.teacher.cars.filter_by(id=post_data.get("car")).first()
dates = dict()
if report_type.name in Report.DATES_REQUIRED:
dates["since"] = post_data.get("since")
dates["until"] = post_data.get("until")
try:
dates["since"] = datetime.strptime(
dates["since"], WORKDAY_DATE_FORMAT
).replace(second=0, microsecond=0)
dates["until"] = datetime.strptime(
dates["until"], WORKDAY_DATE_FORMAT
).replace(second=0, microsecond=0)
except (ValueError, TypeError):
raise RouteError("Dates are not valid.")
report = Report.create(
report_type=report_type.value, teacher=current_user.teacher, car=car, **dates
)
return {"data": report.to_dict()}
@teacher_routes.route("/reports/<uuid>", methods=["GET"])
def show_report(uuid):
REPORTS = {
"students": lambda report: report.teacher.students.filter_by(is_active=True)
.join(User, Student.user)
.order_by(User.name.asc()),
"lessons": lambda report: report.teacher.lessons.filter(
and_(
Appointment.is_approved == True,
Appointment.date < report.until,
Appointment.date > report.since,
)
),
"kilometers": lambda report: report.teacher.kilometers.filter(
and_(
Kilometer.date < report.until,
Kilometer.date > report.since,
Kilometer.car == report.car,
)
),
}
report = Report.query.filter_by(uuid=uuid).first()
if not report:
raise RouteError("Report was not found.")
report_data = REPORTS.get(report.report_type.name)
html = flask.render_template(
f"reports/{report.report_type.name}.html",
data=report_data(report).all(),
teacher=report.teacher,
report=report,
)
return render_pdf(HTML(string=html))
# return html
@teacher_routes.route("/create_student", methods=["POST"])
@jsonify_response
@login_required
@teacher_required
def create_bot_student():
teacher = current_user.teacher
data = flask.request.values
user = create_user_from_data(data, required=["email", "name", "phone"])
car = teacher.cars.filter_by(id=data.get("car_id")).first()
if not car:
raise RouteError("Car does not exist.")
try:
price = int(data.get("price", ""))
except ValueError:
price = None
student = Student.create(
user=user,
teacher=teacher,
creator=current_user,
price=price,
car=car,
is_approved=True,
)
return {"data": student.user.to_dict()}, 201
@teacher_routes.route("/<int:teacher_id>/cars", methods=["GET"])
@jsonify_response
@login_required
def cars(teacher_id):
teacher = Teacher.get_by_id(teacher_id)
if not teacher:
raise RouteError("Teacher not found.")
return {
"data": [car.to_dict() for car in Car.query.filter_by(teacher=teacher).all()]
}
@teacher_routes.route("/cars", methods=["POST"])
@jsonify_response
@login_required
@teacher_required
def register_car():
"""register a new car for a teacher"""
data = flask.request.get_json()
number = data.get("number")
if not number:
raise RouteError("Car number is required.")
# if this number already exist, raise error
exists = current_user.teacher.cars.filter_by(number=number).first()
if exists:
raise RouteError("Car already exists.")
try:
type_ = CarType[data.get("type", "")]
except KeyError:
type_ = CarType.manual
color = data.get("color")
car = Car.create(
name=data.get("name"),
type=type_.value,
number=number,
teacher=current_user.teacher,
color=color[:6] if color else None,
)
return {"data": car.to_dict()}, 201
@teacher_routes.route("/cars/<int:id_>", methods=["POST"])
@jsonify_response
@login_required
@teacher_required
def update_car(id_):
data = flask.request.get_json()
car = current_user.teacher.cars.filter_by(id=id_).first()
if not car:
raise RouteError("Car does not exist.")
number = data.get("number")
if not number:
raise RouteError("Car number is required.")
try:
type_ = CarType[data.get("type", "")]
except KeyError:
type_ = CarType.manual
color = data.get("color")
car.update(
name=data.get("name"),
type=type_.value,
number=number,
color=color[:6] if color else None,
)
return {"data": car.to_dict()}
@teacher_routes.route("/cars/<int:id_>", methods=["DELETE"])
@jsonify_response
@login_required
@teacher_required
def delete_car(id_):
car = current_user.teacher.cars.filter_by(id=id_).first()
if not car:
raise RouteError("Car does not exist.")
car.delete()
return {"message": "Car deleted."}
@teacher_routes.route("/cars/<int:id_>/kilometer", methods=["POST"])
@jsonify_response
@login_required
@teacher_required
def update_kilometer(id_):
"""update kilometer for a specific date"""
car = current_user.teacher.cars.filter_by(id=id_).first()
if not car:
raise RouteError("Car does not exist.")
data = flask.request.get_json()
try:
date = datetime.strptime(data.get("date"), WORKDAY_DATE_FORMAT)
except (ValueError, TypeError):
raise RouteError("Date is not valid.")
# if this date exist, delete it first
exists = current_user.teacher.kilometers.filter_by(date=date).first()
if exists:
exists.delete()
start, end = data.get("start"), data.get("end")
if not start or not end:
raise RouteError("All kilometer distances are required.")
if end < start:
raise RouteError("Ending value must be bigger than starting value.")
kilometer = Kilometer.create(
date=date,
personal=data.get("personal", 0),
start_of_day=start,
end_of_day=end,
car=car,
teacher=current_user.teacher,
)
return {"data": kilometer.to_dict()}, 201
|
dfirtrack_main/views/company_views.py | thomas-kropeit/dfirtrack | 273 | 12622119 | from django.contrib import messages
from django.contrib.auth.mixins import LoginRequiredMixin
from django.http import HttpResponse
from django.shortcuts import redirect, render
from django.urls import reverse
from django.views.generic import DetailView, ListView
from django.views.generic.edit import CreateView, UpdateView
from dfirtrack_main.forms import CompanyForm
from dfirtrack_main.logger.default_logger import debug_logger
from dfirtrack_main.models import Company
class CompanyList(LoginRequiredMixin, ListView):
login_url = '/login'
model = Company
template_name = 'dfirtrack_main/company/company_list.html'
context_object_name = 'company_list'
def get_queryset(self):
debug_logger(str(self.request.user), " COMPANY_LIST_ENTERED")
return Company.objects.order_by('company_name')
class CompanyDetail(LoginRequiredMixin, DetailView):
login_url = '/login'
model = Company
template_name = 'dfirtrack_main/company/company_detail.html'
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
company = self.object
company.logger(str(self.request.user), " COMPANY_DETAIL_ENTERED")
return context
class CompanyCreate(LoginRequiredMixin, CreateView):
login_url = '/login'
model = Company
form_class = CompanyForm
template_name = 'dfirtrack_main/generic_form.html'
def get(self, request, *args, **kwargs):
form = self.form_class()
debug_logger(str(request.user), " COMPANY_ADD_ENTERED")
return render(
request,
self.template_name,
{
'form': form,
'title': 'Add',
'object_type': 'company',
},
)
def post(self, request, *args, **kwargs):
form = self.form_class(request.POST)
if form.is_valid():
company = form.save(commit=False)
company.save()
company.logger(str(request.user), " COMPANY_ADD_EXECUTED")
messages.success(request, 'Company added')
return redirect(reverse('company_detail', args=(company.company_id,)))
else:
return render(
request,
self.template_name,
{
'form': form,
'title': 'Add',
'object_type': 'company',
},
)
class CompanyCreatePopup(LoginRequiredMixin, CreateView):
login_url = '/login'
model = Company
form_class = CompanyForm
template_name = 'dfirtrack_main/generic_form_popup.html'
def get(self, request, *args, **kwargs):
form = self.form_class()
debug_logger(str(request.user), " COMPANY_ADD_POPUP_ENTERED")
return render(
request,
self.template_name,
{
'form': form,
'title': 'Add',
'object_type': 'company',
},
)
def post(self, request, *args, **kwargs):
form = self.form_class(request.POST)
if form.is_valid():
company = form.save(commit=False)
company.save()
company.logger(str(request.user), " COMPANY_ADD_POPUP_EXECUTED")
messages.success(request, 'Company added')
return HttpResponse(
'<script type="text/javascript">window.close();</script>'
)
else:
return render(
request,
self.template_name,
{
'form': form,
'title': 'Add',
'object_type': 'company',
},
)
class CompanyUpdate(LoginRequiredMixin, UpdateView):
login_url = '/login'
model = Company
form_class = CompanyForm
template_name = 'dfirtrack_main/generic_form.html'
def get(self, request, *args, **kwargs):
company = self.get_object()
form = self.form_class(instance=company)
company.logger(str(request.user), " COMPANY_EDIT_ENTERED")
return render(
request,
self.template_name,
{
'form': form,
'title': 'Edit',
'object_type': 'company',
'object_name': company.company_name,
},
)
def post(self, request, *args, **kwargs):
company = self.get_object()
form = self.form_class(request.POST, instance=company)
if form.is_valid():
company = form.save(commit=False)
company.save()
company.logger(str(request.user), " COMPANY_EDIT_EXECUTED")
messages.success(request, 'Company edited')
return redirect(reverse('company_detail', args=(company.company_id,)))
else:
return render(
request,
self.template_name,
{
'form': form,
'title': 'Edit',
'object_type': 'company',
'object_name': company.company_name,
},
)
|
examples/python/contrasts.py | CCHiggins/statsmodels | 6,931 | 12622125 | <filename>examples/python/contrasts.py
#!/usr/bin/env python
# coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook contrasts.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # Contrasts Overview
import numpy as np
import statsmodels.api as sm
# This document is based heavily on this excellent resource from UCLA
# http://www.ats.ucla.edu/stat/r/library/contrast_coding.htm
# A categorical variable of K categories, or levels, usually enters a
# regression as a sequence of K-1 dummy variables. This amounts to a linear
# hypothesis on the level means. That is, each test statistic for these
# variables amounts to testing whether the mean for that level is
# statistically significantly different from the mean of the base category.
# This dummy coding is called Treatment coding in R parlance, and we will
# follow this convention. There are, however, different coding methods that
# amount to different sets of linear hypotheses.
#
# In fact, the dummy coding is not technically a contrast coding. This is
# because the dummy variables add to one and are not functionally
# independent of the model's intercept. On the other hand, a set of
# *contrasts* for a categorical variable with `k` levels is a set of `k-1`
# functionally independent linear combinations of the factor level means
# that are also independent of the sum of the dummy variables. The dummy
# coding is not wrong *per se*. It captures all of the coefficients, but it
# complicates matters when the model assumes independence of the
# coefficients such as in ANOVA. Linear regression models do not assume
# independence of the coefficients and thus dummy coding is often the only
# coding that is taught in this context.
#
# To have a look at the contrast matrices in Patsy, we will use data from
# UCLA ATS. First let's load the data.
# #### Example Data
import pandas as pd
url = "https://stats.idre.ucla.edu/stat/data/hsb2.csv"
hsb2 = pd.read_table(url, delimiter=",")
hsb2.head(10)
# It will be instructive to look at the mean of the dependent variable,
# write, for each level of race ((1 = Hispanic, 2 = Asian, 3 = African
# American and 4 = Caucasian)).
hsb2.groupby("race")["write"].mean()
# #### Treatment (Dummy) Coding
# Dummy coding is likely the most well known coding scheme. It compares
# each level of the categorical variable to a base reference level. The base
# reference level is the value of the intercept. It is the default contrast
# in Patsy for unordered categorical factors. The Treatment contrast matrix
# for race would be
from patsy.contrasts import Treatment
levels = [1, 2, 3, 4]
contrast = Treatment(reference=0).code_without_intercept(levels)
print(contrast.matrix)
# Here we used `reference=0`, which implies that the first level,
# Hispanic, is the reference category against which the other level effects
# are measured. As mentioned above, the columns do not sum to zero and are
# thus not independent of the intercept. To be explicit, let's look at how
# this would encode the `race` variable.
hsb2.race.head(10)
print(contrast.matrix[hsb2.race - 1, :][:20])
pd.get_dummies(hsb2.race.values, drop_first=False)
# This is a bit of a trick, as the `race` category conveniently maps to
# zero-based indices. If it does not, this conversion happens under the
# hood, so this will not work in general but nonetheless is a useful
# exercise to fix ideas. The below illustrates the output using the three
# contrasts above
from statsmodels.formula.api import ols
mod = ols("write ~ C(race, Treatment)", data=hsb2)
res = mod.fit()
print(res.summary())
# We explicitly gave the contrast for race; however, since Treatment is
# the default, we could have omitted this.
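# As a quick sketch of that equivalence (the variable name below is ours), the
# following fit gives identical parameters:
mod_default = ols("write ~ C(race)", data=hsb2)
print(mod_default.fit().params)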
# ### Simple Coding
# Like Treatment Coding, Simple Coding compares each level to a fixed
# reference level. However, with simple coding, the intercept is the grand
# mean of all the levels of the factors. Patsy does not have the Simple
# contrast included, but you can easily define your own contrasts. To do so,
# write a class that contains a code_with_intercept and a
# code_without_intercept method that returns a patsy.contrast.ContrastMatrix
# instance
from patsy.contrasts import ContrastMatrix
def _name_levels(prefix, levels):
return ["[%s%s]" % (prefix, level) for level in levels]
class Simple(object):
def _simple_contrast(self, levels):
nlevels = len(levels)
contr = -1.0 / nlevels * np.ones((nlevels, nlevels - 1))
contr[1:][np.diag_indices(nlevels - 1)] = (nlevels - 1.0) / nlevels
return contr
def code_with_intercept(self, levels):
contrast = np.column_stack(
(np.ones(len(levels)), self._simple_contrast(levels)))
return ContrastMatrix(contrast, _name_levels("Simp.", levels))
def code_without_intercept(self, levels):
contrast = self._simple_contrast(levels)
return ContrastMatrix(contrast, _name_levels("Simp.", levels[:-1]))
hsb2.groupby("race")["write"].mean().mean()
contrast = Simple().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Simple)", data=hsb2)
res = mod.fit()
print(res.summary())
# ### Sum (Deviation) Coding
# Sum coding compares the mean of the dependent variable for a given level
# to the overall mean of the dependent variable over all the levels. That
# is, it uses contrasts between each of the first k-1 levels and level k. In
# this example, level 1 is compared to all the others, level 2 to all the
# others, and level 3 to all the others.
from patsy.contrasts import Sum
contrast = Sum().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Sum)", data=hsb2)
res = mod.fit()
print(res.summary())
# This corresponds to a parameterization that forces all the coefficients
# to sum to zero. Notice that the intercept here is the grand mean where the
# grand mean is the mean of means of the dependent variable by each level.
hsb2.groupby("race")["write"].mean().mean()
# ### Backward Difference Coding
# In backward difference coding, the mean of the dependent variable for a
# level is compared with the mean of the dependent variable for the prior
# level. This type of coding may be useful for a nominal or an ordinal
# variable.
from patsy.contrasts import Diff
contrast = Diff().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Diff)", data=hsb2)
res = mod.fit()
print(res.summary())
# For example, here the coefficient on level 1 is the mean of `write` at
# level 2 compared with the mean at level 1. I.e.,
res.params["C(race, Diff)[D.1]"]
hsb2.groupby("race").mean()["write"][2] - hsb2.groupby(
"race").mean()["write"][1]
# ### Helmert Coding
# Our version of Helmert coding is sometimes referred to as Reverse
# Helmert Coding. The mean of the dependent variable for a level is compared
# to the mean of the dependent variable over all previous levels. Hence the
# name 'reverse', which is sometimes applied to differentiate it from forward
# Helmert coding. This comparison does not make much sense for a nominal
# variable such as race, but we would use the Helmert contrast like so:
from patsy.contrasts import Helmert
contrast = Helmert().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Helmert)", data=hsb2)
res = mod.fit()
print(res.summary())
# To illustrate, the comparison on level 4 is the mean of the dependent
# variable at the previous three levels taken from the mean at level 4
grouped = hsb2.groupby("race")
grouped.mean()["write"][4] - grouped.mean()["write"][:3].mean()
# As you can see, these are only equal up to a constant. Other versions of
# the Helmert contrast give the actual difference in means. Regardless, the
# hypothesis tests are the same.
k = 4
1.0 / k * (grouped.mean()["write"][k] - grouped.mean()["write"][:k - 1].mean())
k = 3
1.0 / k * (grouped.mean()["write"][k] - grouped.mean()["write"][:k - 1].mean())
# ### Orthogonal Polynomial Coding
# The coefficients taken on by polynomial coding for `k=4` levels are the
# linear, quadratic, and cubic trends in the categorical variable. The
# categorical variable here is assumed to be represented by an underlying,
# equally spaced numeric variable. Therefore, this type of encoding is used
# only for ordered categorical variables with equal spacing. In general, the
# polynomial contrast produces polynomials of order `k-1`. Since `race` is
# not an ordered factor variable let's use `read` as an example. First we
# need to create an ordered categorical from `read`.
hsb2["readcat"] = np.asarray(pd.cut(hsb2.read, bins=4))
hsb2["readcat"] = hsb2["readcat"].astype(object)
hsb2.groupby("readcat").mean()["write"]
from patsy.contrasts import Poly
levels = hsb2.readcat.unique()
contrast = Poly().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(readcat, Poly)", data=hsb2)
res = mod.fit()
print(res.summary())
# As you can see, readcat has a significant linear effect on the dependent
# variable `write` but the quadratic and cubic effects are insignificant.
|
src/Entity/__init__.py | QWERTSKIHACK/peniot | 143 | 12622168 | """
This package contains the following entities:
1) Protocol
2) Attack Suite
3) Attack
4) Input Format
These backend entities represent and structure our code.
They have the following relationships (connection endpoints denote the cardinality of each entity):
- Protocol 1----------* Attack Suite
- Attack suite 1----------* Attack
- Attack 1----------* Input format
""" |
Clients/ParaView/Testing/Python/VolumeCellSelection.py | xj361685640/ParaView | 815 | 12622193 | from paraview.simple import *
from paraview import smtesting
import vtk
from paraview.vtk.vtkCommonCore import vtkCollection
import vtk.vtkRenderingVolume
import os
paraview.simple._DisableFirstRenderCameraReset()
smtesting.ProcessCommandLineArguments()
smtesting.LoadServerManagerState(smtesting.StateXMLFileName)
view = GetRenderView()
view.ViewSize = [400, 400]
view.RemoteRenderThreshold = 0
SetActiveView(view)
Render()
# Select cells from a wavelet volume
wav = FindSource('Wavelet1')
SetActiveSource(wav)
rep = vtkCollection()
sources = vtkCollection()
view.SelectSurfaceCells([0, 0, 200, 200], rep, sources)
sel = sources.GetItemAsObject(0)
selPyProxy = servermanager._getPyProxy(sel)
extract = ExtractSelection(Selection = selPyProxy)
Show()
# Hide the volume and show the ExtractSelection filter
wav_rep = GetRepresentation(wav)
wav_rep.Visibility = False
extract_rep = GetRepresentation(extract)
extract_rep.Visibility = True
## Compare with baseline
if not smtesting.DoRegressionTesting(view.SMProxy):
    raise smtesting.TestError('Test failed.')
print ('\nTest passed.')
|
python_modules/dagster-graphql/dagster_graphql_tests/graphql/test_partition_backfill.py | dbatten5/dagster | 4,606 | 12622199 | import os
from dagster.core.execution.backfill import BulkActionStatus
from dagster.seven import get_system_temp_directory
from dagster_graphql.client.query import LAUNCH_PARTITION_BACKFILL_MUTATION
from dagster_graphql.test.utils import (
execute_dagster_graphql,
execute_dagster_graphql_and_finish_runs,
infer_repository_selector,
)
from .graphql_context_test_suite import ExecutingGraphQLContextTestMatrix
PARTITION_PROGRESS_QUERY = """
query PartitionProgressQuery($backfillId: String!) {
partitionBackfillOrError(backfillId: $backfillId) {
... on PartitionBackfill {
__typename
backfillId
status
numRequested
numTotal
fromFailure
reexecutionSteps
}
... on PythonError {
message
stack
}
}
}
"""
CANCEL_BACKFILL_MUTATION = """
mutation($backfillId: String!) {
cancelPartitionBackfill(backfillId: $backfillId) {
... on CancelBackfillSuccess {
__typename
backfillId
}
... on PythonError {
message
stack
}
}
}
"""
RESUME_BACKFILL_MUTATION = """
mutation($backfillId: String!) {
resumePartitionBackfill(backfillId: $backfillId) {
... on ResumeBackfillSuccess {
__typename
backfillId
}
... on PythonError {
message
stack
}
}
}
"""
class TestDaemonPartitionBackfill(ExecutingGraphQLContextTestMatrix):
def test_launch_full_pipeline_backfill(self, graphql_context):
repository_selector = infer_repository_selector(graphql_context)
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PARTITION_BACKFILL_MUTATION,
variables={
"backfillParams": {
"selector": {
"repositorySelector": repository_selector,
"partitionSetName": "integer_partition",
},
"partitionNames": ["2", "3"],
}
},
)
assert not result.errors
assert result.data
assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
backfill_id = result.data["launchPartitionBackfill"]["backfillId"]
result = execute_dagster_graphql(
graphql_context, PARTITION_PROGRESS_QUERY, variables={"backfillId": backfill_id}
)
assert not result.errors
assert result.data
assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
assert result.data["partitionBackfillOrError"]["status"] == "REQUESTED"
assert result.data["partitionBackfillOrError"]["numRequested"] == 0
assert result.data["partitionBackfillOrError"]["numTotal"] == 2
def test_launch_partial_backfill(self, graphql_context):
# execute a full pipeline, without the failure environment variable
repository_selector = infer_repository_selector(graphql_context)
partition_set_selector = {
"repositorySelector": repository_selector,
"partitionSetName": "chained_integer_partition",
}
# reexecute a partial pipeline
partial_steps = ["after_failure"]
result = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PARTITION_BACKFILL_MUTATION,
variables={
"backfillParams": {
"selector": partition_set_selector,
"partitionNames": ["2", "3"],
"reexecutionSteps": partial_steps,
}
},
)
assert not result.errors
assert result.data
assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
backfill_id = result.data["launchPartitionBackfill"]["backfillId"]
result = execute_dagster_graphql(
graphql_context, PARTITION_PROGRESS_QUERY, variables={"backfillId": backfill_id}
)
assert not result.errors
assert result.data
assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
assert result.data["partitionBackfillOrError"]["status"] == "REQUESTED"
assert result.data["partitionBackfillOrError"]["numRequested"] == 0
assert result.data["partitionBackfillOrError"]["numTotal"] == 2
assert result.data["partitionBackfillOrError"]["reexecutionSteps"] == ["after_failure"]
def test_cancel_backfill(self, graphql_context):
repository_selector = infer_repository_selector(graphql_context)
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PARTITION_BACKFILL_MUTATION,
variables={
"backfillParams": {
"selector": {
"repositorySelector": repository_selector,
"partitionSetName": "integer_partition",
},
"partitionNames": ["2", "3"],
}
},
)
assert not result.errors
assert result.data
assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
backfill_id = result.data["launchPartitionBackfill"]["backfillId"]
result = execute_dagster_graphql(
graphql_context, PARTITION_PROGRESS_QUERY, variables={"backfillId": backfill_id}
)
assert not result.errors
assert result.data
assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
assert result.data["partitionBackfillOrError"]["status"] == "REQUESTED"
assert result.data["partitionBackfillOrError"]["numRequested"] == 0
assert result.data["partitionBackfillOrError"]["numTotal"] == 2
result = execute_dagster_graphql(
graphql_context, CANCEL_BACKFILL_MUTATION, variables={"backfillId": backfill_id}
)
assert result.data
assert result.data["cancelPartitionBackfill"]["__typename"] == "CancelBackfillSuccess"
result = execute_dagster_graphql(
graphql_context, PARTITION_PROGRESS_QUERY, variables={"backfillId": backfill_id}
)
assert not result.errors
assert result.data
assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
assert result.data["partitionBackfillOrError"]["status"] == "CANCELED"
def test_resume_backfill(self, graphql_context):
repository_selector = infer_repository_selector(graphql_context)
result = execute_dagster_graphql(
graphql_context,
LAUNCH_PARTITION_BACKFILL_MUTATION,
variables={
"backfillParams": {
"selector": {
"repositorySelector": repository_selector,
"partitionSetName": "integer_partition",
},
"partitionNames": ["2", "3"],
}
},
)
assert not result.errors
assert result.data
assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
backfill_id = result.data["launchPartitionBackfill"]["backfillId"]
result = execute_dagster_graphql(
graphql_context, PARTITION_PROGRESS_QUERY, variables={"backfillId": backfill_id}
)
assert not result.errors
assert result.data
assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
assert result.data["partitionBackfillOrError"]["status"] == "REQUESTED"
assert result.data["partitionBackfillOrError"]["numRequested"] == 0
assert result.data["partitionBackfillOrError"]["numTotal"] == 2
# manually mark as failed
backfill = graphql_context.instance.get_backfill(backfill_id)
graphql_context.instance.update_backfill(backfill.with_status(BulkActionStatus.FAILED))
result = execute_dagster_graphql(
graphql_context, RESUME_BACKFILL_MUTATION, variables={"backfillId": backfill_id}
)
assert result.data
assert result.data["resumePartitionBackfill"]["__typename"] == "ResumeBackfillSuccess"
result = execute_dagster_graphql(
graphql_context, PARTITION_PROGRESS_QUERY, variables={"backfillId": backfill_id}
)
assert not result.errors
assert result.data
assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
assert result.data["partitionBackfillOrError"]["status"] == "REQUESTED"
class TestLaunchDaemonBackfillFromFailure(ExecutingGraphQLContextTestMatrix):
def test_launch_from_failure(self, graphql_context):
repository_selector = infer_repository_selector(graphql_context)
partition_set_selector = {
"repositorySelector": repository_selector,
"partitionSetName": "chained_integer_partition",
}
# trigger failure in the conditionally_fail solid
output_file = os.path.join(
get_system_temp_directory(), "chained_failure_pipeline_conditionally_fail"
)
try:
with open(output_file, "w"):
result = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PARTITION_BACKFILL_MUTATION,
variables={
"backfillParams": {
"selector": partition_set_selector,
"partitionNames": ["2", "3"],
}
},
)
finally:
os.remove(output_file)
assert not result.errors
assert result.data
assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
# re-execute from failure (without the failure file)
result = execute_dagster_graphql_and_finish_runs(
graphql_context,
LAUNCH_PARTITION_BACKFILL_MUTATION,
variables={
"backfillParams": {
"selector": partition_set_selector,
"partitionNames": ["2", "3"],
"fromFailure": True,
}
},
)
assert not result.errors
assert result.data
assert result.data["launchPartitionBackfill"]["__typename"] == "LaunchBackfillSuccess"
backfill_id = result.data["launchPartitionBackfill"]["backfillId"]
result = execute_dagster_graphql(
graphql_context, PARTITION_PROGRESS_QUERY, variables={"backfillId": backfill_id}
)
assert not result.errors
assert result.data
assert result.data["partitionBackfillOrError"]["__typename"] == "PartitionBackfill"
assert result.data["partitionBackfillOrError"]["status"] == "REQUESTED"
assert result.data["partitionBackfillOrError"]["numRequested"] == 0
assert result.data["partitionBackfillOrError"]["numTotal"] == 2
assert result.data["partitionBackfillOrError"]["fromFailure"]
|
addons/dropbox/settings/defaults.py | gaybro8777/osf.io | 628 | 12622215 | # OAuth app keys
DROPBOX_KEY = None
DROPBOX_SECRET = None
DROPBOX_AUTH_CSRF_TOKEN = 'dropbox-auth-csrf-token'
# Max file size permitted by frontend in megabytes
MAX_UPLOAD_SIZE = 150
|
tests/lazy/test_solve_lazy.py | rodluger/starry | 116 | 12622217 | # -*- coding: utf-8 -*-
"""
Linear solve / likelihood tests.
"""
import starry
import numpy as np
from scipy.linalg import cho_solve
from scipy.stats import multivariate_normal
import pytest
import itertools
@pytest.fixture(autouse=True)
def data():
# Instantiate a dipole map
map = starry.Map(ydeg=1, reflected=True)
amp_true = 0.75
inc_true = 60
y_true = np.array([1, 0.1, 0.2, 0.3])
map.amp = amp_true
map[1, :] = y_true[1:]
map.inc = inc_true
# Generate a synthetic light curve with just a little noise
theta = np.linspace(0, 360, 100)
phi = 3.5 * theta
xs = np.cos(phi * np.pi / 180)
ys = 0.1 * np.cos(phi * np.pi / 180)
zs = np.sin(phi * np.pi / 180)
kwargs = dict(theta=theta, xs=xs, ys=ys, zs=zs)
flux = map.flux(**kwargs).eval()
sigma = 1e-5
np.random.seed(1)
flux += np.random.randn(len(theta)) * sigma
return (map, kwargs, amp_true, inc_true, y_true, sigma, flux)
# Parameter combinations we'll test
vals = ["scalar", "vector", "matrix", "cholesky"]
woodbury = [False, True]
solve_inputs = itertools.product(vals, vals)
lnlike_inputs = itertools.product(vals, vals, woodbury)
@pytest.mark.parametrize("L,C", solve_inputs)
def test_solve(L, C, data):
map, kwargs, amp_true, inc_true, y_true, sigma, flux = data
# Place a generous prior on the map coefficients
if L == "scalar":
map.set_prior(L=1)
elif L == "vector":
map.set_prior(L=np.ones(map.Ny))
elif L == "matrix":
map.set_prior(L=np.eye(map.Ny))
elif L == "cholesky":
map.set_prior(cho_L=np.eye(map.Ny))
# Provide the dataset
if C == "scalar":
map.set_data(flux, C=sigma ** 2)
elif C == "vector":
map.set_data(flux, C=np.ones(len(flux)) * sigma ** 2)
elif C == "matrix":
map.set_data(flux, C=np.eye(len(flux)) * sigma ** 2)
elif C == "cholesky":
map.set_data(flux, cho_C=np.eye(len(flux)) * sigma)
# Solve the linear problem
map.inc = inc_true
mu, cho_cov = map.solve(**kwargs)
mu = mu.eval()
cho_cov = cho_cov.eval()
# Ensure the likelihood of the true value is close to that of
# the MAP solution
cov = cho_cov.dot(cho_cov.T)
LnL0 = multivariate_normal.logpdf(mu, mean=mu, cov=cov)
LnL = multivariate_normal.logpdf(amp_true * y_true, mean=mu, cov=cov)
assert LnL0 - LnL < 5.00
# Check that we can draw from the posterior
map.draw()
@pytest.mark.parametrize("L,C,woodbury", lnlike_inputs)
def test_lnlike(L, C, woodbury, data):
"""Test the log marginal likelihood method."""
map, kwargs, amp_true, inc_true, y_true, sigma, flux = data
# Place a generous prior on the map coefficients
if L == "scalar":
map.set_prior(L=1)
elif L == "vector":
map.set_prior(L=np.ones(map.Ny))
elif L == "matrix":
map.set_prior(L=np.eye(map.Ny))
elif L == "cholesky":
map.set_prior(cho_L=np.eye(map.Ny))
# Provide the dataset
if C == "scalar":
map.set_data(flux, C=sigma ** 2)
elif C == "vector":
map.set_data(flux, C=np.ones(len(flux)) * sigma ** 2)
elif C == "matrix":
map.set_data(flux, C=np.eye(len(flux)) * sigma ** 2)
elif C == "cholesky":
map.set_data(flux, cho_C=np.eye(len(flux)) * sigma)
# Compute the marginal log likelihood for different inclinations
incs = [15, 30, 45, 60, 75, 90]
ll = np.zeros_like(incs, dtype=float)
for i, inc in enumerate(incs):
map.inc = inc
ll[i] = map.lnlike(woodbury=woodbury, **kwargs).eval()
# Verify that we get the correct inclination
assert incs[np.argmax(ll)] == 60
assert np.allclose(ll[np.argmax(ll)], 974.221605) # benchmarked
|
niftynet/engine/windows_aggregator_grid.py | tdml13/NiftyNet | 1,403 | 12622230 | 
# -*- coding: utf-8 -*-
"""
windows aggregator decodes sampling grid coordinates and image ids from
batch data, forms image-level outputs and writes them to disk.
"""
from __future__ import absolute_import, division, print_function
import os
from collections import OrderedDict
import numpy as np
import pandas as pd
# pylint: disable=too-many-nested-blocks
# pylint: disable=too-many-branches
import niftynet.io.misc_io as misc_io
from niftynet.engine.windows_aggregator_base import ImageWindowsAggregator
from niftynet.layer.discrete_label_normalisation import \
DiscreteLabelNormalisationLayer
from niftynet.layer.pad import PadLayer
class GridSamplesAggregator(ImageWindowsAggregator):
"""
    This class keeps a record of the currently cached image,
    initialised as all zeros; the values are replaced by image
    window data decoded from each batch.
"""
def __init__(self,
image_reader,
name='image',
output_path=os.path.join('.', 'output'),
window_border=(),
interp_order=0,
postfix='niftynet_out',
fill_constant=0.0):
ImageWindowsAggregator.__init__(
self, image_reader=image_reader, output_path=output_path)
self.name = name
self.image_out = None
self.csv_out = None
self.window_border = window_border
self.output_interp_order = interp_order
self.postfix = postfix
self.fill_constant = fill_constant
def decode_batch(self, window, location):
"""
        Save the multiple outputs listed in the window dictionary. Fields
        whose dictionary key contains the keyword 'window' are saved as
        images; the rest are saved as csv. Each csv file is written with a
        header naming the output elements and the location coordinates, and
        each row holds the outputs of one window followed by its location
        array.
:param window: dictionary of output
:param location: location of the input
:return:
"""
n_samples = location.shape[0]
location_cropped = {}
for key in window:
            if 'window' in key:  # all outputs to be created as images should
                # contain the keyword "window"
window[key], location_cropped[key] = self.crop_batch(
window[key], location, self.window_border)
for batch_id in range(n_samples):
image_id = location[batch_id, 0]
if image_id != self.image_id:
# image name changed:
# save current result and create an empty result file
self._save_current_image()
self._save_current_csv()
if self._is_stopping_signal(location[batch_id]):
return False
self.image_out, self.csv_out = {}, {}
for key in window:
if 'window' in key:
# to be saved as image
self.image_out[key] = self._initialise_empty_image(
image_id=image_id,
n_channels=window[key].shape[-1],
dtype=window[key].dtype)
else:
# to be saved as csv file
n_elements = np.int64(
np.asarray(window[key]).size / n_samples)
table_header = [
'{}_{}'.format(key, idx)
for idx in range(n_elements)
] if n_elements > 1 else ['{}'.format(key)]
table_header += [
'coord_{}'.format(idx)
for idx in range(location.shape[-1])
]
self.csv_out[key] = self._initialise_empty_csv(
key_names=table_header)
for key in window:
if 'window' in key:
x_start, y_start, z_start, x_end, y_end, z_end = \
location_cropped[key][batch_id, 1:]
self.image_out[key][
x_start:x_end, y_start:y_end, z_start:z_end, ...] = \
window[key][batch_id, ...]
else:
window[key] = np.asarray(window[key]).reshape(
[n_samples, -1])
window_save = window[key][batch_id:batch_id + 1, :]
window_loc = location[batch_id:batch_id + 1, :]
csv_row = np.concatenate([window_save, window_loc], 1)
csv_row = csv_row.ravel()
key_names = self.csv_out[key].columns
self.csv_out[key] = self.csv_out[key].append(
OrderedDict(zip(key_names, csv_row)),
ignore_index=True)
return True
    def _initialise_empty_image(self, image_id, n_channels, dtype=float):
"""
Initialise an empty image in which to populate the output
:param image_id: image_id to be used in the reader
        :param n_channels: number of channels of the saved output (for
multimodal output)
:param dtype: datatype used for the saving
:return: the initialised empty image
"""
self.image_id = image_id
spatial_shape = self.input_image[self.name].shape[:3]
output_image_shape = spatial_shape + (n_channels, )
empty_image = np.zeros(output_image_shape, dtype=dtype)
for layer in self.reader.preprocessors:
if isinstance(layer, PadLayer):
empty_image, _ = layer(empty_image)
if self.fill_constant != 0.0:
empty_image[:] = self.fill_constant
return empty_image
def _initialise_empty_csv(self, key_names):
"""
        Initialise an empty dataframe with the given column names, to be
        populated with window outputs and saved as csv
        :param key_names: column names for the csv output
        :return: empty dataframe to be populated and saved as csv
"""
return pd.DataFrame(columns=key_names)
def _save_current_image(self):
"""
For all the outputs to be saved as images, go through the dictionary
and save the resulting output after reversing the initial preprocessing
:return:
"""
if self.input_image is None:
return
for layer in reversed(self.reader.preprocessors):
if isinstance(layer, PadLayer):
for i in self.image_out:
self.image_out[i], _ = layer.inverse_op(self.image_out[i])
if isinstance(layer, DiscreteLabelNormalisationLayer):
for i in self.image_out:
self.image_out[i], _ = layer.inverse_op(self.image_out[i])
subject_name = self.reader.get_subject_id(self.image_id)
for i in self.image_out:
filename = "{}_{}_{}.nii.gz".format(i, subject_name, self.postfix)
source_image_obj = self.input_image[self.name]
misc_io.save_data_array(self.output_path, filename,
self.image_out[i], source_image_obj,
self.output_interp_order)
self.log_inferred(subject_name, filename)
return
def _save_current_csv(self):
"""
For all output to be saved as csv, loop through the dictionary of
output and create the csv
:return:
"""
if self.input_image is None:
return
subject_name = self.reader.get_subject_id(self.image_id)
for i in self.csv_out:
filename = "{}_{}_{}.csv".format(i, subject_name, self.postfix)
misc_io.save_csv_array(self.output_path, filename, self.csv_out[i])
self.log_inferred(subject_name, filename)
return
|
convoy/data.py | bureado/batch-shipyard | 279 | 12622234 | # Copyright (c) Microsoft Corporation
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# compat imports
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from builtins import ( # noqa
bytes, dict, int, list, object, range, str, ascii, chr, hex, input,
next, oct, open, pow, round, super, filter, map, zip)
# stdlib imports
import datetime
import fnmatch
import logging
import math
import os
try:
import pathlib2 as pathlib
except ImportError:
import pathlib
try:
from shlex import quote as shellquote
except ImportError:
from pipes import quote as shellquote
import threading
import time
# non-stdlib imports
import azure.batch.models as batchmodels
# local imports
from . import crypto
from . import resource
from . import settings
from . import storage
from . import util
from .version import __version__
# create logger
logger = logging.getLogger(__name__)
util.setup_logger(logger)
# global defines
_BLOBXFER_VERSION = '1.9.4'
_MEGABYTE = 1048576
_MAX_READ_BLOCKSIZE_BYTES = 4194304
_FILE_SPLIT_PREFIX = '_shipyard-'
def _get_gluster_paths(config):
# type: (dict) -> Tuple[str, str]
"""Get Gluster paths
:param dict config: configuration dict
:rtype: tuple
:return: (gluster host path, gluster container path)
"""
gluster_host = None
gluster_container = None
sdv = settings.global_resources_shared_data_volumes(config)
for sdvkey in sdv:
if settings.is_shared_data_volume_gluster_on_compute(sdv, sdvkey):
gluster_host = '{}/{}'.format(
settings.get_host_mounts_path(False),
settings.get_gluster_on_compute_volume())
gluster_container = settings.shared_data_volume_container_path(
sdv, sdvkey).rstrip('/')
break
return (gluster_host, gluster_container)
def _convert_filter_to_blobxfer_option(includes, excludes):
# type: (list, list) -> str
"""Converts filters to blobxfer options
:param list includes: includes
:param list excludes: excludes
:rtype: str
:return: blobxfer options
"""
if util.is_not_empty(includes):
src_incl = []
for include in includes:
src_incl.append('--include \"{}\"'.format(include))
else:
src_incl = None
if util.is_not_empty(excludes):
src_excl = []
for exclude in excludes:
src_excl.append('--exclude \"{}\"'.format(exclude))
else:
src_excl = None
return '{} {}'.format(
' '.join(src_incl) if src_incl is not None else '',
' '.join(src_excl) if src_excl is not None else '',
).rstrip()
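# For illustration (hypothetical filters), _convert_filter_to_blobxfer_option(
# ['*.bin', 'data/*'], ['*.tmp']) returns the single string:
#     --include "*.bin" --include "data/*" --exclude "*.tmp"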
def _process_storage_input_data(config, input_data, on_task):
# type: (dict, dict, bool) -> str
"""Process Azure storage input data to ingress
:param dict config: configuration dict
:param dict input_data: config spec with input_data
:param bool on_task: if this is originating from a task spec
:rtype: list
:return: args to pass to blobxfer script
"""
# get gluster host/container paths
gluster_host, gluster_container = _get_gluster_paths(config)
# parse storage input data blocks
encrypt = settings.batch_shipyard_encryption_enabled(config)
args = []
for xfer in input_data:
storage_settings = settings.credentials_storage(
config, settings.data_storage_account_settings(xfer))
remote_path = settings.data_remote_path(xfer)
# derive container from remote_path
container = settings.data_container_from_remote_path(
xfer, rp=remote_path)
eo = settings.data_blobxfer_extra_options(xfer)
# append appropriate option for fshare
if settings.data_is_file_share(xfer) and '--mode file' not in eo:
eo = '--mode file {}'.format(eo)
if '--mode file' in eo:
# create saskey for file share with rl perm
saskey = storage.create_file_share_saskey(
storage_settings, container, 'ingress')
else:
# create saskey for container with rl perm
saskey = storage.create_blob_container_saskey(
storage_settings, container, 'ingress')
includes = settings.data_include(xfer)
excludes = settings.data_exclude(xfer)
# convert include/excludes into extra options
filters = _convert_filter_to_blobxfer_option(includes, excludes)
local_path = settings.data_local_path(xfer, on_task)
# auto replace container path for gluster with host path
if (util.is_not_empty(gluster_container) and
local_path.startswith(gluster_container)):
local_path = local_path.replace(gluster_container, gluster_host, 1)
# construct argument
# kind:encrypted:<sa:ep:saskey:remote_path>:local_path:eo
creds = crypto.encrypt_string(
encrypt,
'{},{},{},{}'.format(
storage_settings.account, storage_settings.endpoint,
saskey, remote_path),
config)
args.append('"{bxver},i,{enc},{creds},{lp},{eo}"'.format(
bxver=_BLOBXFER_VERSION,
enc=encrypt,
creds=creds,
lp=local_path,
eo=' '.join((filters, eo)).lstrip(),
))
return args
def _process_batch_input_data(config, input_data, on_task):
# type: (dict, dict, bool) -> str
"""Process Azure batch input data to ingress
:param dict config: configuration dict
:param dict input_data: config spec with input_data
:param bool on_task: if this is originating from a task spec
:rtype: list
:return: args to pass to task file mover
"""
# get batch creds
bc = settings.credentials_batch(config)
# fail (for now) if aad is being used
if util.is_none_or_empty(bc.account_key):
raise RuntimeError(
'cannot move Azure Batch task input data without an account key')
# construct arg
encrypt = settings.batch_shipyard_encryption_enabled(config)
args = []
for xfer in input_data:
jobid = settings.input_data_job_id(xfer)
taskid = settings.input_data_task_id(xfer)
include = settings.data_include(xfer)
if util.is_not_empty(include):
include = ';'.join(include)
else:
include = ''
exclude = settings.data_exclude(xfer)
if util.is_not_empty(exclude):
exclude = ';'.join(exclude)
else:
exclude = ''
local_path = settings.data_local_path(xfer, on_task)
creds = crypto.encrypt_string(
encrypt,
'{};{};{}'.format(
bc.account, bc.account_service_url, bc.account_key),
config)
# construct argument
# encrypt,creds,jobid,taskid,incl,excl,lp
args.append('"{},{},{},{},{},{},{}"'.format(
encrypt, creds, jobid, taskid, include, exclude, local_path))
return args
def process_input_data(config, bxfile, spec, on_task=False):
# type: (dict, tuple, dict, bool) -> str
"""Process input data to ingress
:param dict config: configuration dict
:param tuple bxfile: blobxfer script
:param dict spec: config spec with input_data
:param bool on_task: if this is originating from a task spec
:rtype: str
    :return: additional command
"""
tfmimage = 'mcr.microsoft.com/azure-batch/shipyard:{}-cargo'.format(
__version__)
is_windows = settings.is_windows_pool(config)
if is_windows:
bxcmd = ('powershell -ExecutionPolicy Unrestricted -command '
'%AZ_BATCH_NODE_STARTUP_DIR%\\wd\\{} {{}}').format(bxfile[0])
tfmimage = '{}-windows'.format(tfmimage)
tfmbind = (
'-v %AZ_BATCH_NODE_ROOT_DIR%:%AZ_BATCH_NODE_ROOT_DIR% '
'-w %AZ_BATCH_TASK_WORKING_DIR% '
'-e "AZ_BATCH_NODE_STARTUP_DIR='
'%AZ_BATCH_NODE_STARTUP_DIR%" '
)
tfmcmd = 'C:\\batch-shipyard\\task_file_mover.cmd'
tfmpre = ''
tfmpost = ''
else:
bxcmd = 'set -f; $AZ_BATCH_NODE_STARTUP_DIR/wd/{} {{}}; set +f'.format(
bxfile[0])
tfmbind = (
'-v $AZ_BATCH_NODE_ROOT_DIR:$AZ_BATCH_NODE_ROOT_DIR '
'-w $AZ_BATCH_TASK_WORKING_DIR '
'-e "AZ_BATCH_NODE_STARTUP_DIR='
'$AZ_BATCH_NODE_STARTUP_DIR" '
)
tfmcmd = '/opt/batch-shipyard/task_file_mover.sh'
tfmpre = 'set -f; '
tfmpost = '; set +f'
ret = []
input_data = settings.input_data(spec)
if util.is_not_empty(input_data):
for key in input_data:
if key == 'azure_storage':
args = _process_storage_input_data(
config, input_data[key], on_task)
if is_windows:
cmds = []
for arg in args:
cmds.append('""{}""'.format(arg))
args = cmds
ret.append(bxcmd.format(' '.join(args)))
elif key == 'azure_batch':
args = _process_batch_input_data(
config, input_data[key], on_task)
if is_windows:
cmds = []
for arg in args:
cmds.append('""{}""'.format(arg))
args = cmds
ret.append(
('{tfmpre}docker run --rm -t {tfmbind} {tfmimage} '
'{tfmcmd} {args}{tfmpost}').format(
tfmpre=tfmpre, tfmbind=tfmbind, tfmimage=tfmimage,
tfmcmd=tfmcmd, tfmpost=tfmpost,
args=' '.join(args))
)
else:
raise ValueError(
'unknown input_data method: {}'.format(key))
if len(ret) > 0:
return ';'.join(ret)
else:
return None
def _generate_batch_output_file_spec(
is_windows, separator, storage_settings, saskey, container,
remote_path, condition, local_path, include):
# type: (bool, str, settings.StorageCredentialsSettings, str, str, str,
# str, str, str) -> batchmodels.OutputFile
"""Generate Batch output file spec with given local path and filter
:param bool is_windows: is windows pool
:param str separator: dir separator
:param settings.StorageCredentialsSettings storage_settings:
storage settings
:param str saskey: sas key
:param str container: container
:param str remote_path: remote path
:param str condition: upload condition
:param str local_path: task local path
:param str include: include filter
:rtype: batchmodels.OutputFile
:return: Batch output file spec
"""
# set file pattern
if local_path.endswith(separator):
fp = ''.join((local_path, include))
else:
fp = separator.join((local_path, include))
# set upload condition
if condition == 'taskcompletion':
buc = batchmodels.OutputFileUploadCondition.task_completion
elif condition == 'taskfailure':
buc = batchmodels.OutputFileUploadCondition.task_failure
elif condition == 'tasksuccess':
buc = batchmodels.OutputFileUploadCondition.task_success
# strip container from remote path
rp = remote_path.split('/')
if len(rp) > 1:
rp = rp[1:]
if '*' not in fp and '?' not in fp:
# limited resolution of file path/pattern
if is_windows:
tmp = fp.replace(
'%AZ_BATCH_TASK_DIR%\\', '').replace(
'%AZ_BATCH_TASK_WORKING_DIR%\\', 'wd\\')
else:
tmp = fp.replace(
'$AZ_BATCH_TASK_DIR/', '').replace(
'$AZ_BATCH_TASK_WORKING_DIR/', 'wd/')
rp.append(tmp)
rp = '/'.join(rp)
else:
rp = ''
# generate spec
outfile = batchmodels.OutputFile(
file_pattern=fp,
destination=batchmodels.OutputFileDestination(
container=batchmodels.OutputFileBlobContainerDestination(
path=rp,
container_url='{}?{}'.format(
storage.generate_blob_container_uri(
storage_settings, container),
saskey)
)
),
upload_options=batchmodels.OutputFileUploadOptions(
upload_condition=buc
),
)
return outfile
def _process_storage_output_data(config, native, is_windows, output_data):
# type: (dict, bool, bool, dict) -> str
"""Process output data to egress to Azure storage
:param dict config: configuration dict
:param bool native: is native container pool
:param bool is_windows: is windows pool
:param dict output_data: config spec with output_data
:rtype: list
:return: OutputFiles or args to pass to blobxfer script
"""
# get gluster host/container paths and encryption settings
gluster_host, gluster_container = _get_gluster_paths(config)
encrypt = settings.batch_shipyard_encryption_enabled(config)
# parse storage output data blocks
args = []
for xfer in output_data:
storage_settings = settings.credentials_storage(
config, settings.data_storage_account_settings(xfer))
remote_path = settings.data_remote_path(xfer)
# derive container from remote_path
container = settings.data_container_from_remote_path(
xfer, rp=remote_path)
eo = settings.data_blobxfer_extra_options(xfer)
if native and util.is_not_empty(eo):
raise ValueError(
'native container pool does not support '
'blobxfer_extra_options')
# append appropriate option for fshare
if settings.data_is_file_share(xfer) and '--mode file' not in eo:
eo = '--mode file {}'.format(eo)
if '--mode file' in eo:
if native:
raise ValueError(
'native container pool does not support fileshares')
# create saskey for file share with rwdl perm
saskey = storage.create_file_share_saskey(
storage_settings, container, 'egress', create_share=True)
else:
# create saskey for container with rwdl perm
saskey = storage.create_blob_container_saskey(
storage_settings, container, 'egress', create_container=True)
includes = settings.data_include(xfer)
excludes = settings.data_exclude(xfer)
condition = settings.data_condition(xfer)
local_path = settings.data_local_path(xfer, True, task_wd=False)
# auto replace container path for gluster with host path
if (util.is_not_empty(gluster_container) and
local_path.startswith(gluster_container)):
local_path = local_path.replace(gluster_container, gluster_host, 1)
if native:
if util.is_not_empty(excludes):
raise ValueError(
'native container pool does not support excludes')
if is_windows:
separator = '\\'
else:
separator = '/'
if util.is_none_or_empty(includes):
includes = ['**{}*'.format(separator)]
for include in includes:
args.append(_generate_batch_output_file_spec(
is_windows, separator, storage_settings, saskey,
container, remote_path, condition, local_path, include))
else:
# convert include/excludes into extra options
filters = _convert_filter_to_blobxfer_option(includes, excludes)
# construct argument
# kind:encrypted:<sa:ep:saskey:remote_path>:local_path:eo
creds = crypto.encrypt_string(
encrypt,
'{},{},{},{}'.format(
storage_settings.account, storage_settings.endpoint,
saskey, remote_path),
config)
args.append('"{bxver},e,{enc},{creds},{lp},{eo},{cond}"'.format(
bxver=_BLOBXFER_VERSION,
enc=encrypt,
creds=creds,
lp=local_path,
eo=' '.join((filters, eo)).lstrip(),
cond=condition,
))
return args
def process_output_data(config, bxfile, spec):
# type: (dict, tuple, dict) -> str
"""Process output data to egress
:param dict config: configuration dict
:param tuple bxfile: blobxfer script
:param dict spec: config spec with input_data
:rtype: str or list
    :return: additional commands or list of OutputFiles
"""
native = settings.is_native_docker_pool(config)
is_windows = settings.is_windows_pool(config)
if is_windows:
bxcmd = ('powershell -ExecutionPolicy Unrestricted -command '
'%AZ_BATCH_NODE_STARTUP_DIR%\\wd\\{} {{}}').format(bxfile[0])
else:
bxcmd = 'set -f; $AZ_BATCH_NODE_STARTUP_DIR/wd/{} {{}}; set +f'.format(
bxfile[0])
ret = []
output_data = settings.output_data(spec)
if util.is_not_empty(output_data):
for key in output_data:
if key == 'azure_storage':
args = _process_storage_output_data(
config, native, is_windows, output_data[key])
if native:
ret.extend(args)
else:
if is_windows:
cmds = []
for arg in args:
cmds.append('""{}""'.format(arg))
args = cmds
ret.append(bxcmd.format(' '.join(args)))
else:
raise ValueError(
'unknown output_data method: {}'.format(key))
if len(ret) > 0:
if native:
return ret
else:
return ';'.join(ret)
else:
return None
def _singlenode_transfer(dest, src, dst, username, ssh_private_key, rls):
# type: (DestinationSettings, str, str, pathlib.Path, dict) -> None
"""Transfer data to a single node
:param DestinationSettings dest: destination settings
:param str src: source path
:param str dst: destination path
:param str username: username
    :param pathlib.Path ssh_private_key: ssh private key
:param dict rls: remote login settings
"""
# get remote settings
_rls = next(iter(rls.values()))
ip = _rls.remote_login_ip_address
port = _rls.remote_login_port
del _rls
# modify dst with relative dest
if util.is_not_empty(dest.relative_destination_path):
dst = '{}{}'.format(dst, dest.relative_destination_path)
# create relative path on host
logger.debug('creating remote directory: {}'.format(dst))
dirs = ['mkdir -p {}'.format(dst)]
mkdircmd = ('ssh -T -x -o StrictHostKeyChecking=no '
'-o UserKnownHostsFile={} -i {} -p {} {}@{} {}'.format(
os.devnull, ssh_private_key, port, username, ip,
util.wrap_commands_in_shell(dirs)))
rc = util.subprocess_with_output(
mkdircmd, shell=True, suppress_output=True)
if rc == 0:
logger.info('remote directories created on {}'.format(dst))
else:
logger.error('remote directory creation failed')
return
del dirs
# determine if recursive flag must be set
psrc = pathlib.Path(src)
recursive = '-r' if psrc.is_dir() else ''
# set command source path and adjust dst path
if recursive:
cmdsrc = '.'
else:
cmdsrc = shellquote(src)
# transfer data
if dest.data_transfer.method == 'scp':
cmd = ('scp -o StrictHostKeyChecking=no '
'-o UserKnownHostsFile={} -p {} {} -i {} '
'-P {} {} {}@{}:"{}"'.format(
os.devnull, dest.data_transfer.scp_ssh_extra_options,
recursive, ssh_private_key.resolve(), port, cmdsrc,
username, ip, shellquote(dst)))
elif dest.data_transfer.method == 'rsync+ssh':
cmd = ('rsync {} {} -e "ssh -T -x -o StrictHostKeyChecking=no '
'-o UserKnownHostsFile={} {} -i {} -p {}" {} {}@{}:"{}"'.format(
dest.data_transfer.rsync_extra_options, recursive,
os.devnull, dest.data_transfer.scp_ssh_extra_options,
ssh_private_key.resolve(), port, cmdsrc, username, ip,
shellquote(dst)))
else:
raise ValueError('Unknown transfer method: {}'.format(
dest.data_transfer.method))
logger.info('begin ingressing data from {} to {}'.format(
src, dst))
start = datetime.datetime.now()
rc = util.subprocess_with_output(
cmd, shell=True, cwd=src if recursive else None)
diff = datetime.datetime.now() - start
if rc == 0:
logger.info(
'finished ingressing data from {0} to {1} in {2:.2f} sec'.format(
src, dst, diff.total_seconds()))
else:
logger.error(
'data ingress from {} to {} failed with return code: {}'.format(
src, dst, rc))
def _multinode_transfer(
method, dest, source, dst, username, ssh_private_key, rls, mpt):
# type: (str, DestinationSettings, SourceSettings, str, str,
# pathlib.Path, dict, int) -> None
"""Transfer data to multiple destination nodes simultaneously
:param str method: transfer method
:param DestinationSettings dest: destination settings
:param SourceSettings source: source settings
:param str dst: destination path
:param str username: username
    :param pathlib.Path ssh_private_key: ssh private key
:param dict rls: remote login settings
:param int mpt: max parallel transfers per node
"""
src = source.path
src_incl = source.include
src_excl = source.exclude
psrc = pathlib.Path(src)
# if source isn't a directory, convert it using src_incl
if not psrc.is_dir():
src_excl = None
src_incl = [src]
src = str(psrc.parent)
psrc = psrc.parent
# if split is specified, force to multinode_scp
if (dest.data_transfer.split_files_megabytes is not None and
method != 'multinode_scp'):
logger.warning('forcing transfer method to multinode_scp with split')
method = 'multinode_scp'
buckets = {}
files = {}
rcodes = {}
spfiles = []
spfiles_count = {}
spfiles_count_lock = threading.Lock()
for rkey in rls:
buckets[rkey] = 0
files[rkey] = []
rcodes[rkey] = None
# walk the directory structure
# 1. construct a set of dirs to create on the remote side
# 2. binpack files to different nodes
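    # (greedy min-bucket packing: each file, or chunk when splitting, goes to
    # the node whose bucket currently holds the fewest bytes; e.g. with two
    # nodes and files of 5, 3 and 2 MiB encountered in that order, node 0
    # receives {5 MiB} and node 1 receives {3 MiB, 2 MiB})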
total_files = 0
dirs = set()
if dest.relative_destination_path is not None:
dirs.add(dest.relative_destination_path)
for entry in util.scantree(src):
rel = pathlib.Path(entry.path).relative_to(psrc)
sparent = str(pathlib.Path(entry.path).relative_to(psrc).parent)
if entry.is_file():
srel = str(rel)
# check filters
if src_excl is not None:
inc = not any([fnmatch.fnmatch(srel, x) for x in src_excl])
else:
inc = True
if src_incl is not None:
inc = any([fnmatch.fnmatch(srel, x) for x in src_incl])
if not inc:
logger.debug('skipping file {} due to filters'.format(
entry.path))
continue
if dest.relative_destination_path is None:
dstpath = '{}{}'.format(dst, rel)
else:
dstpath = '{}{}/{}'.format(
dst, dest.relative_destination_path, rel)
# get key of min bucket values
fsize = entry.stat().st_size
if (dest.data_transfer.split_files_megabytes is not None and
fsize > dest.data_transfer.split_files_megabytes):
nsplits = int(math.ceil(
fsize / dest.data_transfer.split_files_megabytes))
lpad = int(math.log10(nsplits)) + 1
spfiles.append(dstpath)
spfiles_count[dstpath] = nsplits
n = 0
curr = 0
while True:
end = curr + dest.data_transfer.split_files_megabytes
if end > fsize:
end = fsize
key = min(buckets, key=buckets.get)
buckets[key] += (end - curr)
if n == 0:
dstfname = dstpath
else:
dstfname = '{}.{}{}'.format(
dstpath, _FILE_SPLIT_PREFIX, str(n).zfill(lpad))
files[key].append((entry.path, dstfname, curr, end))
if end == fsize:
break
curr = end
n += 1
else:
key = min(buckets, key=buckets.get)
buckets[key] += fsize
files[key].append((entry.path, dstpath, None, None))
total_files += 1
# add directory to create
if sparent != '.':
if dest.relative_destination_path is None:
dirs.add(sparent)
else:
dirs.add('{}/{}'.format(
dest.relative_destination_path, sparent))
total_size = sum(buckets.values())
if total_files == 0:
logger.error('no files to ingress')
return
# create remote directories via ssh
if len(dirs) == 0:
logger.debug('no remote directories to create')
else:
logger.debug('creating remote directories: {}'.format(dirs))
dirs = ['mkdir -p {}'.format(x) for x in list(dirs)]
dirs.insert(0, 'cd {}'.format(dst))
_rls = next(iter(rls.values()))
ip = _rls.remote_login_ip_address
port = _rls.remote_login_port
del _rls
mkdircmd = ('ssh -T -x -o StrictHostKeyChecking=no '
'-o UserKnownHostsFile={} -i {} -p {} {}@{} {}'.format(
os.devnull, ssh_private_key, port, username, ip,
util.wrap_commands_in_shell(dirs)))
rc = util.subprocess_with_output(
mkdircmd, shell=True, suppress_output=True)
if rc == 0:
logger.info('remote directories created on {}'.format(dst))
else:
logger.error('remote directory creation failed')
return
del ip
del port
logger.info(
'ingress data: {0:.4f} MiB in {1} files to transfer, using {2} max '
'parallel transfers per node'.format(
total_size / _MEGABYTE, total_files, mpt))
logger.info('begin ingressing data from {} to {}'.format(src, dst))
nodekeys = list(buckets.keys())
threads = []
start = datetime.datetime.now()
for i in range(0, len(buckets)):
nkey = nodekeys[i]
thr = threading.Thread(
target=_multinode_thread_worker,
args=(method, mpt, nkey, rcodes, files[nkey],
spfiles_count, spfiles_count_lock,
rls[nkey].remote_login_ip_address,
rls[nkey].remote_login_port, username, ssh_private_key,
dest.data_transfer.scp_ssh_extra_options,
dest.data_transfer.rsync_extra_options)
)
threads.append(thr)
thr.start()
for i in range(0, len(buckets)):
threads[i].join()
diff = datetime.datetime.now() - start
del threads
success = True
for nkey in rcodes:
if rcodes[nkey] != 0:
logger.error('data ingress failed to node: {}'.format(nkey))
success = False
if success:
logger.info(
            'finished ingressing {0:.4f} MiB of data in {1} files from {2} to '
'{3} in {4:.2f} sec ({5:.3f} Mbit/s)'.format(
total_size / _MEGABYTE, total_files, src, dst,
diff.total_seconds(),
(total_size * 8 / 1e6) / diff.total_seconds()))
def _spawn_next_transfer(
method, file, ip, port, username, ssh_private_key, eo, reo,
procs, psprocs, psdst):
# type: (str, tuple, str, int, str, pathlib.Path, str, str, list,
# list, list) -> None
"""Spawn the next transfer given a file tuple
:param str method: transfer method
:param tuple file: file tuple
:param str ip: ip address
:param int port: port
:param str username: username
    :param pathlib.Path ssh_private_key: ssh private key
:param str eo: extra options
:param str reo: rsync extra options
:param list procs: process list
:param list psprocs: split files process list
:param list psdst: split files dstpath list
"""
src = file[0]
dst = file[1]
begin = file[2]
end = file[3]
if method == 'multinode_scp':
if begin is None and end is None:
cmd = ('scp -o StrictHostKeyChecking=no '
'-o UserKnownHostsFile={} -p {} -i {} '
'-P {} {} {}@{}:"{}"'.format(
os.devnull, eo, ssh_private_key.resolve(), port,
shellquote(src), username, ip, shellquote(dst)))
else:
cmd = ('ssh -T -x -o StrictHostKeyChecking=no '
'-o UserKnownHostsFile={} {} -i {} '
'-p {} {}@{} \'cat > "{}"\''.format(
os.devnull, eo, ssh_private_key.resolve(), port,
username, ip, shellquote(dst)))
elif method == 'multinode_rsync+ssh':
if begin is not None or end is not None:
raise RuntimeError('cannot rsync with file offsets')
cmd = ('rsync {} -e "ssh -T -x -o StrictHostKeyChecking=no '
'-o UserKnownHostsFile={} {} -i {} -p {}" {} {}@{}:"{}"'.format(
reo, os.devnull, eo, ssh_private_key.resolve(), port,
shellquote(src), username, ip, shellquote(dst)))
else:
raise ValueError('Unknown transfer method: {}'.format(method))
if begin is None and end is None:
procs.append(util.subprocess_nowait(cmd, shell=True))
else:
proc = util.subprocess_attach_stdin(cmd, shell=True)
with open(src, 'rb') as f:
f.seek(begin, 0)
curr = begin
while curr < end:
buf = f.read(_MAX_READ_BLOCKSIZE_BYTES)
if buf is None or len(buf) == 0:
break
proc.stdin.write(buf)
curr += len(buf)
proc.stdin.close()
psprocs.append(proc)
dstsp = dst.split('.')
if dstsp[-1].startswith(_FILE_SPLIT_PREFIX):
dstpath = '.'.join(dstsp[:-1])
else:
dstpath = dst
psdst.append(dstpath)
def _multinode_thread_worker(
method, mpt, node_id, rcodes, files, spfiles_count,
spfiles_count_lock, ip, port, username, ssh_private_key, eo, reo):
# type: (str, int, str, dict, list, dict, threading.Lock, str, int, str,
# pathlib.Path, str, str) -> None
"""Worker thread code for data transfer to a node with a file list
:param str method: transfer method
:param int mpt: max parallel transfers per node
:param str node_id: node id
:param dict rcodes: return codes dict
:param list files: list of files to copy
:param dict spfiles_count: split files count dict
:param threading.Lock spfiles_count_lock: split files count lock
:param str ip: ip address
:param int port: port
:param str username: username
    :param pathlib.Path ssh_private_key: ssh private key
:param str eo: extra options
:param str reo: rsync extra options
"""
procs = []
psprocs = []
psdst = []
completed = 0
i = 0
while completed != len(files):
xfers = len(procs) + len(psprocs)
while xfers < mpt and i < len(files):
file = files[i]
_spawn_next_transfer(
method, file, ip, port, username, ssh_private_key, eo, reo,
procs, psprocs, psdst)
xfers = len(procs) + len(psprocs)
i += 1
plist, n, rc = util.subprocess_wait_multi(psprocs, procs)
if rc != 0:
logger.error(
'data ingress to {} failed with return code: {}'.format(
node_id, rc))
rcodes[node_id] = rc
return
if plist == psprocs:
dstpath = psdst[n]
del psdst[n]
del psprocs[n]
join = False
with spfiles_count_lock:
spfiles_count[dstpath] = spfiles_count[dstpath] - 1
if spfiles_count[dstpath] == 0:
join = True
if join:
logger.debug('joining files on compute node to {}'.format(
dstpath))
cmds = [
'cat {}.* >> {}'.format(dstpath, dstpath),
'rm -f {}.*'.format(dstpath)
]
joincmd = ('ssh -T -x -o StrictHostKeyChecking=no '
'-o UserKnownHostsFile={} -i {} '
'-p {} {}@{} {}'.format(
os.devnull, ssh_private_key, port, username,
ip, util.wrap_commands_in_shell(cmds)))
procs.append(
util.subprocess_nowait(joincmd, shell=True))
else:
completed += 1
else:
del procs[n]
completed += 1
rcodes[node_id] = 0
def _azure_blob_storage_transfer(storage_settings, data_transfer, source):
# type: (settings.StorageCredentialsSettings,
# settings.DataTransferSettings,
# settings.SourceSettings) -> None
"""Initiate an azure blob storage transfer
:param settings.StorageCredentialsSettings storage_settings:
storage settings
:param settings.DataTransferSettings data_transfer: data transfer settings
:param settings.SourceSettings source: source settings
"""
eo = data_transfer.blobxfer_extra_options
# append appropriate option for fshare
if data_transfer.is_file_share and '--mode file' not in eo:
eo = '--mode file {}'.format(eo)
thr = threading.Thread(
target=_wrap_blobxfer_subprocess,
args=(
storage_settings,
data_transfer.remote_path,
source,
eo,
)
)
thr.start()
return thr
def _wrap_blobxfer_subprocess(storage_settings, remote_path, source, eo):
# type: (StorageCredentialsSettings, str, SourceSettings, str) -> None
"""Wrapper function for blobxfer
:param StorageCredentialsSettings storage_settings: storage settings
:param str remote_path: remote path to transfer to
:param SourceSettings source: source settings
:param str eo: blobxfer extra options
"""
# generate include/exclude options
filters = _convert_filter_to_blobxfer_option(
source.include, source.exclude)
# get correct path
psrc = pathlib.Path(source.path)
cwd = str(psrc.parent)
rsrc = psrc.relative_to(psrc.parent)
# generate env
env = os.environ.copy()
env['BLOBXFER_STORAGE_ACCOUNT_KEY'] = storage_settings.account_key
# set cmd
cmd = [
('blobxfer upload --storage-account {sa} --remote-path {rp} '
'--local-path {lp} --endpoint {ep} --no-progress-bar '
'{filters} {eo}').format(
sa=storage_settings.account,
rp=remote_path,
lp=rsrc,
ep=storage_settings.endpoint,
filters=filters,
eo=eo)
]
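    # with hypothetical values, the command built above looks roughly like:
    #   blobxfer upload --storage-account mysa --remote-path mycontainer/dst
    #     --local-path srcdir --endpoint core.windows.net --no-progress-bar
    #     --include "*.bin" <extra options>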
logger.info('begin ingressing data from {} to remote path {}'.format(
source.path, remote_path))
proc = util.subprocess_nowait_pipe_stdout(
util.wrap_local_commands_in_shell(cmd), shell=True, cwd=cwd, env=env)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
if stderr is not None:
logger.error(stderr.decode('utf8'))
if stdout is not None:
logger.error(stdout.decode('utf8'))
logger.error('data ingress failed from {} to remote path {}'.format(
source.path, remote_path))
else:
if stdout is not None:
logger.debug(stdout.decode('utf8'))
def wait_for_storage_threads(storage_threads):
# type: (list) -> None
"""Wait for storage processes to complete
:param list storage_threads: list of storage threads
"""
if storage_threads is None:
return
i = 0
nthreads = len(storage_threads)
while nthreads > 0:
alive = sum(thr.is_alive() for thr in storage_threads)
if alive > 0:
i += 1
if i % 10 == 0:
i = 0
logger.debug(
'waiting for Azure Blob Storage transfer processes '
'to complete: {} active, {} completed'.format(
alive, nthreads - alive))
time.sleep(1)
else:
for thr in storage_threads:
thr.join()
if nthreads > 0:
logger.info('Azure Blob/File Storage transfer completed')
break
def ingress_data(
batch_client, compute_client, network_client, config, rls=None,
kind=None, total_vm_count=None, to_fs=None):
# type: (batch.BatchServiceClient,
# azure.mgmt.compute.ComputeManagementClient, dict, dict, str,
# int, str) -> list
"""Ingresses data into Azure
:param batch_client: The batch client to use.
:type batch_client: `batchserviceclient.BatchServiceClient`
:param azure.mgmt.compute.ComputeManagementClient compute_client:
compute client
:param azure.mgmt.network.NetworkManagementClient network_client:
network client
:param dict config: configuration dict
:param dict rls: remote login settings
:param str kind: 'all', 'shared', 'storage', or 'remotefs'
:param int total_vm_count: total current vm count
:param str to_fs: to remote filesystem
:rtype: list
:return: list of storage threads
"""
storage_threads = []
files = settings.global_resources_files(config)
if util.is_none_or_empty(files):
logger.info('no files to ingress detected')
return storage_threads
pool = settings.pool_settings(config)
is_windows = settings.is_windows_pool(config)
for fdict in files:
source = settings.files_source_settings(fdict)
dest = settings.files_destination_settings(fdict)
if (dest.shared_data_volume is not None and
dest.storage_account_settings is not None):
raise RuntimeError(
'cannot specify both shared data volume and storage for the '
'destination for source: {}'.format(source.path))
direct_single_node = False
if dest.relative_destination_path is not None:
if dest.storage_account_settings is not None:
raise RuntimeError(
'cannot specify a relative destination path for ingress '
'to storage; use the --collate option in blobxfer '
'instead.')
# check if this is going to a single vm
if dest.shared_data_volume is None:
if total_vm_count == 1:
direct_single_node = True
elif kind == 'storage':
# this is to prevent total_vm_count check below for
# non shared/all targets and will force continuation
# of the loop below
direct_single_node = True
elif total_vm_count is None:
raise ValueError('total_vm_count is not set')
else:
raise RuntimeError(
'Cannot ingress data directly into compute node '
'host for pools with more than one node. Please use '
'a shared data volume as the ingress destination '
'instead.')
if dest.shared_data_volume is not None or direct_single_node:
if kind == 'storage':
logger.warning(
'skipping data ingress from {} to {} for pool as ingress '
'to shared file system not specified'.format(
source.path, dest.shared_data_volume))
continue
if is_windows:
logger.error(
                    ('cannot ingress data from {} to pool {} with Windows '
'compute nodes').format(source.path, pool.id))
continue
# get rfs settings
rfs = None
dst_rfs = False
# set base dst path
dst = '{}/batch/tasks/mounts'.format(
settings.temp_disk_mountpoint(config))
# convert shared to actual path
if not direct_single_node:
sdv = settings.global_resources_shared_data_volumes(config)
for sdvkey in sdv:
if sdvkey == dest.shared_data_volume:
if settings.is_shared_data_volume_gluster_on_compute(
sdv, sdvkey):
if kind == 'remotefs':
continue
dst = '{}/{}/'.format(
dst, settings.get_gluster_on_compute_volume())
elif settings.is_shared_data_volume_storage_cluster(
sdv, sdvkey):
if kind != 'remotefs' or sdvkey != to_fs:
continue
if rfs is None:
rfs = settings.remotefs_settings(config, to_fs)
dst = rfs.storage_cluster.file_server.mountpoint
# add trailing directory separator if needed
if dst[-1] != '/':
dst = dst + '/'
dst_rfs = True
else:
raise RuntimeError(
'data ingress to {} not supported'.format(
sdvkey))
break
# skip entries that are a mismatch if remotefs transfer
# is selected
if kind == 'remotefs':
if not dst_rfs:
continue
else:
if dst_rfs:
continue
# set ssh info
if dst_rfs:
username = rfs.storage_cluster.ssh.username
# retrieve public ips from all vms in named storage cluster
rls = {}
for i in range(rfs.storage_cluster.vm_count):
vm_name = '{}-vm{}'.format(
rfs.storage_cluster.hostname_prefix, i)
vm = compute_client.virtual_machines.get(
resource_group_name=rfs.storage_cluster.resource_group,
vm_name=vm_name,
)
_, pip = resource.get_nic_and_pip_from_virtual_machine(
network_client, rfs.storage_cluster.resource_group, vm)
# create compute node rls settings with sc vm ip/port
rls[vm_name] = \
batchmodels.ComputeNodeGetRemoteLoginSettingsResult(
remote_login_ip_address=pip.ip_address,
remote_login_port=22)
else:
username = pool.ssh.username
if rls is None:
logger.warning(
'skipping data ingress from {} to {} for pool with no '
'remote login settings or non-existent pool'.format(
source.path, dest.shared_data_volume))
continue
if username is None:
raise RuntimeError(
'cannot ingress data to shared data volume without a '
'valid SSH user')
# try to get valid ssh private key (from various config blocks)
ssh_private_key = dest.data_transfer.ssh_private_key
if ssh_private_key is None:
ssh_private_key = pool.ssh.ssh_private_key
if ssh_private_key is None:
ssh_private_key = pathlib.Path(crypto.get_ssh_key_prefix())
if not ssh_private_key.exists():
raise RuntimeError(
'specified SSH private key is invalid or does not '
'exist')
logger.debug('using ssh_private_key from: {}'.format(
ssh_private_key))
if (dest.data_transfer.method == 'scp' or
dest.data_transfer.method == 'rsync+ssh'):
# split/source include/exclude will force multinode
# transfer with mpt=1
if (dest.data_transfer.split_files_megabytes is not None or
source.include is not None or
source.exclude is not None):
_multinode_transfer(
'multinode_' + dest.data_transfer.method, dest,
source, dst, username, ssh_private_key, rls, 1)
else:
_singlenode_transfer(
dest, source.path, dst, username, ssh_private_key,
rls)
elif (dest.data_transfer.method == 'multinode_scp' or
dest.data_transfer.method == 'multinode_rsync+ssh'):
_multinode_transfer(
dest.data_transfer.method, dest, source, dst,
username, ssh_private_key, rls,
dest.data_transfer.max_parallel_transfers_per_node)
else:
raise RuntimeError(
'unknown transfer method: {}'.format(
dest.data_transfer.method))
elif dest.storage_account_settings is not None:
if kind == 'shared':
logger.warning(
'skipping data ingress from {} for pool as ingress '
'to Azure Blob/File Storage not specified'.format(
source.path))
continue
thr = _azure_blob_storage_transfer(
settings.credentials_storage(
config, dest.storage_account_settings),
dest.data_transfer, source)
storage_threads.append(thr)
else:
raise RuntimeError(
'invalid file transfer configuration: {}'.format(fdict))
return storage_threads
|
common/dataset/__init__.py | AlexTaehwan/kgpolicy | 111 | 12622250 | from .preprocess import CKGData
|
tfjs_graph_converter/convert_fused_depthwise.py | httpsgithu/tfjs-to-tf | 114 | 12622271 | # SPDX-License-Identifier: MIT
# Copyright © 2020 <NAME>
"""Functions to rewrite FusedDepthwiseConv2d as native TensorFlow operations"""
import tfjs_graph_converter.graph_rewrite_util as util
from tfjs_graph_converter.graph_rewrite_util import generate_name_from
def _split_fused_depthwise(node: util.NodeDef, input_node_map: util.NameToNode,
weight_mods: util.WeightModifiers) -> util.NodeList:
"""Decompose fused op into DepthwiseConv2dNative + BiasAdd [+ Activation]
"""
fused_ops = list(s.decode('utf-8') for s in node.attr['fused_ops'].list.s)
inputs = node.input
names_used = set()
def node_name(node_index):
"""Return unique node names for sub-operations by appending fused-op"""
i = min(node_index, len(inputs)-1) # PReLU has 4 inputs, others only 3
name = generate_name_from(inputs[i], input_node_map)
if name in names_used:
name = generate_name_from(name, input_node_map,
suffix=fused_ops[node_index-2])
names_used.add(name)
return name
op = 'DepthwiseConv2dNative'
depthwise = util.make_op_node(op, inputs[0:2], node_name(1))
depthwise = util.copy_op_attrs(source=node, target=depthwise)
op = fused_ops[0]
bias_add = util.make_op_node(op, [depthwise, inputs[2]], node_name(2))
bias_add = util.copy_op_attrs(source=node, target=bias_add)
node_list = [depthwise, bias_add]
if len(fused_ops) > 1:
# we have an activation function
op = fused_ops[1]
input_nodes = [bias_add] + inputs[3:]
if util.get_op_def(op) is None:
# unsupported activation function - just copy type attribute
dtype = depthwise.attr['T'].type
activation = util.make_op_node(op, input_nodes, node_name(3),
dtype)
else:
# supported activation function - copy applicable attributes
activation = util.make_op_node(op, input_nodes, node_name(3))
activation = util.copy_op_attrs(source=node, target=activation)
node_list.append(activation)
return node_list
def split_fused_depthwise(input_graph_def: util.GraphDef) -> util.GraphDef:
"""Decompose all fused depthwise conv2d operations into separate operations
This function looks for fused depthwise operations and splits matching
nodes into individual operations.
Fused activation functions that aren't supported (e.g. 'Prelu') can be
replaced afterwards in a separate processing step.
Args:
input_graph_def: TF graph_def proto to be processed
Returns:
Updated copy of the input graph with matching nodes replaced by
individual operations
"""
return util.replace_matching_nodes(input_graph_def,
util.is_fused_depthwise,
_split_fused_depthwise)
|
redis/commands/json/__init__.py | utkarshgupta137/redis-py | 483 | 12622288 |
from json import JSONDecodeError, JSONDecoder, JSONEncoder
import redis
from ..helpers import nativestr
from .commands import JSONCommands
from .decoders import bulk_of_jsons, decode_list
class JSON(JSONCommands):
"""
Create a client for talking to json.
:param decoder:
:type json.JSONDecoder: An instance of json.JSONDecoder
:param encoder:
:type json.JSONEncoder: An instance of json.JSONEncoder
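    Usage example (a sketch; assumes a reachable Redis server with the
    RedisJSON module loaded):
        r = redis.Redis()
        r.json().set('doc', '$', {'hello': 'world'})
        r.json().get('doc')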
"""
def __init__(
self, client, version=None, decoder=JSONDecoder(), encoder=JSONEncoder()
):
"""
Create a client for talking to json.
:param decoder:
:type json.JSONDecoder: An instance of json.JSONDecoder
:param encoder:
:type json.JSONEncoder: An instance of json.JSONEncoder
"""
# Set the module commands' callbacks
self.MODULE_CALLBACKS = {
"JSON.CLEAR": int,
"JSON.DEL": int,
"JSON.FORGET": int,
"JSON.GET": self._decode,
"JSON.MGET": bulk_of_jsons(self._decode),
"JSON.SET": lambda r: r and nativestr(r) == "OK",
"JSON.NUMINCRBY": self._decode,
"JSON.NUMMULTBY": self._decode,
"JSON.TOGGLE": self._decode,
"JSON.STRAPPEND": self._decode,
"JSON.STRLEN": self._decode,
"JSON.ARRAPPEND": self._decode,
"JSON.ARRINDEX": self._decode,
"JSON.ARRINSERT": self._decode,
"JSON.ARRLEN": self._decode,
"JSON.ARRPOP": self._decode,
"JSON.ARRTRIM": self._decode,
"JSON.OBJLEN": self._decode,
"JSON.OBJKEYS": self._decode,
"JSON.RESP": self._decode,
"JSON.DEBUG": self._decode,
}
self.client = client
self.execute_command = client.execute_command
self.MODULE_VERSION = version
for key, value in self.MODULE_CALLBACKS.items():
self.client.set_response_callback(key, value)
self.__encoder__ = encoder
self.__decoder__ = decoder
def _decode(self, obj):
"""Get the decoder."""
if obj is None:
return obj
try:
x = self.__decoder__.decode(obj)
if x is None:
raise TypeError
return x
except TypeError:
try:
return self.__decoder__.decode(obj.decode())
except AttributeError:
return decode_list(obj)
except (AttributeError, JSONDecodeError):
return decode_list(obj)
def _encode(self, obj):
"""Get the encoder."""
return self.__encoder__.encode(obj)
def pipeline(self, transaction=True, shard_hint=None):
"""Creates a pipeline for the JSON module, that can be used for executing
JSON commands, as well as classic core commands.
Usage example:
r = redis.Redis()
pipe = r.json().pipeline()
pipe.jsonset('foo', '.', {'hello!': 'world'})
pipe.jsonget('foo')
pipe.jsonget('notakey')
"""
if isinstance(self.client, redis.RedisCluster):
p = ClusterPipeline(
nodes_manager=self.client.nodes_manager,
commands_parser=self.client.commands_parser,
startup_nodes=self.client.nodes_manager.startup_nodes,
result_callbacks=self.client.result_callbacks,
cluster_response_callbacks=self.client.cluster_response_callbacks,
cluster_error_retry_attempts=self.client.cluster_error_retry_attempts,
read_from_replicas=self.client.read_from_replicas,
reinitialize_steps=self.client.reinitialize_steps,
lock=self.client._lock,
)
else:
p = Pipeline(
connection_pool=self.client.connection_pool,
response_callbacks=self.MODULE_CALLBACKS,
transaction=transaction,
shard_hint=shard_hint,
)
p._encode = self._encode
p._decode = self._decode
return p
class ClusterPipeline(JSONCommands, redis.cluster.ClusterPipeline):
"""Cluster pipeline for the module."""
class Pipeline(JSONCommands, redis.client.Pipeline):
"""Pipeline for the module."""
|
vivisect/tests/testreports.py | pombredanne/vivisect | 716 | 12622294 | import unittest
import vivisect.reports as v_reports
import vivisect.tests.helpers as helpers
class ReportsTest(unittest.TestCase):
'''
Test each of the base report modules.
'''
@classmethod
def setUpClass(cls):
cls.vw = helpers.getTestWorkspace('windows', 'i386', 'helloworld.exe')
def test_overlap(self):
cols, retn = v_reports.runReportModule(self.vw, 'vivisect.reports.overlaplocs')
self.assertEqual(cols, (("Overlap Size", int),
("This Location", str),
("Other Location", str)))
self.assertGreater(len(retn), 0)
for va, meta in retn.items():
size, baserepr, othrrepr = meta
self.assertGreater(size, 0)
self.assertNotEqual(baserepr, '')
self.assertNotEqual(othrrepr, '')
def test_undef(self):
cols, retn = v_reports.runReportModule(self.vw, 'vivisect.reports.undeftargets')
self.assertEqual(cols, (("Bytes", str),
("Name", str)))
self.assertGreater(len(retn), 0)
for va, undef in retn.items():
byts, mesg = undef
self.assertIsNone(self.vw.getLocation(va))
self.assertGreater(len(byts), 0)
self.assertGreater(len(mesg), 0)
def test_locationdist(self):
cols, retn = v_reports.runReportModule(self.vw, 'vivisect.reports.locationdist')
self.assertEqual(cols, (("Location Type", str),
("Instance Count", int),
("Size (bytes)", int),
("Size (percent)", int)))
self.assertEqual(retn, self.vw.getLocationDistribution())
def test_funcomp(self):
cols, retn = v_reports.runReportModule(self.vw, 'vivisect.reports.funccomplexity')
self.assertEqual(cols, (("Code Blocks", int),
("Mnem Dist", int)))
vw = self.vw
self.assertGreater(len(retn), 0)
for fva, comp in retn.items():
blks, mdist = comp
self.assertEqual(blks, len(vw.getFunctionBlocks(fva)))
self.assertEqual(mdist, vw.getFunctionMeta(fva, 'MnemDist', -1))
|
more_autograding_examples/python_random_input_output/submissions/solution.py | elihschiff/Submitty | 411 | 12622335 |
import sys
def percent_change(old,new):
return int(100*(float(new)-old)/old)
def print_change(old1, new1, old2, new2):
p1 = percent_change(old1,new1)
p2 = percent_change(old2,new2)
print (p1, "vs", p2)
print ("#icebucketchallenge vs #alsicebucketchallenge, percentage change")
print_change(200,500,100,300)
print_change(500,2000,300,1500)
print_change(2000,12000,1500,13000)
print_change(12000,24000,13000,25000)
print_change(24000,65000,25000,105000)
print_change(65000,70000,105000,85000)
# read the last test case from an input file (if provided)
if (len(sys.argv) > 1):
inputfile=sys.argv[1]
f=open(inputfile,"r")
contents=f.read().split(",")
o1=int(contents[0],10)
n1=int(contents[1],10)
o2=int(contents[2],10)
n2=int(contents[3],10)
print_change(o1,n1,o2,n2)
|
settings.py | mdgrotheer/twitter-intelligence | 202 | 12622342 | GOOGLE_MAP_API_KEY = 'YOUR_API_KEY'
PORT = 5000
|
tests/unit/test_objector.py | nickatnight/praw | 2,360 | 12622359 |
import pytest
from praw.exceptions import ClientException, RedditAPIException
from . import UnitTest
class TestObjector(UnitTest):
def test_objectify_returns_None_for_None(self):
assert self.reddit._objector.objectify(None) is None
def test_parse_error(self):
objector = self.reddit._objector
assert objector.parse_error({}) is None
assert objector.parse_error([]) is None
assert objector.parse_error({"asdf": 1}) is None
error_response = {
"json": {"errors": [["USER_REQUIRED", "Please log in to do that.", None]]}
}
error = objector.parse_error(error_response)
assert isinstance(error, RedditAPIException)
error_response = {"json": {"errors": []}}
with pytest.raises(ClientException):
objector.parse_error(error_response)
error_response = {
"json": {
"errors": [
["USER_REQUIRED", "Please log in to do that.", None],
["NO_SUBJECT", "please enter a subject", "subject"],
]
}
}
assert isinstance(objector.parse_error(error_response), RedditAPIException)
def test_check_error(self):
objector = self.reddit._objector
objector.check_error({"asdf": 1})
error_response = {
"json": {"errors": [["USER_REQUIRED", "Please log in to do that.", None]]}
}
with pytest.raises(RedditAPIException):
objector.check_error(error_response)
|
perma_web/perma/migrations/0008_auto_20160602_1911.py | rachelaus/perma | 317 | 12622362 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def set_registrar_status(apps, schema_editor):
Registrar = apps.get_model('perma', 'Registrar')
Registrar.objects.filter(is_approved=True).update(status='approved')
Registrar.objects.filter(is_approved=False).update(status='pending')
HistoricalRegistrar = apps.get_model('perma','HistoricalRegistrar')
HistoricalRegistrar.objects.filter(is_approved=True).update(status='approved')
HistoricalRegistrar.objects.filter(is_approved=False).update(status='pending')
class Migration(migrations.Migration):
dependencies = [
('perma', '0007_auto_20160527_1625'),
]
operations = [
migrations.AddField(
model_name='historicalregistrar',
name='status',
field=models.CharField(default=b'pending', max_length=20, choices=[(b'pending', b'pending'), (b'approved', b'approved'), (b'denied', b'denied')]),
),
migrations.AddField(
model_name='registrar',
name='status',
field=models.CharField(default=b'pending', max_length=20, choices=[(b'pending', b'pending'), (b'approved', b'approved'), (b'denied', b'denied')]),
),
migrations.RunPython(set_registrar_status),
migrations.RemoveField(
model_name='historicalregistrar',
name='is_approved',
),
migrations.RemoveField(
model_name='registrar',
name='is_approved',
),
]
|
annotation-vocab/tools/vocab_tester.py | ziransun/wpt | 2,479 | 12622364 |
# Author: <NAME> (<EMAIL>)
# License: Apache2
# Last Modified: 2016-09-02
from __future__ import print_function
import json
from rdflib import ConjunctiveGraph, URIRef
from pyld import jsonld
from pyld.jsonld import compact, expand, frame, from_rdf, to_rdf, JsonLdProcessor
import urllib
# Stop code from looking up the contexts online for every operation
docCache = {}
def fetch(url):
fh = urllib.urlopen(url)
data = fh.read()
fh.close()
return data
def load_document_and_cache(url):
if docCache.has_key(url):
return docCache[url]
doc = {
'contextUrl': None,
'documentUrl': None,
'document': ''
}
data = fetch(url)
    doc['document'] = data
docCache[url] = doc
return doc
jsonld.set_document_loader(load_document_and_cache)
class Validator(object):
def __init__(self):
self.rdflib_class_map = {
"Annotation": "oa:Annotation",
"Dataset": "dctypes:Dataset",
"Image": "dctypes:StillImage",
"Video": "dctypes:MovingImage",
"Audio": "dctypes:Sound",
"Text": "dctypes:Text",
"TextualBody": "oa:TextualBody",
"ResourceSelection": "oa:ResourceSelection",
"SpecificResource": "oa:SpecificResource",
"FragmentSelector": "oa:FragmentSelector",
"CssSelector": "oa:CssSelector",
"XPathSelector": "oa:XPathSelector",
"TextQuoteSelector": "oa:TextQuoteSelector",
"TextPositionSelector": "oa:TextPositionSelector",
"DataPositionSelector": "oa:DataPositionSelector",
"SvgSelector": "oa:SvgSelector",
"RangeSelector": "oa:RangeSelector",
"TimeState": "oa:TimeState",
"HttpState": "oa:HttpRequestState",
"CssStylesheet": "oa:CssStyle",
"Choice": "oa:Choice",
"Composite": "oa:Composite",
"List": "oa:List",
"Independents": "oa:Independents",
"Person": "foaf:Person",
"Software": "as:Application",
"Organization": "foaf:Organization",
"AnnotationCollection": "as:OrderedCollection",
"AnnotationPage": "as:OrderedCollectionPage",
"Audience": "schema:Audience"
}
def _clean_bnode_ids(self, js):
new = {}
for (k,v) in js.items():
if k == 'id' and v.startswith("_:"):
continue
elif type(v) == dict:
# recurse
res = self._clean_bnode_ids(v)
new[k] = res
else:
new[k] = v
return new
def _mk_rdflib_jsonld(self, js):
# rdflib's json-ld implementation sucks
# Pre-process to make it work
# recurse the structure looking for types, and replacing them.
new = {}
for (k,v) in js.items():
if k == 'type':
if type(v) == list:
nl = []
for i in v:
if self.rdflib_class_map.has_key(i):
nl.append(self.rdflib_class_map[i])
new['type'] = nl
else:
if self.rdflib_class_map.has_key(v):
new['type'] = self.rdflib_class_map[v]
elif type(v) == dict:
# recurse
res = self._mk_rdflib_jsonld(v)
new[k] = res
else:
new[k] = v
return new
def json_to_rdf(self, js, fmt=None):
d2 = self._mk_rdflib_jsonld(js)
js = json.dumps(d2)
g = ConjunctiveGraph()
g.parse(data=js, format='json-ld')
if fmt:
out = g.serialize(format=fmt)
return out
else:
return g
def rdf_to_jsonld(self, rdf, fmt):
g = ConjunctiveGraph()
g.parse(data=rdf, format=fmt)
out = g.serialize(format='json-ld')
j2 = json.loads(out)
j2 = {"@context": context_js, "@graph": j2}
framed = frame(j2, frame_js)
out = compact(framed, context_js)
# recursively clean blank node ids
#out = self._clean_bnode_ids(out)
return out
def compact_and_clean(self, js):
newjs = compact(js, context_js)
newjs['@context'] = context
if newjs.has_key("@graph"):
for k,v in newjs['@graph'].items():
newjs[k] = v
del newjs['@graph']
return newjs
validator = Validator()
example = "https://raw.githubusercontent.com/w3c/web-annotation/gh-pages/model/wd2/examples/correct/anno4.json"
example_ttl = "https://raw.githubusercontent.com/w3c/web-annotation/gh-pages/vocab/wd/examples/correct/anno1.ttl"
context = "http://www.w3.org/ns/anno.jsonld"
frameURI = "https://raw.githubusercontent.com/w3c/web-annotation/gh-pages/jsonld/annotation_frame.jsonld"
# ontology = "https://www.w3.org/ns/oa.ttl"
ontology = "https://raw.githubusercontent.com/w3c/web-annotation/gh-pages/vocab/wd/ontology/oa.ttl"
data = fetch(context)
context_js = json.loads(data)
data = fetch(example)
example_js = json.loads(data)
data = fetch(frameURI)
frame_js = json.loads(data)
# Test1: JSON-LD context document can be parsed without errors by JSON-LD validators
# Context document is parsable if it can be loaded and used to expand the example
try:
expanded = expand(example_js, context_js)
except:
print("Context is invalid, failed Test 1")
# Test2: JSON-LD context document can be used to convert JSON-LD serialized Annotations into RDF triples.
try:
jsonld_nq = to_rdf(example_js, {"base": "http://example.org/", "format": "application/nquads"})
except:
print("Cannot use context to convert JSON-LD to NQuads")
# Test3: Graphs produced are isomorphic
try:
rl_g = validator.json_to_rdf(example_js)
g = ConjunctiveGraph()
js_g = g.parse(data=jsonld_nq, format="nt")
rl_g_nq = rl_g.serialize(format="nquads")
assert(len(rl_g.store) == len(js_g.store))
assert(rl_g.isomorphic(js_g))
except:
print("Different triples from two parsers, or non-isomorphic graphs")
# Test4: The graphs produced can be converted back into JSON-LD without loss of information
try:
js = validator.rdf_to_jsonld(jsonld_nq, "nt")
js2 = validator.compact_and_clean(js)
assert(js2 == example_js)
except:
print("Failed to recompact parsed data")
raise
# Test5: ontology documents can be parsed without errors by validators
try:
g = ConjunctiveGraph().parse(ontology, format="turtle")
except:
raise
# Test6: ontology is internally consistent with respect to domains, ranges, etc
# step 1: find all the classes.
rdftype = URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#type")
rdfsdomain = URIRef("http://www.w3.org/2000/01/rdf-schema#domain")
rdfsrange = URIRef("http://www.w3.org/2000/01/rdf-schema#range")
rdfsresource = URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#Resource")
rdfssco = URIRef("http://www.w3.org/2000/01/rdf-schema#subClassOf")
asColl = URIRef("http://www.w3.org/ns/activitystreams#OrderedCollection")
skosConcept = URIRef("http://www.w3.org/2004/02/skos/core#Concept")
otherClasses = [asColl, skosConcept]
classes = list(g.subjects(rdftype, URIRef("http://www.w3.org/2000/01/rdf-schema#Class")))
props = list(g.subjects(rdftype, URIRef("http://www.w3.org/1999/02/22-rdf-syntax-ns#Property")))
for p in props:
domains = list(g.objects(p, rdfsdomain))
for d in domains:
assert(d in classes)
for p in props:
ranges = list(g.objects(p, rdfsrange))
for r in ranges:
if not r in classes and not str(r).startswith("http://www.w3.org/2001/XMLSchema#") and \
not r == rdfsresource:
print("Found inconsistent property: %s has unknown range" % p)
for c in classes:
parents = list(g.objects(c, rdfssco))
for p in parents:
if not p in classes and not p in otherClasses:
print("Found inconsistent class: %s has unknown superClass" % c)
print("Done.")
|
lbry/wallet/server/db/__init__.py | nishp77/lbry-sdk | 4,996 | 12622370 |
import enum
@enum.unique
class DB_PREFIXES(enum.Enum):
claim_to_support = b'K'
support_to_claim = b'L'
claim_to_txo = b'E'
txo_to_claim = b'G'
claim_to_channel = b'I'
channel_to_claim = b'J'
claim_short_id_prefix = b'F'
effective_amount = b'D'
claim_expiration = b'O'
claim_takeover = b'P'
pending_activation = b'Q'
activated_claim_and_support = b'R'
active_amount = b'S'
repost = b'V'
reposted_claim = b'W'
undo = b'M'
claim_diff = b'Y'
tx = b'B'
block_hash = b'C'
header = b'H'
tx_num = b'N'
tx_count = b'T'
tx_hash = b'X'
utxo = b'u'
hashx_utxo = b'h'
hashx_history = b'x'
db_state = b's'
channel_count = b'Z'
support_amount = b'a'
block_txs = b'b'
|
src/webdav/__init__.py | rbanffy/Zope | 289 | 12622381 | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""The webdav package provides WebDAV capability for common Zope objects.
Current WebDAV support in Zope provides for the correct handling of HTTP
GET, HEAD, POST, PUT, DELETE, OPTIONS, TRACE, PROPFIND, PROPPATCH, MKCOL,
COPY and MOVE methods, as appropriate for the object that is the target
of the operation. Objects which do not support a given operation should
respond appropriately with a "405 Method Not Allowed" response.
Note that the ability of a Zope installation to support WebDAV HTTP methods
depends on the willingness of the web server to defer handling of those
methods to the Zope process. In most cases, servers will allow the process
to handle any request, so the Zope portion of your url namespace may well
be able to handle WebDAV operations even though your web server software
is not WebDAV-aware itself. Zope installations which use bundled server
implementations such as ZopeHTTPServer or ZServer should fully support
WebDAV functions.
References:
[WebDAV] <NAME>, <NAME>, Jr., <NAME>, <NAME>, D.
Jensen, "HTTP Extensions for Distributed Authoring - WebDAV." RFC 2518.
Microsoft, U.C. Irvine, Netscape, Novell. February, 1999."""
enable_ms_public_header = False
|
python/fcdd/runners/argparse_configs.py | kyungmin96/myfcdd | 152 | 12622388 | import os.path as pt
from argparse import ArgumentParser
import numpy as np
from fcdd.datasets import DS_CHOICES, PREPROC_CHOICES
from fcdd.datasets.noise_modes import MODES
from fcdd.models import choices
from fcdd.training.setup import OBJECTIVES, SUPERVISE_MODES
class DefaultConfig(object):
def __call__(self, parser: ArgumentParser) -> ArgumentParser:
"""
Defines all the arguments for running an FCDD experiment.
:param parser: instance of an ArgumentParser.
:return: the parser with added arguments
"""
# define directories for datasets and logging
parser.add_argument(
'--logdir', type=str, default=pt.join('..', '..', 'data', 'results', 'fcdd_{t}'),
help='Directory where log data is to be stored. The pattern {t} is replaced by the start time. '
'Defaults to ../../data/results/fcdd_{t}. '
)
parser.add_argument(
'--logdir-suffix', type=str, default='',
help='String suffix for log directory, again {t} is replaced by the start time. '
)
parser.add_argument(
'--datadir', type=str, default=pt.join('..', '..', 'data', 'datasets'),
help='Directory where datasets are found or to be downloaded to. Defaults to ../../data/datasets.',
)
parser.add_argument(
'--viz-ids', type=str, default=None,
help='Directory that contains log data of an old experiment. '
'When given, in addition to the usual log data and heatmaps, the training produces heatmaps for the '
'same images that have been logged in the according seeds and class runs found in the directory.'
)
parser.add_argument(
'--readme', type=str, default='',
help='Some notes to be stored in the automatically created config.txt configuration file.'
)
# training parameters
parser.add_argument(
'--objective', type=str, default='fcdd', choices=OBJECTIVES,
help='Chooses the objective to run explanation baseline experiments. Defaults to FCDD.'
)
parser.add_argument('-b', '--batch-size', type=int, default=128)
parser.add_argument('-e', '--epochs', type=int, default=200)
parser.add_argument('-w', '--workers', type=int, default=4)
parser.add_argument('-lr', '--learning_rate', type=float, default=1e-3)
parser.add_argument('-wd', '--weight-decay', type=float, default=1e-6)
parser.add_argument(
'--optimizer-type', type=str, default='sgd', choices=['sgd', 'adam'],
help='The type of optimizer. Defaults to "sgd". '
)
parser.add_argument(
'--scheduler-type', type=str, default='lambda', choices=['lambda', 'milestones'],
help='The type of learning rate scheduler. Either "lambda", which reduces the learning rate each epoch '
'by a certain factor, or "milestones", which sets the learning rate to certain values at certain '
'epochs. Defaults to "lambda"'
)
parser.add_argument(
'--lr-sched-param', type=float, nargs='*', default=[0.985],
help='Sequence of learning rate scheduler parameters. '
'For the "lambda" scheduler, just one parameter is allowed, '
'which sets the factor the learning rate is reduced per epoch. '
'For the "milestones" scheduler, at least two parameters are needed, '
'the first determining the factor by which the learning rate is reduced at each milestone, '
'and the others being each a milestone. For instance, "0.1 100 200 300" reduces the learning rate '
'by 0.1 at epoch 100, 200, and 300. '
)
parser.add_argument(
'--load', type=str, default=None,
help='Path to a file that contains a snapshot of the network model. '
'When given, the network loads the found weights and state of the training. '
'If epochs are left to be trained, the training is continued. '
'Note that only one snapshot is given, thus using a runner that trains for multiple different classes '
'to be nominal is not applicable. '
)
parser.add_argument('-d', '--dataset', type=str, default='custom', choices=DS_CHOICES)
parser.add_argument(
'-n', '--net', type=str, default='FCDD_CNN224_VGG_F', choices=choices(),
help='Chooses a network architecture to train. Note that not all architectures fit every objective. '
)
parser.add_argument(
'--preproc', type=str, default='aug1', choices=PREPROC_CHOICES,
help='Determines the kind of preprocessing pipeline (augmentations and such). '
'Have a look at the code (dataset implementation, e.g. fcdd.datasets.cifar.py) for details.'
)
parser.add_argument(
'--acc-batches', type=int, default=1,
help='To speed up data loading, '
'this determines the number of batches that are accumulated to be used for training. '
'For instance, acc_batches=2 iterates the data loader two times, concatenates the batches, and '
'passes the result to the further training procedure. This has no impact on the performance '
'if the batch size is reduced accordingly (e.g. one half in this example), '
'but can decrease training time. '
)
parser.add_argument('--no-bias', dest='bias', action='store_false', help='Uses no bias in network layers.')
parser.add_argument('--cpu', dest='cuda', action='store_false', help='Trains on CPU only.')
# artificial anomaly settings
parser.add_argument(
'--supervise-mode', type=str, default='noise', choices=SUPERVISE_MODES,
help='This determines the kind of artificial anomalies. '
'"unsupervised" uses no anomalies at all. '
'"other" uses ground-truth anomalies. '
'"noise" uses pure noise images or Outlier Exposure. '
'"malformed_normal" adds noise to nominal images to create malformed nominal anomalies. '
'"malformed_normal_gt" is like malformed_normal, but with ground-truth anomaly heatmaps for training. '
)
parser.add_argument(
'--noise-mode', type=str, default='imagenet22k', choices=MODES,
help='The type of noise used when artificial anomalies are activated. Dataset names refer to OE. '
'See fcdd.datasets.noise_modes.py.'
)
parser.add_argument(
'--oe-limit', type=int, default=np.infty,
help='Determines the amount of different samples used for Outlier Exposure. '
'Has no impact on synthetic anomalies.'
)
parser.add_argument(
'--offline-supervision', dest='online_supervision', action='store_false',
help='Instead of sampling artificial anomalies during training by having a 50%% chance to '
'replace nominal samples, this mode samples them once at the start of the training and adds them to '
'the training set. '
'This yields less performance and higher RAM utilization, but reduces the training time. '
)
parser.add_argument(
'--nominal-label', type=int, default=0,
help='Determines the label that marks nominal samples. '
'Note that this is not the class that is considered nominal! '
'For instance, class 5 is the nominal class, which is labeled with the nominal label 0.'
)
# heatmap generation parameters
parser.add_argument(
'--blur-heatmaps', dest='blur_heatmaps', action='store_true',
help='Blurs heatmaps, like done for the explanation baseline experiments in the paper.'
)
parser.add_argument(
'--gauss-std', type=float, default=10,
help='Sets a constant value for the standard deviation of the Gaussian kernel used for upsampling and '
'blurring.'
)
parser.add_argument(
'--quantile', type=float, default=0.97,
help='The quantile that is used to normalize the generated heatmap images. '
'This is explained in the Appendix of the paper.'
)
parser.add_argument(
'--resdown', type=int, default=64,
help='Sets the maximum resolution of logged images (per heatmap), images will be downsampled '
'if they exceed this threshold. For instance, resdown=64 makes every image of heatmaps contain '
'individual heatmaps and inputs of width 64 and height 64 at most.'
)
parser.set_defaults(cuda=True, bias=True, blur_heatmaps=False, online_supervision=True)
return parser
class DefaultFmnistConfig(DefaultConfig):
def __call__(self, parser: ArgumentParser) -> ArgumentParser:
parser = super().__call__(parser)
parser.set_defaults(
batch_size=128, epochs=400, learning_rate=1e-2,
weight_decay=1e-6, lr_sched_param=[0.98], dataset='fmnist',
net='FCDD_CNN28_W', quantile=0.85, noise_mode='cifar100',
preproc='lcnaug1', gauss_std=1.2,
)
return parser
class DefaultCifar10Config(DefaultConfig):
def __call__(self, parser: ArgumentParser) -> ArgumentParser:
parser = super().__call__(parser)
parser.set_defaults(
batch_size=20, acc_batches=10, epochs=600,
optimizer_type='adam', scheduler_type='milestones',
lr_sched_param=[0.1, 400, 500], dataset='cifar10',
net='FCDD_CNN32_LW3K', quantile=0.85,
noise_mode='cifar100', gauss_std=1.2,
)
return parser
class DefaultMvtecConfig(DefaultConfig):
def __call__(self, parser: ArgumentParser) -> ArgumentParser:
parser = super().__call__(parser)
parser.set_defaults(
batch_size=16, acc_batches=8, supervise_mode='malformed_normal',
gauss_std=12, weight_decay=1e-4, epochs=200, preproc='lcnaug1',
quantile=0.99, net='FCDD_CNN224_VGG_F', dataset='mvtec', noise_mode='confetti'
)
return parser
class DefaultImagenetConfig(DefaultConfig):
def __call__(self, parser: ArgumentParser) -> ArgumentParser:
parser = super().__call__(parser)
parser.set_defaults(
batch_size=20, acc_batches=10, epochs=600,
optimizer_type='adam', scheduler_type='milestones',
lr_sched_param=[0.1, 400, 500], noise_mode='imagenet22k',
dataset='imagenet', gauss_std=8, net='FCDD_CNN224_VGG_NOPT'
)
return parser
class DefaultPascalvocConfig(DefaultConfig):
def __call__(self, parser: ArgumentParser):
parser = super().__call__(parser)
parser.set_defaults(
batch_size=20, acc_batches=10, epochs=600,
optimizer_type='adam', scheduler_type='milestones', lr_sched_param=[0.1, 400, 500],
dataset='pascalvoc', noise_mode='imagenet', net='FCDD_CNN224_VGG_NOPT',
nominal_label=1, gauss_std=8, quantile=0.99,
)
return parser
|
test/test_seed_schema.py | aviv-julienjehannet/GenSON | 377 | 12622396 |
from . import base
class TestSeedTuple(base.SchemaNodeTestCase):
def test_tuple(self):
self.add_schema({'type': 'array', 'items': []})
self.add_object([None])
self.assertResult({'type': 'array', 'items': [{'type': 'null'}]})
class TestPatternProperties(base.SchemaNodeTestCase):
def test_single_pattern(self):
self.add_schema({'type': 'object', 'patternProperties': {
r'^\d$': None}})
self.add_object({'0': 0, '1': 1, '2': 2})
self.assertResult({'type': 'object', 'patternProperties': {
r'^\d$': {'type': 'integer'}}})
def test_multi_pattern(self):
self.add_schema({'type': 'object', 'patternProperties': {
r'^\d$': None,
r'^[a-z]$': None}})
self.add_object({'0': 0, '1': 1, 'a': True, 'b': False})
self.assertResult({'type': 'object', 'patternProperties': {
r'^\d$': {'type': 'integer'},
r'^[a-z]$': {'type': 'boolean'}}})
def test_multi_pattern_multi_object(self):
self.add_schema({'type': 'object', 'patternProperties': {
r'^\d$': None,
r'^[a-z]$': None}})
self.add_object({'0': 0})
self.add_object({'1': 1})
self.add_object({'a': True})
self.add_object({'b': False})
self.assertResult({'type': 'object', 'patternProperties': {
r'^\d$': {'type': 'integer'},
r'^[a-z]$': {'type': 'boolean'}}})
def test_existing_schema(self):
self.add_schema({'type': 'object', 'patternProperties': {
r'^\d$': {'type': 'boolean'}}})
self.add_object({'0': 0, '1': 1, '2': 2})
self.assertResult({'type': 'object', 'patternProperties': {
r'^\d$': {'type': ['boolean', 'integer']}}})
def test_prefers_existing_properties(self):
self.add_schema({'type': 'object',
'properties': {'0': None},
'patternProperties': {r'^\d$': None}})
self.add_object({'0': 0, '1': 1, '2': 2})
self.assertResult({'type': 'object',
'properties': {'0': {'type': 'integer'}},
'patternProperties': {r'^\d$': {'type': 'integer'}},
'required': ['0']})
def test_keeps_unrecognized_properties(self):
self.add_schema({'type': 'object',
'patternProperties': {r'^\d$': None}})
self.add_object({'0': 0, '1': 1, '2': 2, 'a': True})
self.assertResult({'type': 'object',
'properties': {'a': {'type': 'boolean'}},
'patternProperties': {r'^\d$': {'type': 'integer'}},
'required': ['a']})
|
gunnery/account/urls.py | timgates42/gunnery | 314 | 12622425 | from django.conf.urls import patterns, url
from views import modal_permissions, profile_page
urlpatterns = patterns('',
url(r'^account/profile/(?P<user_id>[\d]+)/$', profile_page, name='profile'),
url(r'^account/login/$', 'django.contrib.auth.views.login', {'template_name': 'page/login.html'}),
url(r'^account/logout/$', 'django.contrib.auth.views.logout_then_login', name='logout'),
url(r'^account/password_reset/$', 'django.contrib.auth.views.password_reset', name='password_reset'),
url(r'^account/password_reset_done$', 'django.contrib.auth.views.password_reset_done',
name='password_reset_done'),
url(r'^account/password_reset_confirm/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>.+)/$',
'django.contrib.auth.views.password_reset_confirm', name='password_reset_confirm'),
url(r'^account/password_reset_complete$', 'django.contrib.auth.views.password_reset_complete',
name='password_reset_complete'),
url(r'^modal/permissions/(?P<group_id>[\d]+)/$', modal_permissions, name='modal_permissions'),
) |
brainstorm/hooks.py | PyCN/brainstorm | 1,473 | 12622435 | #!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals
import math
import signal
import sys
from collections import OrderedDict
import h5py
import numpy as np
from six import string_types
from brainstorm.describable import Describable
from brainstorm import optional
from brainstorm.structure.network import Network
from brainstorm.tools import evaluate
from brainstorm.utils import get_by_path, progress_bar, get_brainstorm_info
class Hook(Describable):
__undescribed__ = {
'__name__', # the name is saved in the trainer
'run_verbosity'
}
__default_values__ = {
'timescale': 'epoch',
'interval': 1,
'verbose': None
}
def __init__(self, name=None, timescale='epoch', interval=1, verbose=None):
self.timescale = timescale
self.interval = interval
self.__name__ = name or self.__class__.__name__
self.priority = 0
self.verbose = verbose
self.run_verbosity = None
def start(self, net, stepper, verbose, named_data_iters):
if self.verbose is None:
self.run_verbosity = verbose
else:
self.run_verbosity = self.verbose
def message(self, msg):
"""Print an output message if :attr:`run_verbosity` is True."""
if self.run_verbosity:
print("{} >> {}".format(self.__name__, msg))
def __call__(self, epoch_nr, update_nr, net, stepper, logs):
pass
# -------------------------------- Saviors ---------------------------------- #
class SaveBestNetwork(Hook):
"""
Check to see if the specified log entry is at it's best value and if so,
save the network to a specified file.
Can save the network when the log entry is at its minimum (such as an
error) or maximum (such as accuracy) according to the ``criterion``
argument.
The ``timescale`` and ``interval`` should be the same as those for the
monitoring hook which logs the quantity of interest.
Args:
log_name:
Name of the log entry to be checked for improvement.
It should be in the form <monitorname>.<log_name> where log_name
itself may be a nested dictionary key in dotted notation.
filename:
Name of the HDF5 file to which the network should be saved.
criterion:
Indicates whether training should be stopped when the log entry is
at its minimum or maximum value. Must be either 'min' or 'max'.
Defaults to 'min'.
name (Optional[str]):
Name of this monitor. This name is used as a key in the trainer
logs. Default is 'SaveBestNetwork'.
timescale (Optional[str]):
Specifies whether the Monitor should be called after each epoch or
after each update. Default is 'epoch'.
interval (Optional[int]):
This monitor should be called every ``interval`` epochs/updates.
Default is 1.
verbose: bool, optional
Specifies whether the logs of this monitor should be printed, and
acts as a fallback verbosity for the used data iterator.
If not set it defaults to the verbosity setting of the trainer.
Examples:
Add a hook to monitor a quantity of interest:
>>> scorer = bs.scorers.Accuracy()
>>> trainer.add_hook(bs.hooks.MonitorScores('valid_getter', [scorer],
... name='validation'))
Check every epoch and save the network if validation accuracy rises:
>>> trainer.add_hook(bs.hooks.SaveBestNetwork('validation.Accuracy',
... filename='best_acc.h5',
... criterion='max'))
Check every epoch and save the network if validation loss drops:
>>> trainer.add_hook(bs.hooks.SaveBestNetwork('validation.total_loss',
... filename='best_loss.h5',
... criterion='min'))
"""
__undescribed__ = {'parameters': None}
__default_values__ = {'filename': None}
def __init__(self, log_name, filename=None, criterion='max', name=None,
timescale='epoch', interval=1, verbose=None):
super(SaveBestNetwork, self).__init__(name, timescale,
interval, verbose)
self.log_name = log_name
self.filename = filename
        self.parameters = None
assert criterion == 'min' or criterion == 'max'
self.best_so_far = np.inf if criterion == 'min' else -np.inf
self.best_t = None
self.criterion = criterion
def __call__(self, epoch_nr, update_nr, net, stepper, logs):
if epoch_nr == 0:
try:
e = get_by_path(logs, self.log_name)
except KeyError:
return
e = get_by_path(logs, self.log_name)
last = e[-1]
if self.criterion == 'min':
imp = last < self.best_so_far
else:
imp = last > self.best_so_far
if imp:
self.best_so_far = last
self.best_t = epoch_nr if self.timescale == 'epoch' else update_nr
params = net.get('parameters')
if self.filename is not None:
self.message("{} improved (criterion: {}). Saving network to "
"{}".format(self.log_name, self.criterion,
self.filename))
net.save_as_hdf5(self.filename)
else:
self.message("{} improved (criterion: {}). Caching parameters".
format(self.log_name, self.criterion))
self.parameters = params
else:
self.message("Last saved parameters at {} {} when {} was {}".
format(self.timescale, self.best_t, self.log_name,
self.best_so_far))
def load_best_network(self):
return Network.from_hdf5(self.filename) if self.filename is not None \
else self.parameters
class SaveLogs(Hook):
"""
    Periodically save the trainer logs dictionary to an HDF5 file.
Default behavior is to save every epoch.
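    Example (a sketch; assumes a configured ``trainer``; the filename is
    arbitrary):
    >>> trainer.add_hook(bs.hooks.SaveLogs('training_logs.h5'))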
"""
def __init__(self, filename, name=None, timescale='epoch', interval=1):
super(SaveLogs, self).__init__(name, timescale, interval)
self.filename = filename
def __call__(self, epoch_nr, update_nr, net, stepper, logs):
with h5py.File(self.filename, 'w') as f:
f.attrs.create('info', get_brainstorm_info())
f.attrs.create('format', b'Logs file v1.0')
SaveLogs._save_recursively(f, logs)
@staticmethod
def _save_recursively(group, logs):
for name, log in logs.items():
if isinstance(log, dict):
subgroup = group.create_group(name)
SaveLogs._save_recursively(subgroup, log)
else:
group.create_dataset(name, data=np.array(log))
class SaveNetwork(Hook):
"""
Periodically save the weights of the network to the given file.
Default behavior is to save the network after every training epoch.
"""
def __init__(self, filename, name=None, timescale='epoch', interval=1):
super(SaveNetwork, self).__init__(name, timescale, interval)
self.filename = filename
def __call__(self, epoch_nr, update_nr, net, stepper, logs):
net.save_as_hdf5(self.filename)
def load_network(self):
return Network.from_hdf5(self.filename)
# -------------------------------- Monitors --------------------------------- #
class MonitorLayerDeltas(Hook):
"""
Monitor some statistics about all the deltas of a layer.
"""
def __init__(self, layer_name, name=None, timescale='epoch', interval=1,
verbose=None):
if name is None:
name = "MonitorDeltas_{}".format(layer_name)
super(MonitorLayerDeltas, self).__init__(name, timescale,
interval, verbose)
self.layer_name = layer_name
def start(self, net, stepper, verbose, named_data_iters):
assert self.layer_name in net.layers.keys(), \
"{} >> No layer named {} present in network. Available layers " \
"are {}.".format(self.__name__, self.layer_name, net.layers.keys())
def __call__(self, epoch_nr, update_nr, net, stepper, logs):
log = OrderedDict()
for key, v in net.buffer[self.layer_name].internals.items():
v = net.handler.get_numpy_copy(v)
log[key] = OrderedDict()
log[key]['min'] = v.min()
log[key]['avg'] = v.mean()
log[key]['max'] = v.max()
out_deltas_log = log['output_deltas'] = OrderedDict()
for key, v in net.buffer[self.layer_name].output_deltas.items():
v = net.handler.get_numpy_copy(v)
key_log = out_deltas_log[key] = OrderedDict()
key_log['min'] = v.min()
key_log['avg'] = v.mean()
key_log['max'] = v.max()
in_deltas_log = log['input_deltas'] = OrderedDict()
for key, v in net.buffer[self.layer_name].input_deltas.items():
key_log = in_deltas_log[key] = OrderedDict()
v = net.handler.get_numpy_copy(v)
            key_log['min'] = v.min()
            key_log['avg'] = v.mean()
            key_log['max'] = v.max()
return log
class MonitorLayerGradients(Hook):
"""
Monitor some statistics about all the gradients of a layer.
"""
def __init__(self, layer_name, name=None, timescale='epoch', interval=1,
verbose=None):
if name is None:
name = "MonitorGradients_{}".format(layer_name)
super(MonitorLayerGradients, self).__init__(name, timescale,
interval, verbose)
self.layer_name = layer_name
def start(self, net, stepper, verbose, named_data_iters):
assert self.layer_name in net.layers.keys(), \
"{} >> No layer named {} present in network. Available layers " \
"are {}.".format(self.__name__, self.layer_name, net.layers.keys())
def __call__(self, epoch_nr, update_nr, net, stepper, logs):
log = OrderedDict()
for key, v in net.buffer[self.layer_name].gradients.items():
v = net.handler.get_numpy_copy(v)
log[key] = OrderedDict()
log[key]['min'] = v.min()
log[key]['avg'] = v.mean()
log[key]['max'] = v.max()
return log
class MonitorLayerInOuts(Hook):
"""
Monitor some statistics about all the inputs and outputs of a layer.
"""
def __init__(self, layer_name, name=None, timescale='epoch', interval=1,
verbose=None):
if name is None:
name = "MonitorInOuts_{}".format(layer_name)
super(MonitorLayerInOuts, self).__init__(name, timescale,
interval, verbose)
self.layer_name = layer_name
def start(self, net, stepper, verbose, named_data_iters):
assert self.layer_name in net.layers.keys(), \
"{} >> No layer named {} present in network. Available layers " \
"are {}.".format(self.__name__, self.layer_name, net.layers.keys())
def __call__(self, epoch_nr, update_nr, net, stepper, logs):
log = OrderedDict()
input_log = log['inputs'] = OrderedDict()
for key, v in net.buffer[self.layer_name].inputs.items():
v = net.handler.get_numpy_copy(v)
key_log = input_log[key] = OrderedDict()
key_log['min'] = v.min()
key_log['avg'] = v.mean()
key_log['max'] = v.max()
output_log = log['outputs'] = OrderedDict()
for key, v in net.buffer[self.layer_name].outputs.items():
key_log = output_log[key] = OrderedDict()
v = net.handler.get_numpy_copy(v)
key_log['min'] = v.min()
key_log['avg'] = v.mean()
key_log['max'] = v.max()
return log
class MonitorLayerParameters(Hook):
"""
Monitor some statistics about all the parameters of a layer.
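    Example (a sketch; ``'Hidden_1'`` stands for any layer name present in
    the network):
    >>> trainer.add_hook(bs.hooks.MonitorLayerParameters('Hidden_1'))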
"""
def __init__(self, layer_name, name=None, timescale='epoch', interval=1,
verbose=None):
if name is None:
name = "MonitorParameters_{}".format(layer_name)
super(MonitorLayerParameters, self).__init__(name, timescale,
interval, verbose)
self.layer_name = layer_name
def start(self, net, stepper, verbose, named_data_iters):
assert self.layer_name in net.layers.keys(), \
"{} >> No layer named {} present in network. Available layers " \
"are {}.".format(self.__name__, self.layer_name, net.layers.keys())
def __call__(self, epoch_nr, update_nr, net, stepper, logs):
log = OrderedDict()
for key, v in net.buffer[self.layer_name].parameters.items():
v = net.handler.get_numpy_copy(v)
log[key] = OrderedDict()
log[key]['min'] = v.min()
log[key]['avg'] = v.mean()
log[key]['max'] = v.max()
if len(v.shape) > 1:
log[key]['min_L2_norm'] = np.sqrt(np.sum(v ** 2, axis=1)).min()
log[key]['avg_L2_norm'] = np.sqrt(np.sum(v ** 2,
axis=1)).mean()
log[key]['max_L2_norm'] = np.sqrt(np.sum(v ** 2, axis=1)).max()
return log
class MonitorLoss(Hook):
"""
Monitor the losses computed by the network on a dataset using a given data
iterator.
"""
def __init__(self, iter_name, name=None, timescale='epoch', interval=1,
verbose=None):
super(MonitorLoss, self).__init__(name, timescale, interval, verbose)
self.iter_name = iter_name
self.iter = None
def start(self, net, stepper, verbose, named_data_iters):
super(MonitorLoss, self).start(net, stepper, verbose, named_data_iters)
if self.iter_name not in named_data_iters:
raise KeyError("{} >> {} is not present in named_data_iters. "
"Remember to pass it as a kwarg to Trainer.train()"
.format(self.__name__, self.iter_name))
self.iter = named_data_iters[self.iter_name]
def __call__(self, epoch_nr, update_nr, net, stepper, logs):
return evaluate(net, self.iter, scorers=())
class MonitorScores(Hook):
"""
Monitor the losses and optionally several scores using a given data
iterator.
Args:
iter_name (str):
name of the data iterator to use (as specified in the train() call)
scorers (List[brainstorm.scorers.Scorer]):
List of Scorers to evaluate.
name (Optional[str]):
Name of this monitor. This name is used as a key in the trainer
logs. Default is 'MonitorScores'
timescale (Optional[str]):
Specifies whether the Monitor should be called after each epoch or
after each update. Default is 'epoch'.
interval (Optional[int]):
This monitor should be called every ``interval`` epochs/updates.
Default is 1.
verbose: bool, optional
Specifies whether the logs of this monitor should be printed, and
acts as a fallback verbosity for the used data iterator.
If not set it defaults to the verbosity setting of the trainer.
See Also:
MonitorLoss: monitor the overall loss of the network.
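    Examples:
        Monitor accuracy on a validation iterator (a sketch; the iterator
        name 'valid_getter' must match one passed to train()):
        >>> scorer = bs.scorers.Accuracy()
        >>> trainer.add_hook(bs.hooks.MonitorScores('valid_getter', [scorer],
        ...                                         name='validation'))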
"""
def __init__(self, iter_name, scorers, name=None, timescale='epoch',
interval=1, verbose=None):
super(MonitorScores, self).__init__(name, timescale, interval, verbose)
self.iter_name = iter_name
self.iter = None
self.scorers = scorers
def start(self, net, stepper, verbose, named_data_iters):
super(MonitorScores, self).start(net, stepper, verbose,
named_data_iters)
if self.iter_name not in named_data_iters:
raise KeyError("{} >> {} is not present in named_data_iters. "
"Remember to pass it as a kwarg to Trainer.train()"
.format(self.__name__, self.iter_name))
self.iter = named_data_iters[self.iter_name]
def __call__(self, epoch_nr, update_nr, net, stepper, logs):
return evaluate(net, self.iter, self.scorers)
# -------------------------------- Stoppers --------------------------------- #
class EarlyStopper(Hook):
"""
Stop the training if a log entry does not improve for some time.
Can stop training when the log entry is at its minimum (such as an error)
or maximum (such as accuracy) according to the ``criterion`` argument.
The ``timescale`` and ``interval`` should be the same as those for the
monitoring hook which logs the quantity of interest.
Args:
log_name:
Name of the log entry to be checked for improvement.
It should be in the form <monitorname>.<log_name> where log_name
itself may be a nested dictionary key in dotted notation.
patience:
Number of log updates to wait before stopping training.
Default is 1.
criterion:
Indicates whether training should be stopped when the log entry is
at its minimum or maximum value. Must be either 'min' or 'max'.
Defaults to 'min'.
name (Optional[str]):
Name of this monitor. This name is used as a key in the trainer
logs. Default is 'EarlyStopper'.
timescale (Optional[str]):
Specifies whether the Monitor should be called after each epoch or
after each update. Default is 'epoch'.
interval (Optional[int]):
This monitor should be called every ``interval`` epochs/updates.
Default is 1.
verbose: bool, optional
Specifies whether the logs of this monitor should be printed, and
acts as a fallback verbosity for the used data iterator.
If not set it defaults to the verbosity setting of the trainer.
Examples:
Add a hook to monitor a quantity of interest:
>>> scorer = bs.scorers.Accuracy()
>>> trainer.add_hook(bs.hooks.MonitorScores('valid_getter', [scorer],
... name='validation'))
Stop training if validation set accuracy does not rise for 10 epochs:
>>> trainer.add_hook(bs.hooks.EarlyStopper('validation.Accuracy',
... patience=10,
... criterion='max'))
Stop training if loss on validation set does not drop for 5 epochs:
>>> trainer.add_hook(bs.hooks.EarlyStopper('validation.total_loss',
... patience=5,
... criterion='min'))
"""
__default_values__ = {'patience': 1}
def __init__(self, log_name, patience=1, criterion='min',
name=None, timescale='epoch', interval=1, verbose=None):
super(EarlyStopper, self).__init__(name, timescale, interval, verbose)
self.log_name = log_name
self.patience = patience
if criterion not in ['min', 'max']:
raise ValueError("Unknown criterion: '{}'"
"(Should be 'min' or 'max')".format(criterion))
self.criterion = criterion
def __call__(self, epoch_nr, update_nr, net, stepper, logs):
if epoch_nr == 0:
try:
e = get_by_path(logs, self.log_name)
except KeyError:
return
e = get_by_path(logs, self.log_name)
best_idx = np.argmin(e) if self.criterion == 'min' else np.argmax(e)
if len(e) > best_idx + self.patience:
self.message("Stopping because {} did not improve for {} checks "
"(criterion used : {}).".format(self.log_name,
self.patience,
self.criterion))
raise StopIteration()
class StopAfterEpoch(Hook):
"""
Stop the training after a specified number of epochs.
Args:
max_epochs (int):
The number of epochs to train.
name (Optional[str]):
Name of this monitor. This name is used as a key in the trainer
logs. Default is 'StopAfterEpoch'.
timescale (Optional[str]):
Specifies whether the Monitor should be called after each epoch or
after each update. Default is 'epoch'.
interval (Optional[int]):
This monitor should be called every ``interval`` epochs/updates.
Default is 1.
verbose: bool, optional
Specifies whether the logs of this monitor should be printed, and
acts as a fallback verbosity for the used data iterator.
If not set it defaults to the verbosity setting of the trainer.
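    Examples:
        Stop after 100 epochs (a sketch; assumes a configured ``trainer``):
        >>> trainer.add_hook(bs.hooks.StopAfterEpoch(100))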
"""
def __init__(self, max_epochs, name=None, timescale='epoch', interval=1,
verbose=None):
super(StopAfterEpoch, self).__init__(name, timescale,
interval, verbose)
self.max_epochs = max_epochs
def __call__(self, epoch_nr, update_nr, net, stepper, logs):
if epoch_nr >= self.max_epochs:
self.message("Stopping because the maximum number of epochs ({}) "
"was reached.".format(self.max_epochs))
raise StopIteration()
class StopAfterThresholdReached(Hook):
"""
    Stop the training if a log entry reaches the given threshold.
Can stop training when the log entry becomes sufficiently small (such as an
error) or sufficiently large (such as accuracy) according to the threshold.
Args:
log_name:
Name of the log entry to be checked for improvement.
It should be in the form <monitorname>.<log_name> where log_name
itself may be a nested dictionary key in dotted notation.
threshold:
The threshold value to reach
criterion:
Indicates whether training should be stopped when the log entry is
at its minimum or maximum value. Must be either 'min' or 'max'.
Defaults to 'min'.
name (Optional[str]):
Name of this monitor. This name is used as a key in the trainer
logs. Default is 'StopAfterThresholdReached'.
timescale (Optional[str]):
Specifies whether the Monitor should be called after each epoch or
after each update. Default is 'epoch'.
interval (Optional[int]):
This monitor should be called every ``interval`` epochs/updates.
Default is 1.
verbose: bool, optional
Specifies whether the logs of this monitor should be printed, and
acts as a fallback verbosity for the used data iterator.
If not set it defaults to the verbosity setting of the trainer.
Examples:
Stop training if validation set accuracy is at least 97 %:
>>> trainer.add_hook(StopAfterThresholdReached('validation.Accuracy',
... threshold=0.97,
... criterion='max'))
Stop training if loss on validation set goes below 0.2:
>>> trainer.add_hook(StopAfterThresholdReached('validation.total_loss',
... threshold=0.2,
... criterion='min'))
"""
def __init__(self, log_name, threshold, criterion='min',
name=None, timescale='epoch', interval=1, verbose=None):
super(StopAfterThresholdReached, self).__init__(name, timescale,
interval, verbose)
self.log_name = log_name
self.threshold = threshold
if criterion not in ['min', 'max']:
raise ValueError("Unknown criterion: '{}'"
"(Must be 'min' or 'max')".format(criterion))
self.criterion = criterion
def __call__(self, epoch_nr, update_nr, net, stepper, logs):
e = get_by_path(logs, self.log_name)
is_threshold_reached = False
if self.criterion == 'max' and max(e) >= self.threshold:
is_threshold_reached = True
elif self.criterion == 'min' and min(e) <= self.threshold:
is_threshold_reached = True
if is_threshold_reached:
self.message("Stopping because {} has reached the threshold {} "
"(criterion used : {})".format(
self.log_name, self.threshold, self.criterion))
raise StopIteration()
class StopOnNan(Hook):
"""
Stop the training if infinite or NaN values are found in parameters.
This hook can also check a list of logs for invalid values.
Args:
logs_to_check (Optional[list, tuple]):
A list of trainer logs to check in dotted notation. Defaults to ().
check_parameters (Optional[bool]):
Indicates whether the parameters should be checked for NaN.
Defaults to True.
name (Optional[str]):
Name of this monitor. This name is used as a key in the trainer
logs. Default is 'StopOnNan'.
timescale (Optional[str]):
Specifies whether the Monitor should be called after each epoch or
after each update. Default is 'epoch'.
interval (Optional[int]):
This monitor should be called every ``interval`` epochs/updates.
Default is 1.
verbose: bool, optional
Specifies whether the logs of this monitor should be printed, and
acts as a fallback verbosity for the used data iterator.
If not set it defaults to the verbosity setting of the trainer.
"""
def __init__(self, logs_to_check=(), check_parameters=True,
check_training_loss=True, name=None, timescale='epoch',
interval=1, verbose=None):
super(StopOnNan, self).__init__(name, timescale, interval, verbose)
self.logs_to_check = ([logs_to_check] if isinstance(logs_to_check,
string_types)
else logs_to_check)
self.check_parameters = check_parameters
self.check_training_loss = check_training_loss
def __call__(self, epoch_nr, update_nr, net, stepper, logs):
for log_name in self.logs_to_check:
log = get_by_path(logs, log_name)
if not np.all(np.isfinite(log)):
self.message("NaN or inf detected in {}!".format(log_name))
raise StopIteration()
if self.check_parameters:
if not net.handler.is_fully_finite(net.buffer.parameters):
self.message("NaN or inf detected in parameters!")
raise StopIteration()
if self.check_training_loss and 'rolling_training' in logs:
rtrain = logs['rolling_training']
if 'total_loss' in rtrain:
loss = rtrain['total_loss']
else:
loss = rtrain['Loss']
if not np.all(np.isfinite(loss)):
self.message("NaN or inf detected in rolling training loss!")
raise StopIteration()
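# Illustrative usage sketch (not part of the original module): assuming the same
# `trainer` and `bs` objects as in the EarlyStopper docstring examples above, a
# StopOnNan hook that also watches the validation loss could be added as:
#
#     >>> trainer.add_hook(bs.hooks.StopOnNan(
#     ...     logs_to_check=['validation.total_loss']))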
class StopOnSigQuit(Hook):
"""
Stop training after the next call if it received a SIGQUIT (Ctrl + \).
This hook makes it possible to exit the training loop and continue with
the rest of the program execution.
Args:
name (Optional[str]):
Name of this monitor. This name is used as a key in the trainer
logs. Default is 'StopOnSigQuit'.
timescale (Optional[str]):
Specifies whether the Monitor should be called after each epoch or
after each update. Default is 'epoch'.
interval (Optional[int]):
This monitor should be called every ``interval`` epochs/updates.
Default is 1.
verbose: bool, optional
Specifies whether the logs of this monitor should be printed, and
acts as a fallback verbosity for the used data iterator.
If not set it defaults to the verbosity setting of the trainer.
"""
__undescribed__ = {'quit': False}
def __init__(self, name=None, timescale='epoch', interval=1, verbose=None):
super(StopOnSigQuit, self).__init__(name, timescale, interval,
verbose=verbose)
self.quit = False
def start(self, net, stepper, verbose, named_data_iters):
super(StopOnSigQuit, self).start(net, stepper, verbose,
named_data_iters)
self.quit = False
signal.signal(signal.SIGQUIT, self.receive_signal)
def receive_signal(self, signum, stack):
self.message('Interrupting')
self.quit = True
def __call__(self, epoch_nr, update_nr, net, stepper, logs):
if self.quit:
raise StopIteration('Received SIGQUIT signal.')
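# Illustrative usage sketch (not part of the original module): assuming the same
# `trainer` and `bs` objects as above, pressing Ctrl + \ sends SIGQUIT and
# training stops at this hook's next call once it is installed:
#
#     >>> trainer.add_hook(bs.hooks.StopOnSigQuit())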
# ------------------------------ Visualizers -------------------------------- #
if not optional.has_bokeh:
BokehVisualizer = optional.bokeh_mock
else:
import bokeh.plotting as bk
import warnings
class BokehVisualizer(Hook):
"""
Visualizes log values in your browser during training time using the
Bokeh plotting library.
Before running the trainer the user is required to have the Bokeh
Server running.
By default the visualization is discarded upon closing the webbrowser.
However if an output file is specified then the .html file will be
saved after each iteration at the specified location.
Args:
log_names (list, array):
Contains the name of the logs that are being recorded to be
visualized. log_names should be of the form
<monitorname>.<log_name> where log_name itself may be a nested
dictionary key in dotted notation.
filename (Optional, str):
The location to which the .html file containing the accuracy
plot should be saved.
timescale (Optional[str]):
Specifies whether the Monitor should be called after each
epoch or after each update. Default is 'epoch'
interval (Optional[int]):
This monitor should be called every ``interval``
number of epochs/updates. Default is 1.
name (Optional[str]):
Name of this monitor. This name is used as a key in the trainer
logs. Default is 'MonitorScores'
verbose: bool, optional
Specifies whether the logs of this monitor should be printed,
and acts as a fallback verbosity for the used data iterator.
If not set it defaults to the verbosity setting of the trainer.
"""
def __init__(self, log_names, filename=None, timescale='epoch',
interval=1, name=None, verbose=None):
super(BokehVisualizer, self).__init__(name, timescale, interval,
verbose)
if isinstance(log_names, string_types):
self.log_names = [log_names]
elif isinstance(log_names, (tuple, list)):
self.log_names = log_names
else:
raise ValueError('log_names must be either str or list but'
' was {}'.format(type(log_names)))
self.filename = filename
self.bk = bk
self.TOOLS = "resize,crosshair,pan,wheel_zoom,box_zoom,reset,save"
self.colors = ['blue', 'green', 'red', 'olive', 'cyan', 'aqua',
'gray']
warnings.filterwarnings('error')
try:
self.bk.output_server(self.__name__)
warnings.resetwarnings()
except Warning:
raise StopIteration('Bokeh server is not running')
self.fig = self.bk.figure(
title=self.__name__, x_axis_label=self.timescale,
y_axis_label='value', tools=self.TOOLS,
plot_width=1000, x_range=(0, 25), y_range=(0, 1))
def start(self, net, stepper, verbose, named_data_iters):
count = 0
# create empty line objects
for log_name in self.log_names:
self.fig.line([], [], legend=log_name, line_width=2,
color=self.colors[count % len(self.colors)],
name=log_name)
count += 1
self.bk.show(self.fig)
self.bk.output_file('bokeh_visualisation.html',
title=self.__name__, mode='cdn')
def __call__(self, epoch_nr, update_nr, net, stepper, logs):
if epoch_nr == 0:
return
for log_name in self.log_names:
renderer = self.fig.select(dict(name=log_name))
datasource = renderer[0].data_source
datasource.data["y"] = get_by_path(logs, log_name)
datasource.data["x"] = range(len(datasource.data["y"]))
self.bk.cursession().store_objects(datasource)
if self.filename is not None:
self.bk.save(self.fig, filename=self.filename + ".html")
class ProgressBar(Hook):
""" Adds a progress bar to show the training progress. """
def __init__(self):
super(ProgressBar, self).__init__(None, 'update', 1)
self.length = None
self.bar = None
def start(self, net, stepper, verbose, named_data_iters):
assert 'training_data_iter' in named_data_iters
self.length = named_data_iters['training_data_iter'].length
def __call__(self, epoch_nr, update_nr, net, stepper, logs):
assert epoch_nr == 0 or math.ceil(update_nr / self.length) == epoch_nr
if update_nr % self.length == 1:
self.bar = progress_bar(self.length)
print(next(self.bar), end='')
sys.stdout.flush()
elif update_nr % self.length == 0:
if self.bar:
print(self.bar.send(self.length))
else:
print(self.bar.send(update_nr % self.length), end='')
sys.stdout.flush()
# ----------------------------- Miscellaneous ------------------------------- #
class InfoUpdater(Hook):
""" Save the information from logs to the Sacred custom info dict"""
def __init__(self, run, name=None, timescale='epoch', interval=1):
super(InfoUpdater, self).__init__(name, timescale, interval)
self.run = run
def __call__(self, epoch_nr, update_nr, net, stepper, logs):
info = self.run.info
info['epoch_nr'] = epoch_nr
info['update_nr'] = update_nr
info['logs'] = logs
if 'nr_parameters' not in info:
info['nr_parameters'] = net.buffer.parameters.size
class ModifyStepperAttribute(Hook):
"""Modify an attribute of the training stepper."""
def __init__(self, schedule, attr_name='learning_rate',
timescale='epoch', interval=1, name=None, verbose=None):
super(ModifyStepperAttribute, self).__init__(name, timescale,
interval, verbose)
self.schedule = schedule
self.attr_name = attr_name
def start(self, net, stepper, verbose, monitor_kwargs):
super(ModifyStepperAttribute, self).start(net, stepper, verbose,
monitor_kwargs)
assert hasattr(stepper, self.attr_name), \
"The stepper {} does not have the attribute {}".format(
stepper.__class__.__name__, self.attr_name)
def __call__(self, epoch_nr, update_nr, net, stepper, logs):
setattr(stepper, self.attr_name,
self.schedule(epoch_nr, update_nr, self.timescale,
self.interval, net, stepper, logs))
|
src/encoded/tests/test_schema_computational_model.py | procha2/encoded | 102 | 12622443 | import pytest
def test_unique_software(testapp, computational_model_unique_software):
res = testapp.post_json('/computational_model', computational_model_unique_software, expect_errors=True)
assert res.status_code == 201
def test_non_unique_software(testapp, computational_model_non_unique_software):
res = testapp.post_json('/computational_model', computational_model_non_unique_software, expect_errors=True)
assert res.status_code == 422
|
qttbx/viewers/chimerax.py | dperl-sol/cctbx_project | 155 | 12622454 |
"""
Interface for ChimeraX using ISOLDE REST API client
https://www.cgl.ucsf.edu/chimerax/
https://isolde.cimr.cam.ac.uk/what-isolde/
"""
from __future__ import absolute_import, division, print_function
import os
import subprocess
import sys
import tempfile
from libtbx.utils import to_str, Sorry
from qttbx.viewers import ModelViewer
# =============================================================================
class ChimeraXViewer(ModelViewer):
viewer_name = 'ChimeraX'
# ---------------------------------------------------------------------------
def start_viewer(self):
self.run_basic_checks()
# write script
with tempfile.NamedTemporaryFile(mode='w', suffix='.cxc', delete=False) as self.script:
self.script.write('isolde remote rest start port {port}'.format(port=self.port))
# start viewer
command = [self.command, self.script.name]
self.process = subprocess.Popen(args=command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=False)
# connect to viewer
#self._connect()
# ---------------------------------------------------------------------------
def close_viewer(self):
if os.path.isfile(self.script.name):
os.remove(self.script.name)
# ---------------------------------------------------------------------------
def load_model(self, filename=None):
model_id = None
if os.path.isfile(filename):
model_id = self._client.load_model(filename)
else:
raise Sorry('Model file ({filename}) is not found.'.format(filename=filename))
return model_id
# ---------------------------------------------------------------------------
def _connect(self):
# find client.py in ISOLDE
client_path = None
with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as python_script:
python_script.write("""\
from chimerax import isolde
print(isolde.__file__)
""")
command = [self.command, '--nogui', '--exit', '--script', python_script.name]
python_process = subprocess.Popen(args=command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=False)
stdout, stderr = python_process.communicate()
python_process.wait()
if python_process.returncode != 0:
raise Sorry('The ISOLDE installation location could not be found.')
else:
if os.path.isfile(python_script.name):
try:
os.remove(python_script.name)
except IOError:
pass
stdout = to_str(stdout).split('\n')
line = None
for line in stdout:
if 'isolde' in line:
break
client_path = os.path.abspath(
os.path.join(line, '..', 'remote_control', 'rest_server', 'client.py'))
if client_path is not None and os.path.isfile(client_path):
# import from file
if sys.version_info.major > 2:
import importlib.util
spec = importlib.util.spec_from_file_location('client', client_path)
client = importlib.util.module_from_spec(spec)
spec.loader.exec_module(client)
else:
import imp
client = imp.load_source('client', client_path)
self._client = client.IsoldeRESTClient('localhost', self.port)
self._client.connect()
self._connected = True
# =============================================================================
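# Illustrative usage sketch (not part of this module): `command` and `port` are
# expected to come from the ModelViewer base class configuration, and
# load_model() additionally needs the REST client that _connect() sets up; a
# hypothetical session could look like:
#
#   viewer = ChimeraXViewer()
#   viewer.start_viewer()                      # launches ChimeraX with the ISOLDE REST server
#   model_id = viewer.load_model('model.pdb')  # requires the client connection
#   viewer.close_viewer()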
|
RecoParticleFlow/PFClusterProducer/python/particleFlowZeroSuppressionECAL_cff.py | ckamtsikis/cmssw | 852 | 12622467 | import FWCore.ParameterSet.Config as cms
pfZeroSuppressionThresholds_EB = [0.080]*170
pfZeroSuppressionThresholds_EEminus = [0.300]*39
pfZeroSuppressionThresholds_EEplus = pfZeroSuppressionThresholds_EEminus
#
# These are expected to be adjusted soon, while the thresholds for older setups will remain unchanged.
#
_pfZeroSuppressionThresholds_EB_2017 = pfZeroSuppressionThresholds_EB
_pfZeroSuppressionThresholds_EEminus_2017 = pfZeroSuppressionThresholds_EEminus
_pfZeroSuppressionThresholds_EEplus_2017 = _pfZeroSuppressionThresholds_EEminus_2017
#
# The three different set of thresholds will be used to study
# possible new thresholds of pfrechits and effects on high level objects
# The values proposed (A, B, C) are driven by expected noise levels
# A ~ 2.0 sigma noise equivalent thresholds
# B ~ 1.0 sigma noise equivalent thresholds
# C ~ 0.5 sigma noise equivalent thresholds
#
# A
_pfZeroSuppressionThresholds_EB_2018_A = [0.180]*170
_pfZeroSuppressionThresholds_EEminus_2018_A = [0.22, 0.22, 0.24, 0.26, 0.28, 0.3, 0.32, 0.34, 0.34, 0.36, 0.36, 0.38, 0.38, 0.4, 0.44, 0.46, 0.5, 0.54, 0.58, 0.62, 0.68, 0.72, 0.78, 0.84, 0.9, 1.0, 1.14, 1.36, 1.68, 2.14, 2.8, 3.76, 5.1, 6.94, 9.46, 12.84, 17.3, 23.2, 30.8]
_pfZeroSuppressionThresholds_EEplus_2018_A = _pfZeroSuppressionThresholds_EEminus_2018_A
_particle_flow_zero_suppression_ECAL_2018_A = cms.PSet(
thresholds = cms.vdouble(_pfZeroSuppressionThresholds_EB_2018_A + _pfZeroSuppressionThresholds_EEminus_2018_A + _pfZeroSuppressionThresholds_EEplus_2018_A
)
)
# B
_pfZeroSuppressionThresholds_EB_2018_B = [0.140]*170
_pfZeroSuppressionThresholds_EEminus_2018_B = [0.11, 0.11, 0.12, 0.13, 0.14, 0.15, 0.16, 0.17, 0.17, 0.18, 0.18, 0.19, 0.19, 0.20, 0.22, 0.23, 0.25, 0.27, 0.29, 0.31, 0.34, 0.36, 0.39, 0.42, 0.45, 0.50, 0.57, 0.68, 0.84, 1.07, 1.40, 1.88, 2.55, 3.47, 4.73, 6.42, 8.65, 11.6, 15.4]
_pfZeroSuppressionThresholds_EEplus_2018_B = _pfZeroSuppressionThresholds_EEminus_2018_B
_particle_flow_zero_suppression_ECAL_2018_B = cms.PSet(
thresholds = cms.vdouble(_pfZeroSuppressionThresholds_EB_2018_B + _pfZeroSuppressionThresholds_EEminus_2018_B + _pfZeroSuppressionThresholds_EEplus_2018_B
)
)
# C
_pfZeroSuppressionThresholds_EB_2018_C = [0.100]*170
_pfZeroSuppressionThresholds_EEminus_2018_C = [0.055, 0.055, 0.06, 0.065, 0.07, 0.075, 0.08, 0.085, 0.085, 0.09, 0.09, 0.095, 0.095, 0.1, 0.11, 0.115, 0.125, 0.135, 0.145, 0.155, 0.17, 0.18, 0.195, 0.21, 0.225, 0.25, 0.285, 0.34, 0.42, 0.535, 0.7, 0.94, 1.275, 1.735, 2.365, 3.21, 4.325, 5.8, 7.7 ]
_pfZeroSuppressionThresholds_EEplus_2018_C = _pfZeroSuppressionThresholds_EEminus_2018_C
_particle_flow_zero_suppression_ECAL_2018_C = cms.PSet(
thresholds = cms.vdouble(_pfZeroSuppressionThresholds_EB_2018_C + _pfZeroSuppressionThresholds_EEminus_2018_C + _pfZeroSuppressionThresholds_EEplus_2018_C
)
)
particle_flow_zero_suppression_ECAL = cms.PSet(
thresholds = cms.vdouble(pfZeroSuppressionThresholds_EB + pfZeroSuppressionThresholds_EEminus + pfZeroSuppressionThresholds_EEplus
)
)
_particle_flow_zero_suppression_ECAL_2017 = cms.PSet(
thresholds = cms.vdouble(_pfZeroSuppressionThresholds_EB_2017 + _pfZeroSuppressionThresholds_EEminus_2017 + _pfZeroSuppressionThresholds_EEplus_2017
)
)
#
# The thresholds have been temporarily removed (lowered to 80 MeV in EB and 300 MeV in EE,
# then overseeded by the gathering and seeding PF cluster thresholds)
# Later, we may need to reintroduce eta dependent thresholds
# to mitigate the effect of the noise
#
from Configuration.Eras.Modifier_run2_ECAL_2017_cff import run2_ECAL_2017
run2_ECAL_2017.toReplaceWith(particle_flow_zero_suppression_ECAL, _particle_flow_zero_suppression_ECAL_2017)
from Configuration.Eras.Modifier_phase2_ecal_cff import phase2_ecal
phase2_ecal.toReplaceWith(particle_flow_zero_suppression_ECAL, _particle_flow_zero_suppression_ECAL_2017)
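# Illustrative sketch (not part of this configuration): a threshold study could
# swap in one of the alternative 2018 sets (A, B or C) with the same
# toReplaceWith pattern used above; the modifier name below is hypothetical and
# only for illustration.
#
# study_2018_A.toReplaceWith(particle_flow_zero_suppression_ECAL, _particle_flow_zero_suppression_ECAL_2018_A)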
|
python/mleap/sklearn/base.py | neilsummers/mleap | 1,401 | 12622517 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sklearn.linear_model import LinearRegression
from mleap.bundle.serialize import MLeapSerializer, MLeapDeserializer
import uuid
import os
import numpy as np
def serialize_to_bundle(self, path, model_name):
serializer = SimpleSerializer()
return serializer.serialize_to_bundle(self, path, model_name)
def deserialize_from_bundle(self, path, node_name):
serializer = SimpleSerializer()
return serializer.deserialize_from_bundle(self, path, node_name)
def mleap_init(self, input_features, prediction_column):
self.input_features = input_features
self.prediction_column = prediction_column
self.name = "{}_{}".format(self.op, uuid.uuid1())
setattr(LinearRegression, 'op', 'linear_regression')
setattr(LinearRegression, 'mlinit', mleap_init)
setattr(LinearRegression, 'serialize_to_bundle', serialize_to_bundle)
setattr(LinearRegression, 'deserialize_from_bundle', deserialize_from_bundle)
setattr(LinearRegression, 'serializable', True)
class SimpleSerializer(MLeapSerializer, MLeapDeserializer):
def __init__(self):
super(SimpleSerializer, self).__init__()
@staticmethod
def set_prediction_column(transformer, prediction_column):
transformer.prediction_column = prediction_column
@staticmethod
def set_input_features(transformer, input_features):
transformer.input_features = input_features
def serialize_to_bundle(self, transformer, path, model_name):
# compile tuples of model attributes to serialize
attributes = list()
attributes.append(('intercept', transformer.intercept_.tolist()[0]))
attributes.append(('coefficients', transformer.coef_.tolist()[0]))
# define node inputs and outputs
inputs = [{
"name": transformer.input_features,
"port": "features"
}]
outputs = [{
"name": transformer.prediction_column,
"port": "prediction"
}]
self.serialize(transformer, path, model_name, attributes, inputs, outputs)
def deserialize_from_bundle(self, transformer, node_path, node_name):
attributes_map = {
'coefficients': 'coef_',
'intercept': 'intercept_'
}
# Set serialized attributes
full_node_path = os.path.join(node_path, node_name)
transformer = self.deserialize_single_input_output(transformer, full_node_path, attributes_map)
# Set Additional Attributes
if 'intercept_' in transformer.__dict__:
transformer.fit_intercept = True
else:
transformer.fit_intercept = False
transformer.coef_ = np.array([transformer.coef_])
return transformer
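# Illustrative usage sketch (not part of this module): after the setattr() calls
# above, a plain scikit-learn LinearRegression gains the MLeap methods; the
# column names and bundle path below are made up for the example.
#
#     from sklearn.linear_model import LinearRegression
#     lr = LinearRegression()
#     lr.mlinit(input_features='features', prediction_column='prediction')
#     lr.fit(X, y)                              # X, y assumed prepared elsewhere
#     lr.serialize_to_bundle('/tmp/models', lr.name)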
|
ppci/lang/python/python2ir.py | kl4w3i/ppci | 161 | 12622525 |
""" Python to IR compilation.
"""
import logging
import ast
import contextlib
import inspect
from ... import ir, irutils
from ...common import SourceLocation, CompilerError
from ...binutils import debuginfo
def python_to_ir(f, imports=None):
"""Compile a piece of python code to an ir module.
Args:
f (file-like-object): a file like object containing the python code
imports: Dictionary with symbols that are present.
Returns:
A :class:`ppci.ir.Module` module
.. doctest::
>>> import io
>>> from ppci.lang.python import python_to_ir
>>> f = io.StringIO("def calc(x: int) -> int: return x + 133")
>>> python_to_ir(f)
<ppci.ir.Module object at ...>
"""
mod = PythonToIrCompiler().compile(f, imports=imports)
return mod
class Var:
def __init__(self, value, lvalue, ty):
self.value = value
self.lvalue = lvalue
self.ty = ty
class PythonToIrCompiler:
""" Not peer-to-peer but python to ppci :) """
logger = logging.getLogger("p2p")
def __init__(self):
self.type_mapping = {"int": ir.i64, "float": ir.f64, "str": ir.ptr}
def compile(self, f, imports=None):
"""Convert python into IR-code.
Arguments:
f: the file-like object with the python code
Returns:
the ir-module.
"""
self.debug_db = debuginfo.DebugDb()
src = f.read()
self._filename = getattr(f, "name", None)
# Parse python code:
x = ast.parse(src)
self.function_map = {}
self.builder = irutils.Builder()
self.builder.prepare()
self.builder.set_module(ir.Module("foo", debug_db=self.debug_db))
if imports:
# Fill imported functions:
for name, signature in imports.items():
self.gen_import(name, signature)
for df in x.body:
self.logger.debug("Processing %s", df)
if isinstance(df, ast.FunctionDef):
self.gen_function(df)
else:
self.not_impl(df)
mod = self.builder.module
irutils.verify_module(mod)
return mod
def gen_import(self, name, signature):
# Determine function type:
if isinstance(signature, tuple):
return_type, arg_types = signature
else:
# Assume that we have a function:
signature = inspect.signature(signature)
if signature.return_annotation is inspect.Signature.empty:
return_type = None
else:
return_type = signature.return_annotation
arg_types = [p.annotation for p in signature.parameters.values()]
# Create external function:
ir_arg_types = [self.get_ty(t) for t in arg_types]
if return_type:
ir_function = ir.ExternalFunction(
name, ir_arg_types, self.get_ty(return_type)
)
else:
ir_function = ir.ExternalProcedure(name, ir_arg_types)
self.builder.module.add_external(ir_function)
self.function_map[name] = ir_function, return_type, arg_types
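# Illustrative note (not part of the original source): each entry of the
# ``imports`` mapping handled above may be either a real Python callable
# (whose signature is inspected) or a ``(return_type, [arg_types])`` tuple,
# e.g. for a hypothetical character-output routine:
#
#     python_to_ir(f, imports={'putc': (None, [int])})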
def gen_function(self, df):
""" Transform a python function into an IR-function """
self.local_map = {}
function_name = df.name
binding = ir.Binding.GLOBAL
dbg_int = debuginfo.DebugBaseType("int", 8, 1)
return_type = self.get_ty(df.returns)
if return_type:
ir_function = self.builder.new_function(
function_name, binding, return_type
)
else:
ir_function = self.builder.new_procedure(function_name, binding)
dbg_args = []
arg_types = []
for arg in df.args.args:
if not arg.annotation:
self.error(arg, "Need type annotation for {}".format(arg.arg))
aty = self.get_ty(arg.annotation)
arg_types.append(aty)
arg_name = arg.arg
# Debug info:
param = ir.Parameter(arg_name, aty)
dbg_args.append(debuginfo.DebugParameter(arg_name, dbg_int))
ir_function.add_parameter(param)
# Register function as known:
self.function_map[function_name] = ir_function, return_type, arg_types
self.logger.debug("Created function %s", ir_function)
self.builder.block_number = 0
self.builder.set_function(ir_function)
dfi = debuginfo.DebugFunction(
ir_function.name,
SourceLocation("foo.py", 1, 1, 1),
dbg_int,
dbg_args,
)
self.debug_db.enter(ir_function, dfi)
first_block = self.builder.new_block()
self.builder.set_block(first_block)
ir_function.entry = first_block
# Copy the parameters to variables (so they can be modified):
for parameter in ir_function.arguments:
# self.local_map[name] = Var(param, False, aty)
para_var = self.get_variable(df, parameter.name, ty=parameter.ty)
self.emit(ir.Store(parameter, para_var.value))
self.block_stack = []
self.gen_statement(df.body)
assert not self.block_stack
# Return if not yet done
if not self.builder.block.is_closed:
if return_type:
if self.builder.block.is_empty:
pass
else:
raise NotImplementedError()
else:
self.emit(ir.Exit())
# TODO: ugly:
ir_function.delete_unreachable()
def gen_statement(self, statement):
""" Generate code for a statement """
if isinstance(statement, list):
for inner_statement in statement:
self.gen_statement(inner_statement)
else:
with self.use_location(statement):
if isinstance(statement, ast.Pass):
pass # No comments :)
elif isinstance(statement, ast.Return):
self.gen_return(statement)
elif isinstance(statement, ast.If):
self.gen_if(statement)
elif isinstance(statement, ast.While):
self.gen_while(statement)
elif isinstance(statement, ast.Break):
self.gen_break(statement)
elif isinstance(statement, ast.Continue):
self.gen_continue(statement)
elif isinstance(statement, ast.For):
self.gen_for(statement)
elif isinstance(statement, ast.Assign):
self.gen_assign(statement)
elif isinstance(statement, ast.Expr):
self.gen_expr(statement.value)
elif isinstance(statement, ast.AugAssign):
self.gen_aug_assign(statement)
else: # pragma: no cover
self.not_impl(statement)
def gen_break(self, statement):
break_block = self.block_stack[-1][1]
self.builder.emit_jump(break_block)
unreachable_block = self.builder.new_block()
self.builder.set_block(unreachable_block)
def gen_continue(self, statement):
continue_block = self.block_stack[-1][0]
self.builder.emit_jump(continue_block)
unreachable_block = self.builder.new_block()
self.builder.set_block(unreachable_block)
def gen_return(self, statement):
""" Compile return statement. """
if self.builder.function.is_procedure:
if statement.value:
self.error(
statement,
"Cannot return a value from a function without return type.",
)
self.builder.emit_exit()
else:
if not statement.value:
self.error(
statement, "Must return a value from this function."
)
value = self.gen_expr(statement.value)
self.builder.emit_return(value)
void_block = self.builder.new_block()
self.builder.set_block(void_block)
def gen_if(self, statement):
""" Compile a python if-statement. """
ja_block = self.builder.new_block()
else_block = self.builder.new_block()
continue_block = self.builder.new_block()
self.gen_cond(statement.test, ja_block, else_block)
# Yes
self.builder.set_block(ja_block)
self.gen_statement(statement.body)
self.builder.emit_jump(continue_block)
# Else:
self.builder.set_block(else_block)
self.gen_statement(statement.orelse)
self.builder.emit_jump(continue_block)
self.builder.set_block(continue_block)
def gen_while(self, statement):
""" Compile python while-statement. """
if statement.orelse:
self.error(statement, "while-else not supported")
test_block = self.builder.new_block()
body_block = self.builder.new_block()
final_block = self.builder.new_block()
# Test:
self.builder.emit_jump(test_block)
self.builder.set_block(test_block)
self.gen_cond(statement.test, body_block, final_block)
# Body:
self.enter_loop(test_block, final_block)
self.builder.set_block(body_block)
self.gen_statement(statement.body)
self.builder.emit_jump(test_block)
self.leave_loop()
# The end:
self.builder.set_block(final_block)
def gen_for(self, statement):
""" Compile python for-statement. """
# Check else-clause:
if statement.orelse:
self.error(statement, "for-else not supported")
# Allow for loop with range in it:
if not isinstance(statement.iter, ast.Call):
self.error(statement.iter, "Only range supported in for loops")
if statement.iter.func.id != "range":
self.error(statement.iter, "Only range supported in for loops")
# Determine start and end values:
ra = statement.iter.args
if len(ra) == 1:
i_init = self.builder.emit_const(0, ir.i64)
n2 = self.gen_expr(ra[0])
elif len(ra) == 2:
i_init = self.gen_expr(ra[0])
n2 = self.gen_expr(ra[1])
else:
self.error(
statement.iter,
"Does not support {} arguments".format(len(ra)),
)
entry_block = self.builder.block
test_block = self.builder.new_block()
body_block = self.builder.new_block()
final_block = self.builder.new_block()
self.emit(ir.Jump(test_block))
# Test block:
self.builder.set_block(test_block)
i_phi = self.emit(ir.Phi("i_phi", ir.i64))
i_phi.set_incoming(entry_block, i_init)
self.emit(ir.CJump(i_phi, "<", n2, body_block, final_block))
# Publish looping variable:
self.local_map[statement.target.id] = Var(i_phi, False, ir.i64)
# Body:
self.enter_loop(test_block, final_block)
self.builder.set_block(body_block)
self.gen_statement(statement.body)
self.leave_loop()
# Increment loop variable:
one = self.builder.emit_const(1, ir.i64)
i_inc = self.builder.emit_add(i_phi, one, ir.i64)
i_phi.set_incoming(body_block, i_inc)
# Jump to start again:
self.builder.emit_jump(test_block)
# The end:
self.builder.set_block(final_block)
def gen_assign(self, statement):
""" Compile assignment-statement. """
if len(statement.targets) == 1:
target = statement.targets[0]
else:
self.error(
statement, "Only a single assignment target is supported."
)
if isinstance(target, ast.Name):
value = self.gen_expr(statement.value)
self.store_value(target, value)
elif isinstance(target, ast.Tuple):
# Tuple assign like: 'a, b, c = 1, 2, 3'
assert isinstance(statement.value, ast.Tuple)
values = statement.value.elts
targets = target.elts
assert len(statement.value.elts) == len(targets)
values = [self.gen_expr(v) for v in values]
for target, value in zip(targets, values):
self.store_value(target, value)
else: # pragma: no cover
self.not_impl(statement)
def gen_aug_assign(self, statement):
"""Compile augmented assign.
For example: 'a += 2'
"""
target = statement.target
if isinstance(target, ast.Name):
name = target.id
assert isinstance(name, str)
var = self.get_variable(target, name)
assert var.lvalue
lhs = self.builder.emit_load(var.value, var.ty)
rhs = self.gen_expr(statement.value)
op = self.binop_map[type(statement.op)]
value = self.emit(ir.Binop(lhs, op, rhs, "augassign", var.ty))
self.emit(ir.Store(value, var.value))
else: # pragma: no cover
self.not_impl(statement)
def store_value(self, target, value):
""" Store an IR-value into a target node. """
assert isinstance(target, ast.Name)
name = target.id
var = self.get_variable(target, name, ty=value.ty)
assert var.lvalue
self.emit(ir.Store(value, var.value))
def gen_cond(self, condition, yes_block, no_block):
""" Compile a condition. """
if isinstance(condition, ast.Compare):
self.gen_compare(condition, yes_block, no_block)
elif isinstance(condition, ast.BoolOp):
self.gen_bool_op(condition, yes_block, no_block)
else: # pragma: no cover
self.not_impl(condition)
def gen_compare(self, condition, yes_block, no_block):
# print(dir(c), c.ops, c.comparators)
# TODO: chained operators! ( 'a < b < c < d' )
assert len(condition.ops) == len(condition.comparators)
assert len(condition.ops) == 1
op_map = {
ast.Gt: ">",
ast.GtE: ">=",
ast.Lt: "<",
ast.LtE: "<=",
ast.Eq: "==",
ast.NotEq: "!=",
}
a = self.gen_expr(condition.left)
op = op_map[type(condition.ops[0])]
b = self.gen_expr(condition.comparators[0])
if a.ty is not b.ty:
self.error(condition, "Type mismatch, types must be the same.")
self.emit(ir.CJump(a, op, b, yes_block, no_block))
def gen_bool_op(self, condition, yes_block, no_block):
""" Compile a boolean operator such as 'and' """
assert len(condition.values) >= 1
first_values = condition.values[:-1]
last_value = condition.values[-1]
if isinstance(condition.op, ast.And):
# All values must be true here,
# so bail out on first false value.
for value in first_values:
all_true_block = self.builder.new_block()
self.gen_cond(value, all_true_block, no_block)
self.builder.set_block(all_true_block)
self.gen_cond(last_value, yes_block, no_block)
elif isinstance(condition.op, ast.Or):
# The first true value is enough to make this work!
for value in first_values:
all_false_block = self.builder.new_block()
self.gen_cond(value, yes_block, all_false_block)
self.builder.set_block(all_false_block)
self.gen_cond(last_value, yes_block, no_block)
else: # pragma: no cover
self.not_impl(condition)
def gen_expr(self, expr):
""" Generate code for a single expression """
with self.use_location(expr):
if isinstance(expr, ast.BinOp):
value = self.gen_binop(expr)
elif isinstance(expr, ast.Name):
value = self.gen_name(expr)
elif isinstance(expr, ast.Call):
value = self.gen_call(expr)
elif hasattr(ast, "Constant") and isinstance(expr, ast.Constant):
# Exists in Python 3.6+, generated in Python 3.8+
value = expr.value
if isinstance(value, str):
value = self.gen_string_constant(expr, value)
elif isinstance(value, (int, float)):
value = self.gen_num(expr, value)
else: # pragma: no cover
self.not_impl(expr)
elif isinstance(expr, ast.Num): # Python < 3.8
value = self.gen_num(expr, expr.n)
elif isinstance(expr, ast.Str): # Python < 3.8
value = self.gen_string_constant(expr, expr.s)
else: # pragma: no cover
self.not_impl(expr)
return value
def gen_name(self, expr):
""" Compile name node access. """
var = self.local_map[expr.id]
if var.lvalue:
value = self.builder.emit_load(var.value, var.ty)
else:
value = var.value
return value
binop_map = {
ast.Add: "+",
ast.Sub: "-",
ast.Mult: "*",
ast.Div: "/",
ast.FloorDiv: "/",
}
def gen_binop(self, expr):
""" Compile binary operator. """
a = self.gen_expr(expr.left)
b = self.gen_expr(expr.right)
if a.ty is not b.ty:
self.error(expr, "Type mismatch, types must be the same.")
# TODO: automatic coercion
# TODO: assume type of a?
ty = a.ty
op_typ = type(expr.op)
if op_typ in self.binop_map:
op = self.binop_map[op_typ]
else:
self.not_impl(expr)
value = self.builder.emit_binop(a, op, b, ty)
return value
def gen_call(self, expr):
""" Compile call-expression. """
assert isinstance(expr.func, ast.Name)
name = expr.func.id
# Lookup function and check types:
ir_function, return_type, arg_types = self.function_map[name]
self.logger.warning("Function arguments not type checked!")
# Evaluate arguments:
args = [self.gen_expr(a) for a in expr.args]
# Emit call:
if return_type:
value = self.emit(
ir.FunctionCall(ir_function, args, "res", return_type)
)
else:
self.emit(ir.ProcedureCall(ir_function, args))
value = None
return value
def gen_num(self, expr, num):
if isinstance(num, int):
value = self.builder.emit_const(num, ir.i64)
elif isinstance(num, float):
value = self.builder.emit_const(num, ir.f64)
else: # pragma: no cover
self.not_impl(expr)
return value
def gen_string_constant(self, expr, value):
data = value.encode("utf8") + bytes([0])
string_constant = self.emit(ir.LiteralData(data, "string_constant"))
value = self.emit(ir.AddressOf(string_constant, "string_constant_ptr"))
return value
# Helper functions:
def get_variable(self, node, name, ty=None):
"""Retrieve a variable, or create it if type is given."""
if name in self.local_map:
var = self.local_map[name]
else:
# Create a variable with the given name
# TODO: for now i64 is assumed to be the only type!
if ty is None:
self.error(node, "Undefined variable")
else:
mem = self.emit(ir.Alloc("alloc_{}".format(name), 8, 8))
addr = self.emit(ir.AddressOf(mem, "addr_{}".format(name)))
var = Var(addr, True, ty)
self.local_map[name] = var
return var
def common_type(self, ty1, ty2):
"""Determine the best target type for two input types.
For example,
(float, int) -> float
(int, int) -> int
"""
type_ranks = {
float: 10,
int: 5,
}
pass
def coerce(self, value, ty):
""" Try to fit the value into a new value of a type. """
return value
def not_impl(self, node): # pragma: no cover
print(dir(node))
self.error(node, "Cannot do {}".format(node))
def node_location(self, node):
""" Create a source code location for the given node. """
location = SourceLocation(
self._filename, node.lineno, node.col_offset + 1, 1
)
return location
def error(self, node, message):
""" Raise a nice error message as feedback """
location = self.node_location(node)
raise CompilerError(message, location)
def emit(self, instruction):
""" Emit an instruction """
self.builder.emit(instruction)
return instruction
def get_ty(self, annotation) -> ir.Typ:
""" Get the type based on type annotation """
if isinstance(annotation, type):
type_name = annotation.__name__
elif annotation is None:
return
else:
if (
isinstance(annotation, ast.NameConstant)
and annotation.value is None
):
return
type_name = annotation.id
if type_name in self.type_mapping:
return self.type_mapping[type_name]
else:
self.error(annotation, "Unhandled type: {}".format(type_name))
def enter_loop(self, continue_block, break_block):
self.block_stack.append((continue_block, break_block))
def leave_loop(self):
self.block_stack.pop()
@contextlib.contextmanager
def use_location(self, node):
"""Use the location of the node for all code generated
within the with clause.
"""
location = self.node_location(node)
self.builder.push_location(location)
yield
self.builder.pop_location()
|
api/utils/local.py | wkma/bk-sops | 881 | 12622544 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from contextlib import contextmanager
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from six.moves._thread import get_ident
except ImportError:
from _thread import get_ident
__all__ = ["local", "Local", "get_ident"]
"""Thread-local/Greenlet-local objects
Thread-local/Greenlet-local objects support the management of
thread-local/greenlet-local data. If you have data that you want
to be local to a thread/greenlet, simply create a
thread-local/greenlet-local object and use its attributes:
>>> mydata = Local()
>>> mydata.number = 42
>>> mydata.number
42
>>> hasattr(mydata, 'number')
True
>>> hasattr(mydata, 'username')
False
Reference :
from threading import local
"""
class Localbase(object):
__slots__ = ("__storage__", "__ident_func__")
def __new__(cls, *args, **kwargs):
self = object.__new__(cls, *args, **kwargs)
object.__setattr__(self, "__storage__", {})
object.__setattr__(self, "__ident_func__", get_ident)
return self
class Local(Localbase):
def __iter__(self):
ident = self.__ident_func__()
try:
return iter(list(self.__storage__[ident].items()))
except KeyError:
return iter([])
def __release_local__(self):
self.__storage__.pop(self.__ident_func__(), None)
def __getattr__(self, name):
ident = self.__ident_func__()
try:
return self.__storage__[ident][name]
except KeyError:
raise AttributeError(name)
def __setattr__(self, name, value):
if name in ("__storage__", "__ident_func__"):
raise AttributeError("%r object attribute '%s' is read-only" % (self.__class__.__name__, name))
ident = self.__ident_func__()
storage = self.__storage__
if ident not in storage:
storage[ident] = dict()
storage[ident][name] = value
def __delattr__(self, name):
if name in ("__storage__", "__ident_func__"):
raise AttributeError("%r object attribute '%s' is read-only" % (self.__class__.__name__, name))
ident = self.__ident_func__()
try:
del self.__storage__[ident][name]
if len(self.__storage__[ident]) == 0:
self.__release_local__()
except KeyError:
raise AttributeError(name)
def clear(self):
self.__release_local__()
local = Local()
@contextmanager
def with_request_local():
local_vars = {}
for k in ["operator", "username", "current_request"]:
if hasattr(local, k):
local_vars[k] = getattr(local, k)
delattr(local, k)
try:
yield local
finally:
for k, v in list(local_vars.items()):
setattr(local, k, v)
@contextmanager
def with_client_user(username):
with with_request_local() as local:
local.username = username
yield
@contextmanager
def with_client_operator(update_user):
with with_request_local() as local:
local.operator = update_user
yield
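# Illustrative usage sketch (not part of the original module): the context
# managers above temporarily rebind the request-local user, e.g.
#
#     with with_client_user("admin"):
#         print(local.username)  # -> "admin" inside the block only
#
# where "admin" is a made-up username for the example.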
|
learntools/sql/ex5.py | roannav/learntools | 359 | 12622562 |
from google.cloud import bigquery
from learntools.core import *
# Setup (4.57s on Kaggle)
client = bigquery.Client()
# (3) YearDistrib
rides_per_year_query = """
SELECT EXTRACT(YEAR FROM trip_start_timestamp) AS year,
COUNT(1) AS num_trips
FROM `bigquery-public-data.chicago_taxi_trips.taxi_trips`
GROUP BY year
ORDER BY year
"""
rides_per_year_query_job = client.query(rides_per_year_query)
rides_per_year_answer = rides_per_year_query_job.to_dataframe()
# (4) MonthDistrib
rides_per_month_query = """
SELECT EXTRACT(MONTH FROM trip_start_timestamp) AS month,
COUNT(1) AS num_trips
FROM `bigquery-public-data.chicago_taxi_trips.taxi_trips`
WHERE EXTRACT(YEAR FROM trip_start_timestamp) = 2017
GROUP BY month
ORDER BY month
"""
rides_per_month_query_job = client.query(rides_per_month_query)
rides_per_month_answer = rides_per_month_query_job.to_dataframe()
# (5) TheLongQuery
speeds_query = """
WITH RelevantRides AS
(
SELECT EXTRACT(HOUR FROM trip_start_timestamp) AS hour_of_day,
trip_miles,
trip_seconds
FROM `bigquery-public-data.chicago_taxi_trips.taxi_trips`
WHERE trip_start_timestamp > '2017-01-01' AND
trip_start_timestamp < '2017-07-01' AND
trip_seconds > 0 AND
trip_miles > 0
)
SELECT hour_of_day,
COUNT(1) AS num_trips,
3600 * SUM(trip_miles) / SUM(trip_seconds) AS avg_mph
FROM RelevantRides
GROUP BY hour_of_day
ORDER BY hour_of_day
"""
speeds_query_job = client.query(speeds_query)
speeds_answer = speeds_query_job.to_dataframe()
# (1)
class GetTableName(EqualityCheckProblem):
_var = 'table_name'
_expected = 'taxi_trips'
_solution = CS("""
# List all the tables in the dataset
tables = list(client.list_tables(dataset))
# Print names of all tables in the dataset (there is only one!)
for table in tables:
print(table.table_id)
table_name = 'taxi_trips'
""")
# (2)
class WhatsWrongWithData(ThoughtExperiment):
_solution = \
"""
You can see the data by calling:
```python
# Construct a reference to the "taxi_trips" table
table_ref = dataset_ref.table("taxi_trips")
# API request - fetch the table
table = client.get_table(table_ref)
# Preview the first five lines of the "taxi_trips" table
client.list_rows(table, max_results=5).to_dataframe()
```
Some trips in the top few rows have `trip_seconds` or `trip_miles` values of 0.
Other location fields have values of `None`. That is a problem if we want to use those fields.
"""
# (3)
class YearDistrib(CodingProblem):
_var = 'rides_per_year_result'
def check(self, results):
# check 1: column names
results.columns = [c.lower() for c in results.columns]
assert ('year' in results.columns), ('Your results should have a `year` column. But your columns are {}.'.format(list(results.columns)))
assert ('num_trips' in results.columns), ('Your results should have a `num_trips` column. But your columns are {}.'.format(list(results.columns)))
# check 2: length of dataframe
assert (len(results) == len(rides_per_year_answer)), ("The results don't look right. Try again.")
# check 3: one value in particular
year_to_check = list(rides_per_year_answer["year"])[0]
correct_number = int(rides_per_year_answer.loc[rides_per_year_answer["year"]==year_to_check]["num_trips"].values)
submitted_number = int(results.loc[results["year"]==year_to_check]["num_trips"].values)
assert (correct_number == submitted_number), ("The results don't look right. Try again.")
_hint = "Start your query with `SELECT EXTRACT(YEAR FROM trip_start_timestamp) AS year, COUNT(1) AS num_trips`."
_solution = CS(
"""
rides_per_year_query = \"""
SELECT EXTRACT(YEAR FROM trip_start_timestamp) AS year,
COUNT(1) AS num_trips
FROM `bigquery-public-data.chicago_taxi_trips.taxi_trips`
GROUP BY year
ORDER BY year
\"""
# Set up the query (cancel the query if it would use too much of
# your quota)
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=10**10)
rides_per_year_query_job = client.query(rides_per_year_query, job_config=safe_config)
# API request - run the query, and return a pandas DataFrame
rides_per_year_result = rides_per_year_query_job.to_dataframe()
"""
)
# (4)
class MonthDistrib(CodingProblem):
_var = 'rides_per_month_result'
def check(self, results):
# check 1: column names
results.columns = [c.lower() for c in results.columns]
assert ('month' in results.columns), ('Your results should have a `month` column. But your columns are {}.'.format(list(results.columns)))
# check 2: length of dataframes
assert (len(results) == len(rides_per_month_answer)), ("The results don't look right. Try again.")
# check 3: one value in particular
month_to_check = list(rides_per_month_answer["month"])[0]
correct_number = rides_per_month_answer.loc[rides_per_month_answer["month"]==month_to_check].values[0][1]
submitted_number = results.loc[results["month"]==month_to_check].values[0][1]
assert(correct_number==submitted_number), ("The results don't look right. Try again.")
_hint = "Start your query with `SELECT EXTRACT(MONTH FROM trip_start_timestamp) AS month, COUNT(1) AS num_trips`."
_solution = CS(
"""
rides_per_month_query = \"""
SELECT EXTRACT(MONTH FROM trip_start_timestamp) AS month,
COUNT(1) AS num_trips
FROM `bigquery-public-data.chicago_taxi_trips.taxi_trips`
WHERE EXTRACT(YEAR FROM trip_start_timestamp) = 2017
GROUP BY month
ORDER BY month
\"""
# Set up the query (cancel the query if it would use too much of
# your quota)
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=10**10)
rides_per_month_query_job = client.query(rides_per_month_query, job_config=safe_config)
# API request - run the query, and return a pandas DataFrame
rides_per_month_result = rides_per_month_query_job.to_dataframe()
"""
)
# (5)
class TheLongQuery(CodingProblem):
_var = 'speeds_result'
def check(self, results):
# check 1: check column names
results.columns = [c.lower() for c in results.columns]
assert('hour_of_day' in results.columns), ("Your results should have an `hour_of_day` column.")
assert('num_trips' in results.columns), ("Your results should have an `num_trips` column.")
assert('avg_mph' in results.columns), ("Your results should have an `avg_mph` column.")
# check 2: length of dataframe
assert(results.shape[0] == speeds_answer.shape[0]), ('You should have {} rows in your results.'.format(speeds_answer.shape[0]))
# check 3: particular values
hour_to_check = list(speeds_answer['hour_of_day'])[0]
# check first value
correct_num_trips = speeds_answer.loc[speeds_answer['hour_of_day'] == hour_to_check]['num_trips'].values[0]
user_num_trips = results.loc[results['hour_of_day'] == hour_to_check]['num_trips'].values[0]
assert(correct_num_trips==user_num_trips), ("The results don't look right. Try again.")
# check second value
correct_avg_mph = round(speeds_answer.loc[speeds_answer['hour_of_day'] == hour_to_check]['avg_mph'].values[0], 3)
user_avg_mph = round(results.loc[results['hour_of_day'] == hour_to_check]['avg_mph'].values[0], 3)
assert(correct_avg_mph==user_avg_mph), ("The results don't look right. Try again.")
_solution = CS(
"""
speeds_query = \"""
WITH RelevantRides AS
(
SELECT EXTRACT(HOUR FROM trip_start_timestamp) AS hour_of_day,
trip_miles,
trip_seconds
FROM `bigquery-public-data.chicago_taxi_trips.taxi_trips`
WHERE trip_start_timestamp > '2017-01-01' AND
trip_start_timestamp < '2017-07-01' AND
trip_seconds > 0 AND
trip_miles > 0
)
SELECT hour_of_day,
COUNT(1) AS num_trips,
3600 * SUM(trip_miles) / SUM(trip_seconds) AS avg_mph
FROM RelevantRides
GROUP BY hour_of_day
ORDER BY hour_of_day
\"""
# Set up the query (cancel the query if it would use too much of
# your quota)
safe_config = bigquery.QueryJobConfig(maximum_bytes_billed=10**10)
speeds_query_job = client.query(speeds_query, job_config=safe_config)
# API request - run the query, and return a pandas DataFrame
speeds_result = speeds_query_job.to_dataframe()
# View results
print(speeds_result)
"""
)
class AllRidesInTheMorning(ThoughtExperiment):
_solution = \
"""
The results show rides with hours 1-12. But there should be results in the afternoon (hours 13-24).
Perhaps the raw data has lost the distinction between AM and PM values.
You can review 200 rows of the raw data with the commands:
```python
# Construct a reference to the "taxi_trips" table
table_ref = dataset_ref.table("taxi_trips")
# API request - fetch the table
table = client.get_table(table_ref)
# Preview the first five lines of the "taxi_trips" table
client.list_rows(table, max_results=200).to_dataframe()
```
You'll see that the timestamps are all in the AM hours (hours are less than or equal to 12.)
At first you might worry that the data is coming back sorted by time, but the variety of dates suggests that's not the case.
Part of data science is tracking down exactly this type of problem. If you were in an organization working on this, you could show the evidence you've just collected (e.g. the breakdown of trips by hour) to someone responsible for collecting the data, and help them debug the data collection and storage process.
"""
qvars = bind_exercises(globals(), [
GetTableName,
WhatsWrongWithData,
YearDistrib,
MonthDistrib,
TheLongQuery,
AllRidesInTheMorning
],
var_format='q_{n}',
)
__all__ = list(qvars)
|
selfdrive/can/packer.py | BoneE562/openpilot | 114 | 12622589 | # pylint: skip-file
import os
import subprocess
can_dir = os.path.dirname(os.path.abspath(__file__))
subprocess.check_call(["make", "packer_impl.so"], cwd=can_dir)
from selfdrive.can.packer_impl import CANPacker
assert CANPacker
|
app/demo/countries/migrations/0016_person_smart.py | sesostris/django-material-admin | 270 | 12622590 | # Generated by Django 3.0 on 2019-12-12 20:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('countries', '0015_auto_20190719_1539'),
]
operations = [
migrations.AddField(
model_name='person',
name='smart',
field=models.BooleanField(default=True),
),
]
|
text_extensions_for_pandas/io/conll.py | CODAIT/text-extensions-for-pandas | 193 | 12622592 |
#
# Copyright (c) 2020 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
# conll.py
"""
The ``io.conll`` module contains I/O functions related to CoNLL-2003 file format and
its derivatives, including CoNLL-U.
"""
from typing import *
import numpy as np
import pandas as pd
import regex
import requests
import os
from zipfile import ZipFile
from text_extensions_for_pandas.array.span import SpanArray
from text_extensions_for_pandas.array.token_span import (
TokenSpan,
TokenSpanArray,
)
# Special token that CoNLL-2003 format uses to delineate the documents in
# the collection.
_CONLL_DOC_SEPARATOR = "-DOCSTART-"
_EWT_DOC_SEPERATOR = "# newdoc id"
# _PUNCT_REGEX = regex.compile(f"[{string.punctuation}]+")
_PUNCT_OR_RIGHT_PAREN_REGEX = regex.compile(
# Punctuation, right paren, or apostrophe followed by 1-2 lowercase letters
# But not single or double quote, which could either begin or end a quotation
"[!#%)*+,-./:;=>?@\\]^_`|}~]|'[a-zA-Z]{1,2}"
)
# Tokens that behave like left parentheses for whitespace purposes,
# including dollar signs ("$100", not "$ 100")
_LEFT_PAREN_REGEX = regex.compile(r"[(<\[{$]+")
# _PUNCT_MATCH_FN = np.vectorize(lambda s: _PUNCT_REGEX.fullmatch(s) is not None)
_SPACE_BEFORE_MATCH_FN = np.vectorize(
lambda s: _PUNCT_OR_RIGHT_PAREN_REGEX.fullmatch(s) is not None
)
_SPACE_AFTER_MATCH_FN = np.vectorize(
lambda s: _LEFT_PAREN_REGEX.fullmatch(s) is not None
)
def default_conll_u_field_names() -> List[str]:
"""
:returns: The default set of field names (not including the required first
two fields) to use when parsing CoNLL-U files.
"""
return [
"lemma",
"upostag",
"xpostag",
"features",
"head",
"deprel",
"deps",
"misc",
]
def default_conll_u_numeric_cols() -> List[str]:
return [
"head",
"line_num",
]
def default_ewt_metadata() -> Dict[str, str]:
"""
:returns: What metadata to log from conllu (especially ewt) files.
This is a dict as follows: tag_in_file -> desired name.
When the tag in the file is seen in a comment, the following value will be stored
and be assumed to apply to all elements in that document.
"""
return {
"sent_id": "sentence_id",
"newpar id": "paragraph_id",
"newdoc id": "doc_id",
}
# Note: the index within the sentence is explicit, so the data fields start one column further along;
# for more information see https://universaldependencies.org/docs/format.html
def _make_empty_meta_values(
column_names: List[str], iob_columns: List[bool]
) -> Dict[str, List[Optional[Union[str, int]]]]:
ret = {}
for i in range(len(column_names)):
name = column_names[i]
if i >= len(iob_columns) or not iob_columns[i]:
ret[name] = []
else:
ret[f"{name}_iob"] = []
ret[f"{name}_type"] = []
return ret
class _SentenceData:
"""
Data structure that encapsulates one sentence's worth of data
from a parsed CoNLL-2003 file.
Not intended for use outside this file.
"""
def __init__(
self,
column_names: List[str],
iob_columns: List[bool],
predicate_args: bool,
conllu_metadata_cols: List[str] = None,
):
self._column_names = column_names
self._iob_columns = iob_columns
self._num_standard_cols = len(self._column_names)
# metadata-- init to None
self._token_metadata = None
# Surface form of token
self._tokens = [] # Type: List[str]
# Line numbers for each token from the file
self._line_nums = [] # Type: List[int]
# metadata from conll_u file
self._conllu_metadata = (
dict.fromkeys(conllu_metadata_cols, "")
if conllu_metadata_cols is not None
else None
)
self._conllu_metadata_exists = False
self._conll_09_format = predicate_args
@property
def num_tokens(self) -> int:
return len(self._tokens)
@property
def tokens(self) -> List[str]:
return self._tokens
@property
def token_metadata(self) -> Dict[str, List[str]]:
return self._token_metadata
@property
def line_nums(self):
return self._line_nums
@property
def column_names(self):
return self._column_names
@property
def conll_u_metadata_feilds(self) -> List[str]:
return (
list(self._conllu_metadata.keys())
if self._conllu_metadata is not None
else None
)
@property
def has_conll_u_metadata(self):
return self._conllu_metadata_exists
@property
def conll_09_format(self):
return self._conll_09_format
def set_conll_u_metadata(self, field: str, val: str):
if str != "":
self._conllu_metadata_exists = True
self._conllu_metadata[field] = val
self._update_conllu_metadata_exists()
def set_batch_conll_u_metadata(self, metadata: Dict[str, str]):
assert metadata.keys() <= self._conllu_metadata.keys()
self._conllu_metadata.update(metadata)
self._update_conllu_metadata_exists()
def get_conll_u_metadata(self, field: str) -> str:
return self._conllu_metadata[field]
def _update_conllu_metadata_exists(self):
self._conllu_metadata_exists = any(
[v is not None and v != "" for v in self._conllu_metadata.values()]
)
def _process_line_tags(
self,
raw_tags: List[str],
line_num: int,
line_elems: List[str],
is_conll_u: bool = False,
):
if self._token_metadata is None:
self._token_metadata = _make_empty_meta_values(
self._column_names, self._iob_columns
)
for i in range(len(raw_tags)):
raw_tag = raw_tags[i]
name = self._column_names[i]
if not self._iob_columns[i]:
# non-IOB data
self._token_metadata[name].append(raw_tag)
else:
# IOB-format data; split into two values
if raw_tag.startswith("I-") or raw_tag.startswith("B-"):
# Tokens that are entities are tagged with tags like
# "I-PER" or "B-MISC".
tag, entity = raw_tag.split("-")
elif raw_tag == "O":
tag = raw_tag
entity = None
elif (not is_conll_u) and raw_tag == "-X-":
# Special metadata value for -DOCSTART- tags in the CoNLL corpus.
tag = "O"
entity = None
else:
raise ValueError(
f"Tag '{raw_tag}' of IOB-format field {i} at line "
f"{line_num} does not start with 'I-', 'O', "
f"or 'B-'.\n"
f"Fields of line are: {line_elems}"
)
self._token_metadata[f"{name}_iob"].append(tag)
self._token_metadata[f"{name}_type"].append(entity)
def add_line(self, line_num: int, line_elems: List[str]):
"""
:param line_num: Location in file, for error reporting
:param line_elems: Fields of a line, pre-split
"""
if len(line_elems) != 1 + len(self._column_names):
raise ValueError(
f"Unexpected number of elements {len(line_elems)} "
f"at line {line_num}; expected "
f"{1 + len(self._column_names)} elements."
)
token = line_elems[0]
raw_tags = line_elems[1:]
self._tokens.append(token)
self._line_nums.append(line_num)
self._process_line_tags(raw_tags, line_num, line_elems, is_conll_u=False)
def add_line_conllu(self, line_num: int, line_elems: List[str]):
"""
Similar to add_line, but handles additional logic for conllu files.
        This includes the additional (ignored) leading entry that holds the word index within the sentence.
:param line_num: Location in file, for error reporting
:param line_elems: Fields of a line, pre-split
"""
if len(line_elems) < 2 + len(self._column_names):
if len(line_elems) >= 2 + self._num_standard_cols:
line_elems.extend(
["_" for _ in range(2 + len(self._column_names) - len(line_elems))]
)
else:
raise ValueError(
f"Unexpected number of elements {len(line_elems)} "
f"at line {line_num}; expected "
f"{2 + len(self._column_names)} elements, "
f"got {len(line_elems)} instead."
f" min_num: {self._num_standard_cols}"
f"\nline reads: '{' '.join(line_elems) }'"
)
if (
len(line_elems) > 2 + len(self._column_names)
and self._conll_09_format
and self.num_tokens == 0
):
# only modify once per sentence
additional_lines = len(line_elems) - (3 + len(self._column_names))
self._column_names.append("predicate")
addnl_col_names = [f"pred{i}arg" for i in range(additional_lines)]
self._column_names.extend(addnl_col_names)
self._iob_columns.extend([False for _ in range(additional_lines + 1)])
# print(f"found Conll9 format. Added{additional_lines} columns. cols are now {self._column_names}")
assert len(self._column_names) + 2 == len(line_elems)
token = line_elems[1]
raw_tags = line_elems[2:len(self._column_names) + 2]
raw_tags = [None if tag == "_" else tag for tag in raw_tags]
self._tokens.append(token)
self._line_nums.append(line_num)
# because we do not combine
self._process_line_tags(raw_tags, line_num, line_elems, is_conll_u=True)
def _parse_conll_file(
input_file: str, column_names: List[str], iob_columns: List[bool]
) -> List[List[_SentenceData]]:
"""
Parse the CoNLL-2003 file format for training/test data to Python objects.
The format is especially tricky, so everything here is straight non-vectorized
Python code. If you want performance, write the contents of your CoNLL files back
out into a file format that supports performance.
:param input_file: Location of the file to read
:param column_names: Names for the metadata columns that come after the
token text. These names will be used to generate the names of the dataframe
that this function returns.
:param iob_columns: Mask indicating which of the metadata columns after the
token text should be treated as being in IOB format. If a column is in IOB format,
the returned data structure will contain *two* columns, holding IOB tags and
entity type tags, respectively. For example, an input column "ent" will turn into
output columns "ent_iob" and "ent_type".
:returns: A list of lists of _SentenceData objects. The top list has one entry per
document. The next level lists have one entry per sentence.
"""
with open(input_file, "r") as f:
lines = f.readlines()
# Build up a list of document metadata as Python objects
docs = [] # Type: List[List[Dict[str, List[str]]]]
current_sentence = _SentenceData(column_names, iob_columns, False)
# Information about the current document
sentences = [] # Type: SentenceData
for i in range(len(lines)):
line = lines[i].strip()
if 0 == len(line):
# Blank line is the sentence separator
if current_sentence.num_tokens > 0:
sentences.append(current_sentence)
current_sentence = _SentenceData(column_names, iob_columns, False)
else:
# Not at the end of a sentence
line_elems = line.split(" ")
current_sentence.add_line(i, line_elems)
if line_elems[0] == _CONLL_DOC_SEPARATOR and i > 0:
# End of document. Wrap up this document and start a new one.
#
# Note that the special "start of document" token is considered part
# of the document. If you do not follow this convention, the
# result sets from CoNLL 2003 won't line up.
# Note also that `current_sentence` is not in `sentences` and will be
# added to the next document.
docs.append(sentences)
sentences = []
# Close out the last sentence and document, if needed
if current_sentence.num_tokens > 0:
sentences.append(current_sentence)
if len(sentences) > 0:
docs.append(sentences)
return docs
def _parse_conll_u_file(
input_file: str,
column_names: List[str],
iob_columns: List[bool],
predicate_args: bool = True,
merge_subtokens: bool = False,
merge_subtoken_separator: str = "|",
metadata_fields: Dict[str, str] = None,
doc_seperator=_EWT_DOC_SEPERATOR
) -> List[List[_SentenceData]]:
"""
The format is especially tricky, so everything here is straight non-vectorized Python
code. If you want performance, write the contents of your CoNLL files back out into a
file format that supports performance.
:param input_file: Location of the file to read
:param column_names: Names for the metadata columns that come after the
token text. These names will be used to generate the names of the dataframe
that this function returns.
:param iob_columns: Mask indicating which of the metadata columns after the
token text should be treated as being in IOB format. If a column is in IOB format,
the returned data structure will contain *two* columns, holding IOB tags and
entity type tags, respectively. For example, an input column "ent" will turn into
output columns "ent_iob" and "ent_type".
:param predicate_args: whether or not predicate arguments are stored in this file
format.
:param metadata_fields: Optional. The types of metadata fields you want to store
from the document. Stored in the form of dictionary: tag_in_text -> "pretty" tag
(i.e. what you want to show in the output).
If no value is provided, then the return value of :func:`default_ewt_metadata()`
will be used.
:returns: A list of lists of _SentenceData objects. The top list has one entry per
document. The next level lists have one entry per sentence.
"""
if metadata_fields is None:
metadata_fields = default_ewt_metadata()
with open(input_file, "r") as f:
lines = f.readlines()
# Build up a list of document metadata as Python objects
docs = [] # Type: List[List[Dict[str, List[str]]]]
# metadata specific to conll_u
metadata_names = list(metadata_fields.values())
u_metadata = dict.fromkeys(metadata_names, "")
current_sentence = _SentenceData(
column_names.copy(), iob_columns.copy(), predicate_args, metadata_names
)
# Information about the current document
sentences = [] # Type: SentenceData
# if we merge subtokens we need additional logic
in_subtok = False # set this flag when inside of subtoken
subtok_end = None # only valid when in subtok
for i in range(len(lines)):
line = lines[i].strip()
if 0 == len(line):
# Blank line is the sentence separator
if current_sentence.num_tokens > 0:
sentences.append(current_sentence)
current_sentence = _SentenceData(
column_names.copy(),
iob_columns.copy(),
predicate_args,
metadata_names,
)
current_sentence.set_batch_conll_u_metadata(u_metadata)
elif line[0] == "#":
line_elems = line.split(" = ")
if line_elems[0] == doc_seperator:
if i > 0 and len(sentences) > 0:
# End of document. Wrap up this document and start a new one.
#
docs.append(sentences)
sentences = []
# reset doc, paragraph and sentence id's
# now check for metadata
line_elems[0] = line_elems[0].strip("# ")
if line_elems[0] in metadata_fields.keys():
key = metadata_fields[line_elems[0]]
current_sentence.set_conll_u_metadata(key, line_elems[1])
u_metadata[key] = line_elems[1]
elif not in_subtok:
# Not at the end of a sentence, or in a subtok
line_elems = line.split("\t")
# Ignore multi-word tokens for now; just use word sequence; may want to change, but we'd need to
# interpret each sub-word's info
if "-" not in line_elems[0]: # checks if has range
current_sentence.add_line_conllu(i, line_elems)
elif merge_subtokens:
in_subtok = True
# find start and end of range
start, end = line_elems[0].split("-")
subtok_end = (
int(end) - int(start) + i + 1
) # the end (inclusive) of subtoken, by global index
comb_elem_list = [[] for i in range(len(line_elems))]
for subtoken in lines[i + 1:subtok_end + 1]:
subtok_elems = subtoken.split("\t")
for field in range(2, len(line_elems)):
if subtok_elems[field] != "_":
comb_elem_list[field].append(subtok_elems[field])
combined_elems = line_elems[0:2] # first line is the same
for elem_list in comb_elem_list[2:]:
combined_elems.append(merge_subtoken_separator.join(elem_list))
current_sentence.add_line_conllu(i, combined_elems)
if in_subtok and i >= subtok_end:
in_subtok = False
subtok_end = None
# Close out the last sentence and document, if needed
if current_sentence.num_tokens > 0:
sentences.append(current_sentence)
if len(sentences) > 0:
docs.append(sentences)
return docs
def _parse_conll_output_file(
doc_dfs: List[pd.DataFrame], input_file: str
) -> List[Dict[str, List[str]]]:
"""
Parse the CoNLL-2003 file format for output data to Python
objects. This format is similar to the format that `_parse_conll_file`
produces, but without the token and document boundary information.
:param doc_dfs: List of `pd.DataFrame`s of token information from the
corresponding training data file, one `DataFrame` per document.
Used for determining document boundaries, which are not encoded in
CoNLL-2003 output file format.
:param input_file: Location of the file to read
:returns: A list of dicts. The top list has one entry per
document. The next level contains lists under the following keys:
* `iob`: List of IOB2 tags as strings. This function does **NOT**
correct for the silly way that CoNLL-format uses "B" tags. See
`_fix_iob_tags()` for that correction.
* `entity`: List of entity tags where `iob` contains I's or B's.
`None` everywhere else.
"""
with open(input_file, "r") as f:
lines = f.readlines()
# Build up a list of document metadata as Python objects
docs = [] # Type: List[Dict[str, List[str]]]
# Position in the corpus
doc_num = 0
num_tokens_in_doc = len(doc_dfs[doc_num].index)
token_num = 0
# Information about the current document's tokens
iobs = [] # Type: List[str]
entities = [] # Type: List[str]
for i in range(len(lines)):
line = lines[i].strip()
if 0 == len(line):
# Blank line is the sentence separator.
continue
if " " in line:
raise ValueError(
f"Line {i} contains unexpected space character.\n" f"Line was: '{line}'"
)
raw_tag = line
if raw_tag.startswith("I") or raw_tag.startswith("B"):
# Tokens that are entities are tagged with tags like
# "I-PER" or "B-MISC".
tag, entity = raw_tag.split("-")
elif raw_tag == "O":
tag = raw_tag
entity = None
else:
raise ValueError(
f"Unexpected tag {raw_tag} at line {i}.\n" f"Line was: '{line}'"
)
iobs.append(tag)
entities.append(entity)
token_num += 1
if token_num == num_tokens_in_doc:
# End of current document, advance to next
docs.append({"iob": iobs, "entity": entities})
iobs = []
entities = []
doc_num += 1
token_num = 0
if doc_num < len(doc_dfs):
num_tokens_in_doc = len(doc_dfs[doc_num].index)
if doc_num < len(doc_dfs):
print(
f"WARNING: Corpus has {len(doc_dfs)} documents, but "
f"only found outputs for {doc_num} of them."
)
# raise ValueError(f"Corpus has {len(doc_dfs)} documents, but "
# f"only found outputs for {doc_num} of them.")
return docs
def _iob_to_iob2(
df: pd.DataFrame, column_names: List[str], iob_columns: List[bool]
) -> pd.DataFrame:
"""
In CoNLL-2003 format, entities are stored in IOB format, where the first
token of an entity is only tagged "B" when there are two entities of the
same type back-to-back. This format makes downstream processing difficult.
If a given position has an `I` tag, that position may or may not be the
first token of an entity. Code will need to inspect both the I/O/B tags
*and* the entity type of multiple other tokens *and* the boundaries between
sentences to disambiguate between those two cases.
This function converts these IOB tags to the easier-to-consume IOB2 format;
see
https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging)
for details. Basically, every entity in IOB2 format begins with a `B` tag.
The `I` tag is only used for the second, third, etc. tokens of an entity.
:param df: A `pd.DataFrame` with one row per token of the document.
In addition to the metadata columns corresponding to `column_names`, this
dataframe must also contain sentence information in a column called `sentence`.
:param column_names: Names for the metadata columns in the original data file
that were used to generate the names of the columns of `df`.
:param iob_columns: Mask indicating which of the metadata columns after the
token text should be treated as being in IOB format.
:returns: A version of `df` with corrected IOB2 tags in the `ent_iob`
column. The original dataframe is not modified.
"""
ret = df.copy()
sentence_begins = df["sentence"].values.begin_token
for col_num in range(len(iob_columns)):
if iob_columns[col_num]:
name = column_names[col_num]
iobs = df[f"{name}_iob"].values.copy() # Modified in place
entities = df[f"{name}_type"].values
# Special-case the first one
if iobs[0] == "I":
iobs[0] = "B"
for iob_num in range(1, len(iobs)):
tag = iobs[iob_num]
prev_tag = iobs[iob_num - 1]
if tag == "I":
if (
prev_tag == "O" # Previous token not an entity
or (
prev_tag in ("I", "B")
and entities[iob_num] != entities[iob_num - 1]
) # Previous token a different type of entity
or (
sentence_begins[iob_num] != sentence_begins[iob_num - 1]
) # Start of new sentence
):
iobs[iob_num] = "B"
ret[f"{name}_iob"] = iobs
return ret
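# Illustrative sketch (added commentary, not part of the original module): given a
# document dataframe `df` from `_doc_to_df` whose "ent_iob" column reads
# ["I", "I", "O", "I"] with entity types ["PER", "PER", None, "LOC"], the call
# below rewrites the tags to IOB2 form ["B", "I", "O", "B"]. The column
# configuration is an assumption for illustration only.
#
#     df2 = _iob_to_iob2(df, column_names=["pos", "phrase", "ent"],
#                        iob_columns=[False, True, True])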
def _doc_to_df(
doc: List[_SentenceData],
column_names: List[str],
iob_columns: List[bool],
space_before_punct: bool,
conll_u: bool = False,
) -> pd.DataFrame:
"""
Convert the "Python objects" representation of a document from a
CoNLL-2003 file into a `pd.DataFrame` of token metadata.
:param doc: List of Python objects that represents the document.
:param column_names: Names for the metadata columns that come after the
token text. These names will be used to generate the names of the dataframe
that this function returns.
:param iob_columns: Mask indicating which of the metadata columns after the
token text should be treated as being in IOB format. If a column is in IOB format,
the returned dataframe will contain *two* columns, holding IOB2 tags and
entity type tags, respectively. For example, an input column "ent" will turn into
output columns "ent_iob" and "ent_type".
:param space_before_punct: If `True`, add whitespace before
punctuation characters (and after left parentheses)
when reconstructing the text of the document.
:return: DataFrame with four columns:
* `span`: Span of each token, with character offsets.
Backed by the concatenation of the tokens in the document into
a single string with one sentence per line.
* `ent_iob`: IOB2-format tags of tokens, exactly as they appeared
in the original file, with no corrections applied.
* `ent_type`: Entity type names for tokens tagged "I" or "B" in
the `ent_iob` column; `None` everywhere else.
* `line_num`: line number of each token in the parsed file
"""
# Character offsets of tokens in the reconstructed document
begins_list = [] # Type: List[np.ndarray]
ends_list = [] # Type: List[np.ndarray]
# Reconstructed text of each sentence
sentences_list = [] # Type: List[np.ndarray]
# Token offsets of sentences containing each token in the document.
sentence_begins_list = [] # Type: List[np.ndarray]
sentence_ends_list = [] # Type: List[np.ndarray]
# conll_u metadata information.
conll_u_ids_exsist = doc is not None and len(doc) != 0 and doc[0].has_conll_u_metadata
conll_2009_format = doc is not None and len(doc) != 0 and doc[0].conll_09_format
# this should be the same for all sentences so we check the first
if conll_2009_format:
max_list = max(doc, key=lambda sent: len(sent.column_names)).column_names
if len(max_list) > len(column_names):
column_names = max_list
# Token metadata column values. Key is column name, value is metadata for
# each token.
if conll_u_ids_exsist:
meta_lists = _make_empty_meta_values(
column_names + doc[0].conll_u_metadata_feilds, iob_columns
)
else:
meta_lists = _make_empty_meta_values(column_names, iob_columns)
# Line numbers of the parsed file for each token in the doc
doc_line_nums = []
char_position = 0
token_position = 0
for sentence_num in range(len(doc)):
sentence = doc[sentence_num]
tokens = sentence.tokens
# Don't put spaces before punctuation in the reconstituted string.
no_space_before_mask = (
np.zeros(len(tokens), dtype=bool)
if space_before_punct
else _SPACE_BEFORE_MATCH_FN(tokens)
)
no_space_after_mask = (
np.zeros(len(tokens), dtype=bool)
if space_before_punct
else _SPACE_AFTER_MATCH_FN(tokens)
)
no_space_before_mask[0] = True # No space before first token
no_space_after_mask[-1] = True # No space after last token
shifted_no_space_after_mask = np.roll(no_space_after_mask, 1)
prefixes = np.where(
np.logical_or(no_space_before_mask, shifted_no_space_after_mask), "", " "
)
string_parts = np.ravel((prefixes, tokens), order="F")
sentence_text = "".join(string_parts)
sentences_list.append(sentence_text)
lengths = np.array([len(t) for t in tokens])
prefix_lengths = np.array([len(p) for p in prefixes])
# Begin and end offsets, accounting for which tokens have spaces
# before them.
e = np.cumsum(lengths + prefix_lengths)
b = e - lengths
begins_list.append(b + char_position)
ends_list.append(e + char_position)
sentence_begin_token = token_position
sentence_end_token = token_position + len(e)
sentence_begins = np.repeat(sentence_begin_token, len(e))
sentence_ends = np.repeat(sentence_end_token, len(e))
sentence_begins_list.append(sentence_begins)
sentence_ends_list.append(sentence_ends)
for k in meta_lists.keys():
if k in sentence.token_metadata.keys():
meta_lists[k].extend(sentence.token_metadata[k])
elif conll_u_ids_exsist and k in sentence.conll_u_metadata_feilds:
data = sentence.get_conll_u_metadata(k)
meta_lists[k].extend([data for _ in range(sentence.num_tokens)])
else:
meta_lists[k].extend([None for _ in range(sentence.num_tokens)])
char_position += e[-1] + 1 # "+ 1" to account for newline
token_position += len(e)
doc_line_nums.extend(sentence.line_nums)
# move "head" indices so they point at the right words
if conll_u and "head" in column_names:
for i in range(sentence_begin_token, sentence_end_token):
val = meta_lists["head"][i]
if val is not None:
points_to = int(val)
meta_lists["head"][i] = (
points_to + sentence_begin_token - 1 if points_to != 0 else -1
)
begins = np.concatenate(begins_list)
ends = np.concatenate(ends_list)
doc_text = "\n".join(sentences_list)
char_spans = SpanArray(doc_text, begins, ends)
sentence_spans = TokenSpanArray(
char_spans,
np.concatenate(sentence_begins_list),
np.concatenate(sentence_ends_list),
)
ret = pd.DataFrame({"span": char_spans})
for k, v in meta_lists.items():
ret[k] = v
ret["sentence"] = sentence_spans
ret["line_num"] = pd.Series(doc_line_nums)
if conll_u and "head" in column_names:
ret = ret.astype({"head": "Int64"}, errors="ignore")
ret.loc[ret["head"] == -1, "head"] = pd.NA
return ret
def _output_doc_to_df(
tokens: pd.DataFrame,
outputs: Dict[str, List[str]],
column_name: str,
copy_tokens: bool,
) -> pd.DataFrame:
"""
Convert the "Python objects" representation of a document from a
CoNLL-2003 file into a `pd.DataFrame` of token metadata.
:param tokens: `pd.DataFrame` containing metadata about the tokens
of this document, as returned by `conll_2003_to_dataframe`
:param outputs: Dictionary containing outputs for this document,
with fields "iob" and "entity".
:param column_name: Name for the metadata value that the IOB-tagged data
in `input_file` encodes. If this name is present in `doc_dfs`, its value
will be replaced with the data from `input_file`; otherwise a new column
will be added to each dataframe.
:param copy_tokens: `True` if token information should be deep-copied.
:return: DataFrame with four columns:
* `span`: Span of each token, with character offsets.
Backed by the concatenation of the tokens in the document into
a single string with one sentence per line.
* `ent_iob`: IOB2-format tags of tokens, corrected so that every
entity begins with a "B" tag.
* `ent_type`: Entity type names for tokens tagged "I" or "B" in
the `ent_iob` column; `None` everywhere else.
"""
if copy_tokens:
return pd.DataFrame(
{
"span": tokens["span"].copy(),
f"{column_name}_iob": np.array(outputs["iob"]),
f"{column_name}_type": np.array(outputs["entity"]),
"sentence": tokens["sentence"].copy(),
}
)
else:
return pd.DataFrame(
{
"span": tokens["span"],
f"{column_name}_iob": np.array(outputs["iob"]),
f"{column_name}_type": np.array(outputs["entity"]),
"sentence": tokens["sentence"],
}
)
#####################################################
# External API functions below this line
def iob_to_spans(
token_features: pd.DataFrame,
iob_col_name: str = "ent_iob",
span_col_name: str = "span",
entity_type_col_name: str = "ent_type",
):
"""
Convert token tags in Inside–Outside–Beginning (IOB2) format to a series of
    :class:`TokenSpan` objects of entities. See wikipedia_ for more information
on the IOB2 format.
.. _wikipedia: https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging)
:param token_features: DataFrame of token features in the format returned by
:func:`make_tokens_and_features`.
:param iob_col_name: Name of a column in ``token_features`` that contains the
IOB2 tags as strings, "I", "O", or "B".
:param span_col_name: Name of a column in ``token_features`` that
contains the tokens as a :class:`SpanArray`.
:param entity_type_col_name: Optional name of a column in ``token_features``
that contains entity type information; or ``None`` if no such column exists.
:returns: A :class:`pd.DataFrame` with the following columns:
* ``span``: Span (with token offsets) of each entity
* ``<value of entity_type_col_name>``: (optional) Entity type
"""
# Start out with 1-token prefixes of all entities.
begin_mask = token_features[iob_col_name] == "B"
first_tokens = token_features[begin_mask].index
if entity_type_col_name is None:
entity_types = np.zeros(len(first_tokens))
else:
entity_types = token_features[begin_mask][entity_type_col_name]
# Add an extra "O" tag to the end of the IOB column to simplify the logic
# for handling the case where the document ends with an entity.
iob_series = (
token_features[iob_col_name].append(pd.Series(["O"])).reset_index(drop=True)
)
entity_prefixes = pd.DataFrame(
{
"ent_type": entity_types,
"begin": first_tokens, # Inclusive
"end": first_tokens + 1, # Exclusive
"next_tag": iob_series.iloc[first_tokens + 1].values,
}
)
df_list = [] # Type: pd.DataFrame
if len(entity_prefixes.index) == 0:
# Code below needs at least one element in the list for schema
df_list = [entity_prefixes]
# Iteratively expand the prefixes
while len(entity_prefixes.index) > 0:
complete_mask = entity_prefixes["next_tag"].isin(["O", "B"])
complete_entities = entity_prefixes[complete_mask]
incomplete_entities = entity_prefixes[~complete_mask].copy()
incomplete_entities["end"] = incomplete_entities["end"] + 1
incomplete_entities["next_tag"] = iob_series.iloc[
incomplete_entities["end"]
].values
df_list.append(complete_entities)
entity_prefixes = incomplete_entities
all_entities = pd.concat(df_list)
# Sort spans by location, not length.
all_entities.sort_values("begin", inplace=True)
# Convert [begin, end) pairs to spans
entity_spans_array = TokenSpanArray(
token_features[span_col_name].values,
all_entities["begin"].values,
all_entities["end"].values,
)
if entity_type_col_name is None:
return pd.DataFrame({"span": entity_spans_array})
else:
return pd.DataFrame(
{
"span": entity_spans_array,
entity_type_col_name: all_entities["ent_type"].values,
}
)
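# Illustrative sketch (added commentary, not part of the original module):
# assuming `token_features` is one of the dataframes returned by
# `conll_2003_to_dataframes()` (so it has "span", "ent_iob", and "ent_type"
# columns), entity spans can be extracted with the default column names:
#
#     entities = iob_to_spans(token_features)
#     print(entities["span"].values, entities["ent_type"].values)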
def spans_to_iob(
token_spans: Union[TokenSpanArray, List[TokenSpan], pd.Series],
span_ent_types: Union[str, Iterable, np.ndarray, pd.Series] = None,
) -> pd.DataFrame:
"""
Convert a series of :class:`TokenSpan` objects of entities to token tags in
Inside–Outside–Beginning (IOB2) format. See wikipedia_ for more information
on the IOB2 format.
.. _wikipedia: https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging)
:param token_spans: An object that can be converted to a :class:`TokenSpanArray` via
:func:`TokenSpanArray.make_array`. Should contain :class:`TokenSpan` objects
aligned with the target tokenization. All spans must be from the same document.
Usually you create this array by calling :func:`TokenSpanArray.align_to_tokens`.
:param span_ent_types: List of entity type strings corresponding to each of the
elements of ``token_spans``, or ``None`` to indicate null entity tags.
:returns: A :class:`pd.DataFrame` with two columns:
* "ent_iob": IOB2 tags as strings "ent_iob"
* "ent_type": Entity type strings (or NaN values if ``ent_types`` is ``None``)
"""
# Normalize inputs
token_spans = TokenSpanArray.make_array(token_spans)
if span_ent_types is None:
span_ent_types = [None] * len(token_spans)
elif isinstance(span_ent_types, str):
span_ent_types = [span_ent_types] * len(token_spans)
elif isinstance(span_ent_types, pd.Series):
span_ent_types = span_ent_types.values
# Define the IOB categorical type with "O" == 0, "B"==1, "I"==2
iob2_dtype = pd.CategoricalDtype(["O", "B", "I"], ordered=False)
# Handle an empty token span array
if len(token_spans) == 0:
return pd.DataFrame(
{
"ent_iob": pd.Series(dtype=iob2_dtype),
"ent_type": pd.Series(dtype="string"),
}
)
# All code that follows assumes at least one input span. All spans should
# be from the same document; otherwise there isn't a meaningful IOB
# representation of the entities.
if not token_spans.is_single_tokenization:
raise ValueError(
f"All input spans must be from the same tokenization of "
f"the same document "
f"(spans are {token_spans})"
)
tokens = token_spans.tokens[0]
# Initialize an IOB series with all 'O' entities
iob_data = np.zeros_like(tokens.begin, dtype=np.int64)
iob_tags = pd.Categorical.from_codes(codes=iob_data, dtype=iob2_dtype)
# Assign the begin tags
iob_tags[token_spans.begin_token] = "B"
# Fill in the remaining inside tags
i_lengths = token_spans.end_token - (token_spans.begin_token + 1)
i_mask = i_lengths > 0
i_begins = token_spans.begin_token[i_mask] + 1
i_ends = token_spans.end_token[i_mask]
for begin, end in zip(i_begins, i_ends):
iob_tags[begin:end] = "I"
# Use a similar process to generate entity type tags
ent_types = np.full(len(tokens), np.object_(None), dtype=object)
for ent_type, begin, end in zip(
span_ent_types, token_spans.begin_token, token_spans.end_token
):
ent_types[begin:end] = ent_type
return pd.DataFrame(
{"ent_iob": iob_tags, "ent_type": pd.Series(ent_types, dtype="string")}
)
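# Illustrative sketch (added commentary, not part of the original module): the
# round trip from entity spans back to per-token IOB2 tags. `entities` is
# assumed to be the output of `iob_to_spans()` on a tokenized document.
#
#     iob_df = spans_to_iob(entities["span"], entities["ent_type"])
#     # iob_df has one row per token, with "ent_iob" and "ent_type" columns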
def conll_2003_to_dataframes(
input_file: str,
column_names: List[str],
iob_columns: List[bool],
space_before_punct: bool = False,
) -> List[pd.DataFrame]:
"""
Parse a file in CoNLL-2003 training/test format into a DataFrame.
CoNLL-2003 training/test format looks like this::
-DOCSTART- -X- -X- O
CRICKET NNP I-NP O
- : O O
LEICESTERSHIRE NNP I-NP I-ORG
TAKE NNP I-NP O
OVER IN I-PP O
AT NNP I-NP O
Note the presence of the surface forms of tokens at the beginning
of the lines.
:param input_file: Location of input file to read.
:param space_before_punct: If ``True``, add whitespace before
punctuation characters when reconstructing the text of the document.
:param column_names: Names for the metadata columns that come after the
token text. These names will be used to generate the names of the dataframe
that this function returns.
:param iob_columns: Mask indicating which of the metadata columns after the
token text should be treated as being in IOB format. If a column is in IOB format,
the returned dataframe will contain *two* columns, holding **IOB2** tags and
entity type tags, respectively. For example, an input column "ent" will turn into
output columns "ent_iob" and "ent_type".
:returns: A list containing, for each document in the input file,
a separate :class:`pd.DataFrame` of four columns:
* **span**: Span of each token, with character offsets.
Backed by the concatenation of the tokens in the document into
a single string with one sentence per line.
* **ent_iob** IOB2-format tags of tokens, corrected so that every
entity begins with a "B" tag.
* **ent_type**: Entity type names for tokens tagged "I" or "B" in
the `ent_iob` column; `None` everywhere else.
"""
parsed_docs = _parse_conll_file(input_file, column_names, iob_columns)
doc_dfs = [
_doc_to_df(d, column_names, iob_columns, space_before_punct)
for d in parsed_docs
]
return [_iob_to_iob2(d, column_names, iob_columns) for d in doc_dfs]
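# Illustrative sketch (added commentary, not part of the original module):
# reading the CoNLL-2003 training fold. The file name and column configuration
# are assumptions for illustration; `maybe_download_conll_data()` below shows
# one way to obtain the corpus files.
#
#     docs = conll_2003_to_dataframes(
#         "eng.train",
#         column_names=["pos", "phrase", "ent"],
#         iob_columns=[False, True, True],
#     )
#     first_doc = docs[0]  # one dataframe per document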
def conll_u_to_dataframes(
input_file: str,
column_names: List[str] = None,
iob_columns: List[bool] = None,
has_predicate_args: bool = True,
space_before_punct: bool = False,
merge_subtokens: bool = False,
merge_subtoken_separator: str = "|",
numeric_cols: List[str] = None,
metadata_fields: Dict[str, str] = None,
separate_sentences_by_doc: bool = False
) -> List[pd.DataFrame]:
"""
    Parse a file in CoNLL-U format into a list of DataFrames, one per document.
:param input_file: Location of input file to read.
:param space_before_punct: If `True`, add whitespace before
punctuation characters when reconstructing the text of the document.
:param column_names: Optional. Names for the metadata columns that come after the
token text. These names will be used to generate the names of the dataframe
that this function returns.
If no value is provided, these default to the list returned by
:func:`default_conll_u_field_names`, which is also the format defined at
https://universaldependencies.org/docs/format.html.
:param iob_columns: Mask indicating which of the metadata columns after the
token text should be treated as being in IOB format. If a column is in IOB format,
the returned dataframe will contain *two* columns, holding **IOB2** tags and
entity type tags, respectively. For example, an input column "ent" will turn into
output columns "ent_iob" and "ent_type". By default in CONLL_U or EWT formats this
is all false.
:param has_predicate_args: Whether or not the file format includes predicate args.
True by default, and should support most files in the conllu format, but will assume
that any tabs in the last element are additional predicate arguments
:param merge_subtokens: dictates how to handle tokens that are smaller than one word.
    By default, we keep the subtokens as two separate entities, but if this is set to
``True``, the subtokens will be merged into a single entity, of the same length as
the token, and their attributes will be concatenated
:param merge_subtoken_separator: If merge subtokens is selected, concatenate the
attributes with this separator, by default '|'
:param numeric_cols: Optional: Names of numeric columns drawn from `column_names`,
plus the default "built-in" column name `line_num`.
Any column whose name is in this list will be considered to hold numeric values.
Column names not present in the `column_names` argument will be ignored.
If no value is provided, then the return value of
:func:`default_conll_u_numeric_cols` will be used.
:param metadata_fields: Optional. Types of metadata fields you want to store from the
document, in the form of a dictionary: tag_in_text -> "pretty" tag (i.e. what you
want to show in the output).
If no value is provided, then the return value of :func:`default_ewt_metadata()`
will be used.
:param separate_sentences_by_doc: Optional. If ``False`` (the default behavior),
use the document boundaries defined in the CoNLL-U file. If ``True``, then treat
each sentence in the input file as a separate document.
:returns: A list containing, for each document in the input file,
a separate `pd.DataFrame` of four columns:
* `span`: Span of each token, with character offsets.
Backed by the concatenation of the tokens in the document into
a single string with one sentence per line.
* `ent_iob`: IOB2-format tags of tokens, corrected so that every
entity begins with a "B" tag.
* `ent_type`: Entity type names for tokens tagged "I" or "B" in
the `ent_iob` column; `None` everywhere else.
"""
# Fill in default values
if column_names is None:
column_names = default_conll_u_field_names()
if iob_columns is None:
iob_columns = [False] * len(column_names)
# fill with falses if not specified
if metadata_fields is None:
metadata_fields = default_ewt_metadata()
if numeric_cols is None:
numeric_cols = default_conll_u_numeric_cols()
#
split_doc_by = "# text" if separate_sentences_by_doc else _EWT_DOC_SEPERATOR
parsed_docs = _parse_conll_u_file(
input_file,
column_names,
iob_columns,
has_predicate_args,
merge_subtokens=merge_subtokens,
merge_subtoken_separator=merge_subtoken_separator,
metadata_fields=metadata_fields,
doc_seperator=split_doc_by
)
doc_dfs = [
_doc_to_df(d, column_names, iob_columns, space_before_punct, conll_u=True)
for d in parsed_docs
]
ret = [_iob_to_iob2(d, column_names, iob_columns) for d in doc_dfs]
for d in ret:
for col in numeric_cols:
if col in d:
d[col] = pd.to_numeric(d[col], errors="coerce")
return ret
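# Illustrative sketch (added commentary, not part of the original module):
# parsing a CoNLL-U treebank with the default column layout. The file name is
# an assumption for illustration.
#
#     docs = conll_u_to_dataframes("en_ewt-ud-train.conllu")
#     docs[0][["span", "lemma", "upostag", "head", "deprel"]].head()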
def conll_2003_output_to_dataframes(
doc_dfs: List[pd.DataFrame],
input_file: str,
column_name: str = "ent",
copy_tokens: bool = False,
) -> List[pd.DataFrame]:
"""
Parse a file in CoNLL-2003 output format into a DataFrame.
CoNLL-2003 output format looks like this::
O
O
I-LOC
O
O
I-PER
I-PER
Note the lack of any information about the tokens themselves. Note
also the lack of any information about document boundaries.
:param doc_dfs: List of :class:`pd.DataFrame`s of token information, as
returned by :func:`conll_2003_to_dataframes`. This is needed because
CoNLL-2003 output format does not include any information about
document boundaries.
:param input_file: Location of input file to read.
:param column_name: Name for the metadata value that the IOB-tagged data
in ``input_file`` encodes. If this name is present in ``doc_dfs``, its value
will be replaced with the data from ``input_file``; otherwise a new column
will be added to each dataframe.
:param copy_tokens: If ``True``, deep-copy token series from the
elements of `doc_dfs` instead of using pointers.
:returns: A list containing, for each document in the input file,
a separate :class:`pd.DataFrame` of four columns:
* **span**: Span of each token, with character offsets.
Backed by the concatenation of the tokens in the document into
a single string with one sentence per line.
* **token_span**: Span of each token, with token offsets.
Backed by the contents of the `span` column.
* **<column_name>_iob**: IOB2-format tags of tokens, corrected so that every
entity begins with a "B" tag.
* **<column_name>_type**: Entity type names for tokens tagged "I" or "B" in
the ``<column_name>_iob`` column; ``None`` everywhere else.
"""
docs_list = _parse_conll_output_file(doc_dfs, input_file)
return [
_iob_to_iob2(
_output_doc_to_df(tokens, outputs, column_name, copy_tokens),
[column_name],
[True],
)
for tokens, outputs in zip(doc_dfs, docs_list)
]
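# Illustrative sketch (added commentary, not part of the original module):
# pairing model outputs with the token dataframes of the corresponding fold.
# `doc_dfs` is assumed to come from `conll_2003_to_dataframes()`, and
# "eng.testa.output" is an assumed file containing one IOB tag per line.
#
#     output_docs = conll_2003_output_to_dataframes(doc_dfs, "eng.testa.output")
#     output_docs[0][["span", "ent_iob", "ent_type"]].head()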
def make_iob_tag_categories(
entity_types: List[str],
) -> Tuple[pd.CategoricalDtype, List[str], Dict[str, int]]:
"""
Enumerate all the possible token categories for combinations of
IOB tags and entity types (for example, ``I + "PER" ==> "I-PER"``).
Generate a consistent mapping from these strings to integers.
:param entity_types: Allowable entity type strings for the corpus
:returns: A triple of:
* Pandas CategoricalDtype
* mapping from integer to string label, as a list. This mapping is guaranteed
to be consistent with the mapping in the Pandas CategoricalDtype in the first
return value.
* mapping string label to integer, as a dict; the inverse of the second return
value.
"""
int_to_label = ["O"] + [f"{x}-{y}" for x in ["B", "I"] for y in entity_types]
label_to_int = {int_to_label[i]: i for i in range(len(int_to_label))}
token_class_dtype = pd.CategoricalDtype(categories=int_to_label)
return token_class_dtype, int_to_label, label_to_int
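# Illustrative sketch (added commentary, not part of the original module): the
# four CoNLL-2003 entity types yield a 9-way token classification scheme.
#
#     dtype, int_to_label, label_to_int = make_iob_tag_categories(
#         ["PER", "LOC", "ORG", "MISC"])
#     # int_to_label == ["O", "B-PER", "B-LOC", "B-ORG", "B-MISC",
#     #                  "I-PER", "I-LOC", "I-ORG", "I-MISC"]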
def add_token_classes(
token_features: pd.DataFrame,
token_class_dtype: pd.CategoricalDtype = None,
iob_col_name: str = "ent_iob",
entity_type_col_name: str = "ent_type",
) -> pd.DataFrame:
"""
Add additional columns to a dataframe of IOB-tagged tokens containing composite
string and integer category labels for the tokens.
:param token_features: Dataframe of tokens with IOB tags and entity type strings
:param token_class_dtype: Optional Pandas categorical dtype indicating how to map
composite tags like `I-PER` to integer values.
You can use :func:`make_iob_tag_categories` to generate this dtype.
If this parameter is not provided, this function will use an arbitrary mapping
using the values that appear in this dataframe.
:param iob_col_name: Optional name of a column in `token_features` that contains the
IOB2 tags as strings, "I", "O", or "B".
:param entity_type_col_name: Optional name of a column in `token_features`
that contains entity type information; or `None` if no such column exists.
:returns: A copy of `token_features` with two additional columns, `token_class`
(string class label) and `token_class_id` (integer label).
If `token_features` contains columns with either of these names, those columns will
be overwritten in the returned copy of `token_features`.
"""
if token_class_dtype is None:
empty_mask = token_features[entity_type_col_name].isna() | (
token_features[entity_type_col_name] == ""
)
token_class_type, _, label_to_int = make_iob_tag_categories(
list(token_features[~empty_mask][entity_type_col_name].unique())
)
else:
label_to_int = {
token_class_dtype.categories[i]: i
for i in range(len(token_class_dtype.categories))
}
elems = [] # Type: str
for index, row in token_features[[iob_col_name, entity_type_col_name]].iterrows():
if row[iob_col_name] == "O":
elems.append("O")
else:
elems.append(f"{row[iob_col_name]}-{row[entity_type_col_name]}")
ret = token_features.copy()
ret["token_class"] = pd.Categorical(elems, dtype=token_class_dtype)
ret["token_class_id"] = [label_to_int[elem] for elem in elems]
return ret
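# Illustrative sketch (added commentary, not part of the original module):
# attaching composite class labels to an IOB-tagged token dataframe
# `token_features` (for example one returned by `conll_2003_to_dataframes()`),
# reusing the categorical dtype from the sketch above.
#
#     labeled = add_token_classes(token_features, dtype)
#     labeled[["span", "token_class", "token_class_id"]].head()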
def decode_class_labels(class_labels: Iterable[str]):
"""
Decode the composite labels that :func:`add_token_classes` creates.
:param class_labels: Iterable of string class labels like "I-LOC"
:returns: A tuple of (IOB2 tags, entity type strings) corresponding
to the class labels.
"""
iobs = ["O" if t == "O" else t[:1] for t in class_labels]
types = [None if t == "O" else t.split("-")[1] for t in class_labels]
return iobs, types
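# Illustrative sketch (added commentary, not part of the original module):
#
#     decode_class_labels(["O", "B-PER", "I-PER", "B-LOC"])
#     # -> (["O", "B", "I", "B"], [None, "PER", "PER", "LOC"])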
def maybe_download_conll_data(target_dir: str) -> Dict[str, str]:
"""
Download and cache a copy of the CoNLL-2003 named entity recognition
data set.
**NOTE: This data set is licensed for research use only.**
Be sure to adhere to the terms of the license when using this data set!
:param target_dir: Directory where this function should write the corpus
files, if they are not already present.
:returns: Dictionary containing a mapping from fold name to file name for
each of the three folds (`train`, `test`, `dev`) of the corpus.
"""
_CONLL_DOWNLOAD_BASE_URL = (
"https://github.com/patverga/torch-ner-nlp-from-scratch/raw/master/"
"data/conll2003/"
)
_TRAIN_FILE_NAME = "eng.train"
_DEV_FILE_NAME = "eng.testa"
_TEST_FILE_NAME = "eng.testb"
_TRAIN_FILE = f"{target_dir}/{_TRAIN_FILE_NAME}"
_DEV_FILE = f"{target_dir}/{_DEV_FILE_NAME}"
_TEST_FILE = f"{target_dir}/{_TEST_FILE_NAME}"
def download_file(url, destination):
data = requests.get(url)
open(destination, "wb").write(data.content)
if not os.path.exists(_TRAIN_FILE):
download_file(_CONLL_DOWNLOAD_BASE_URL + _TRAIN_FILE_NAME, _TRAIN_FILE)
if not os.path.exists(_DEV_FILE):
download_file(_CONLL_DOWNLOAD_BASE_URL + _DEV_FILE_NAME, _DEV_FILE)
if not os.path.exists(_TEST_FILE):
download_file(_CONLL_DOWNLOAD_BASE_URL + _TEST_FILE_NAME, _TEST_FILE)
return {"train": _TRAIN_FILE, "dev": _DEV_FILE, "test": _TEST_FILE}
def maybe_download_dataset_data(
target_dir: str, document_url: str, fname: str = None
) -> Union[str, List[str]]:
"""
    If the file at the given URL is not already present in the target directory,
    download it and save it there. Returns the path to the file.
    If a zip archive is downloaded, only files that are not already in the target
    directory will be extracted, and if ``fname`` is given only that file will be
    operated on.
    Note that a downloaded zip archive will be unpacked, so verify that the URL
    being used is safe.
    :param target_dir: Directory where this function should write the document
    :param document_url: URL from which to download the document. If ``fname`` is not
     specified, it is assumed that the string after the last slash is the name of the file.
    :param fname: If given, the name of the file that is checked for in the target directory,
     as well as what is used to save the file if no such file is found. If a zip file is
     downloaded, and a file of this name exists in the archive, only it will be extracted.
    :returns: The path to the file (or a list of paths if a multi-file zip archive was
     extracted), or None if downloading was not successful.
"""
file_name = (
fname if fname is not None else document_url.split("/")[-1]
)
full_path = target_dir + "/" + file_name
# if no directory exists, create one
if not os.path.exists(target_dir):
os.mkdir(target_dir)
# special logic for zip files
if document_url.split(".")[-1] == "zip" and (
fname is None or not os.path.exists(full_path)
):
# if we have a zip file already, don't re-download it
zip_path = target_dir + "/" + document_url.split("/")[-1]
if not os.path.exists(zip_path):
data = requests.get(document_url)
open(zip_path, "wb").write(data.content)
# if need be, extract the zipfile documents
with ZipFile(zip_path, "r") as zipf:
fnames = zipf.namelist()
if fname is not None and fname in fnames:
zipf.extract(fname, target_dir)
return full_path
for fname in fnames:
                if not os.path.exists(target_dir + "/" + fname):
zipf.extract(fname, target_dir)
if len(fnames) == 1:
full_path = target_dir + "/" + fnames[0]
else:
return [target_dir + "/" + fname for fname in fnames]
# regular logic
elif not os.path.exists(full_path):
data = requests.get(document_url)
open(full_path, "wb").write(data.content)
return full_path
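# Illustrative sketch (added commentary, not part of the original module): the
# URL below is a placeholder, not a real location.
#
#     path = maybe_download_dataset_data(
#         "outputs/datasets", "https://example.com/corpora/my_corpus.conllu")
#     docs = conll_u_to_dataframes(path)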
def _prep_for_stacking(fold_name: str, doc_num: int, df: pd.DataFrame) -> pd.DataFrame:
"""
Subroutine of combine_folds()
"""
df_values = {
"fold": fold_name,
"doc_num": doc_num,
}
for colname in df.columns:
df_values[colname] = df[colname]
return pd.DataFrame(df_values)
def combine_folds(fold_to_docs: Dict[str, List[pd.DataFrame]]):
"""
Merge together multiple parts of a corpus (i.e. train, test, validation)
into a single DataFrame of all tokens in the corpus.
:param fold_to_docs: Mapping from fold name ("train", "test", etc.) to
list of per-document DataFrames as produced by :func:`util.conll_to_bert`.
All DataFrames must have the same schema, but any schema is ok.
:returns: corpus wide DataFrame with some additional leading columns `fold`
and `doc_num` to tell what fold and document number within the fold each
row of the dataframe comes from.
"""
to_stack = [] # Type: List[pd.DataFrame]
for fold_name, docs_in_fold in fold_to_docs.items():
to_stack.extend(
[
_prep_for_stacking(fold_name, i, docs_in_fold[i])
for i in range(len(docs_in_fold))
]
)
return pd.concat(to_stack).reset_index(drop=True)
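# Illustrative sketch (added commentary, not part of the original module):
# stacking train/dev/test folds into one corpus-wide dataframe. `train_docs`,
# `dev_docs`, and `test_docs` are assumed to be lists of per-document
# dataframes with identical schemas.
#
#     corpus_df = combine_folds(
#         {"train": train_docs, "dev": dev_docs, "test": test_docs})
#     corpus_df.groupby("fold").size()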
def compute_accuracy_by_document(
corpus_dfs: Dict[Tuple[str, int], pd.DataFrame],
output_dfs: Dict[Tuple[str, int], pd.DataFrame],
) -> pd.DataFrame:
"""
Compute precision, recall, and F1 scores by document.
:param corpus_dfs: Gold-standard span/entity type pairs, as either:
* a dictionary of DataFrames, one DataFrames per document, indexed by
tuples of (collection name, offset into collection)
* a list of DataFrames, one per document
as returned by :func:`conll_2003_output_to_dataframes()`
    :param output_dfs: Model outputs, in the same format as ``corpus_dfs``
     (i.e. exactly the same column names).
"""
if isinstance(corpus_dfs, list):
if not isinstance(output_dfs, list):
raise TypeError(
f"corpus_dfs is a list, but output_dfs is of type "
f"'{type(output_dfs)}', which is not a list."
)
corpus_dfs = {("", i): corpus_dfs[i] for i in range(len(corpus_dfs))}
output_dfs = {("", i): output_dfs[i] for i in range(len(output_dfs))}
    # Note that it's important for all of these lists to be in the same
    # order; hence these expressions all iterate over corpus_dfs.keys()
num_true_positives = [
len(corpus_dfs[k].merge(output_dfs[k]).index) for k in corpus_dfs.keys()
]
num_extracted = [len(output_dfs[k].index) for k in corpus_dfs.keys()]
num_entities = [len(corpus_dfs[k].index) for k in corpus_dfs.keys()]
collection_name = [t[0] for t in corpus_dfs.keys()]
doc_num = [t[1] for t in corpus_dfs.keys()]
stats_by_doc = pd.DataFrame(
{
"fold": collection_name,
"doc_num": doc_num,
"num_true_positives": num_true_positives,
"num_extracted": num_extracted,
"num_entities": num_entities,
}
)
stats_by_doc["precision"] = (
stats_by_doc["num_true_positives"] / stats_by_doc["num_extracted"]
)
stats_by_doc["recall"] = (
stats_by_doc["num_true_positives"] / stats_by_doc["num_entities"]
)
stats_by_doc["F1"] = (
2.0
* (stats_by_doc["precision"] * stats_by_doc["recall"])
/ (stats_by_doc["precision"] + stats_by_doc["recall"])
)
return stats_by_doc
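# Illustrative sketch (added commentary, not part of the original module):
# `gold_entity_dfs` and `model_entity_dfs` are assumed to be parallel lists of
# per-document entity dataframes (span plus entity type), for example produced
# by running `iob_to_spans()` over gold and predicted token dataframes.
#
#     stats_by_doc = compute_accuracy_by_document(gold_entity_dfs,
#                                                 model_entity_dfs)
#     stats_by_doc[["precision", "recall", "F1"]].describe()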
def compute_global_accuracy(stats_by_doc: pd.DataFrame):
"""
    Compute collection-wide precision, recall, and F1 score from the
    output of :func:`compute_accuracy_by_document`.
    :param stats_by_doc: Output of :func:`compute_accuracy_by_document`
:returns: A Python dictionary of collection-level statistics about
result quality.
"""
num_true_positives = stats_by_doc["num_true_positives"].sum()
num_entities = stats_by_doc["num_entities"].sum()
num_extracted = stats_by_doc["num_extracted"].sum()
precision = num_true_positives / num_extracted
recall = num_true_positives / num_entities
f1 = 2.0 * (precision * recall) / (precision + recall)
return {
"num_true_positives": num_true_positives,
"num_entities": num_entities,
"num_extracted": num_extracted,
"precision": precision,
"recall": recall,
"F1": f1,
}
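# Illustrative sketch (added commentary, not part of the original module):
# rolling the per-document statistics up to corpus level.
#
#     global_stats = compute_global_accuracy(stats_by_doc)
#     print(global_stats["precision"], global_stats["recall"], global_stats["F1"])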
|
prepare_dataset.py | scripples/scripp_gpt-2 | 105 | 12622618 | #!/usr/bin/env python3
# Usage:
# PYTHONPATH=src ./encode.py <file|directory|glob> /path/to/output.npz
# PYTHONPATH=src ./train --dataset /path/to/output.npz
import argparse
import numpy as np
import sys
import tqdm
from ftfy import fix_text
import tflex_utils
parser = argparse.ArgumentParser(
description='Use FTFY to prepare a dataset for training.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('infile', metavar='PATH', type=str, help='Input file, directory, or glob pattern (utf-8 text).')
parser.add_argument('--outfile', default="-", type=str, help='Output file path, or - for stdout')
def main():
args = parser.parse_args()
out = sys.stdout if args.outfile == '-' else open(args.outfile, "w")
for i, line in tflex_utils.for_each_line(args.infile, message='Fixing'):
fixed = fix_text(line)
out.write(fixed)
if i % 100 == 0:
out.flush()
if __name__ == '__main__':
main()
|
benchmark_scripts/train.py | qq456cvb/KeypointNet | 117 | 12622627 | import os
import hydra
import torch
import logging
logger = logging.getLogger(__name__)
import omegaconf
import importlib
from tqdm import tqdm
from utils import AverageMeter, ModelWrapper
import dataset
import utils
def train(cfg):
KeypointDataset = getattr(dataset, 'Keypoint{}Dataset'.format(cfg.task.capitalize()))
log_dir = os.path.curdir
train_dataset = KeypointDataset(cfg, 'train')
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=cfg.batch_size, shuffle=True, num_workers=cfg.num_workers, drop_last=True)
val_dataset = KeypointDataset(cfg, 'val')
val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=cfg.batch_size, num_workers=cfg.num_workers)
cfg.num_classes = train_dataset.nclasses
model_impl = getattr(importlib.import_module('.{}'.format(cfg.network.name), package='models'), '{}Model'.format(cfg.task.capitalize()))(cfg).cuda()
model = ModelWrapper(model_impl).cuda()
logger.info('Start training on {} keypoint detection...'.format(cfg.task))
optimizer = torch.optim.Adam(
model.parameters(),
lr=1e-3
)
criterion = getattr(utils, '{}Criterion'.format(cfg.task.capitalize()))().cuda()
meter = AverageMeter()
best_loss = 1e10
for epoch in range(cfg.max_epoch + 1):
train_iter = tqdm(train_dataloader)
# Training
meter.reset()
model.train()
for i, data in enumerate(train_iter):
outputs = model(data)
loss = criterion(data, outputs)
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_iter.set_postfix(loss=loss.item())
meter.update(loss.item())
logger.info(
f'Epoch: {epoch}, Average Train loss: {meter.avg}'
)
# validation loss
model.eval()
meter.reset()
val_iter = tqdm(val_dataloader)
for i, data in enumerate(val_iter):
with torch.no_grad():
outputs = model(data)
loss = criterion(data, outputs)
val_iter.set_postfix(loss=loss.item())
meter.update(loss.item())
if meter.avg < best_loss:
logger.info("best epoch: {}".format(epoch))
best_loss = meter.avg
torch.save(model.state_dict(), os.path.join(log_dir, 'best.pth'))
logger.info(
f'Epoch: {epoch}, Average Val loss: {meter.avg}'
)
@hydra.main(config_path='config', config_name='config')
def main(cfg):
omegaconf.OmegaConf.set_struct(cfg, False)
cfg.log_path = '{}_log'.format(cfg.task)
logger.info(cfg.pretty())
train(cfg)
if __name__ == '__main__':
main() |
tools/rename.py | gatehouse/cppcms | 388 | 12622632 |
#!/usr/bin/env python
#
# Copyright <NAME> 2009. Use, modification and
# distribution is subject to the Boost Software License, Version
# 1.0. (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
#
import re
import sys
import os
import os.path
import StringIO
class SyntaxError(Exception):
def __init__(self,message):
self.message=message
class tockenizer:
def __init__(self,input):
self.input=input
def chr_end(self,line,pos,c):
size=len(line)
while pos<size:
if line[pos:pos+2]=="\\\\" or line[pos:pos+2]=='\\'+c:
pos+=2
elif line[pos]==c:
return pos+1
else:
pos+=1
return -1
def tockens(self):
line=self.input.readline()
while line:
size=len(line)
i=0
while i<size:
if line[i].isalpha() or line[i]=='_':
n=1
while i+n<size and (line[i+n].isalpha() or line[i+n]=='_' or line[i+n].isdigit()):
n+=1
yield line[i:i+n]
i+=n
elif line[i:i+2]==r'//':
yield line[i:]
i=size
elif line[i:i+2]==r'/*':
end=line.find(r'*/',i)
if end!=-1:
yield line[i:end+2]
i=end+2
else:
res=line[i:]
line=self.input.readline()
while line:
end=line.find(r'*/')
if end==-1:
res+=line
line=self.input.readline()
else:
res+=line[:end+2]
size=len(line)
i=end+2;
yield res
break
size=len(line)
elif line[i]=='"' or line[i]=="'":
c=line[i]
end=self.chr_end(line,i+1,c)
if end!=-1:
yield line[i:end]
i=end
else:
res=line[i:]
line=self.input.readline()
while line:
end=self.chr_end(line,0,c)
if end==-1:
res+=line
line=self.input.readline()
else:
res+=line[:end]
size=len(line)
i=end;
yield res
break
size=len(line)
elif i+1==size and line[i:i+1]=='\\\n':
yield '\\\n'
i+=2
else:
yield line[i]
i+=1
line=self.input.readline()
class renamer:
fname=re.compile(r'^(\w+)$')
ident=re.compile(r'^[a-zA-Z_][a-zA-Z_0-9]*$')
def __init__(self,input,output,namespace,newdir=''):
self.input=input
self.output=output
self.namespace=namespace
self.newdir=newdir
def process_tocken(self,tocken):
if self.ident.match(tocken):
tocken=tocken.replace('BOOST',namespace.upper())
tocken=tocken.replace('boost',namespace)
self.output.write(tocken)
def process_all(self,lst):
for tocken in lst:
self.process_tocken(tocken)
def convert_path(self,lst):
self.output.write('<'+self.newdir)
self.process_all(lst[2:])
def rename(self):
parser=tockenizer(self.input)
state='Normal'
substate=None
lst=[]
inc=re.compile(r'^"boost(/.*)"$')
for tocken in parser.tockens():
lst.append(tocken)
if state=='Normal' and tocken=='<':
state='<'
continue
elif state=='<' and tocken=='boost':
state='boost'
continue
elif state=='boost' and tocken=='/':
state='/'
continue
elif state=='/' and self.fname.match(tocken):
state='dir'
continue
elif state=='dir' and tocken=='/':
state='/'
continue
elif state=='dir' and tocken=='.':
state='.'
continue
elif state=='.' and (tocken=='ipp' or tocken=='h' or tocken=='hpp'):
state='hpp'
continue
elif state=='dir' and tocken=='>':
self.convert_path(lst)
lst=[]
elif state=='hpp' and tocken=='>':
self.convert_path(lst)
lst=[]
elif state=='Normal' and inc.match(tocken):
m=inc.match(tocken)
lst[0]='"'+self.newdir+m.group(1)+'"'
state='Normal'
self.process_all(lst)
lst=[]
def is_cpp(name):
for suffix in ['.hpp','.h','.ipp','.cpp','.c','.inl','inc','.SUNWCCh','.cxx','.cc' ]:
if name.endswith(suffix):
return True
if os.path.basename(os.path.dirname(name)) in ['tr1','cpp_c_headers']:
return True
return False
def is_ign(name):
ign=['.vcproj', '.sln', '.v2', '.html', '.cmake', '.txt', '.qbk',\
    '.mak', '.sh', '.pl', '.r', '.css', '.png', '.doc', '.vsprops','.mcp',\
'.xml','.xsd','.jam','.htm','.bat','.xml','.dtd','.zip',\
'.gif','.sty','.pdf','.csh','.w','.fig','.graffle','.jpg',\
'.dot','.cfg','.dimacs','.expected','.dat','.js','.py','.svg','.jpeg','.mml',\
'.input','.flex','.hdf','.manifest','.xsl','.m4','.rst','.rsa','.pyste',\
'.ok','.err1','.err2','.err3','.mini','.db','.toyxml','.quickbook','.gold',\
'.cmd','.toc','.pem','.xls','.rsp','.reno','.output','.log','.in','.am']
for suffix in ign:
if name.endswith(suffix):
return True
if os.path.basename(os.path.dirname(name)) in ['doc','boehm_gc','debian']:
return True
name=os.path.basename(name)
if name in ['Jamfile', 'Makefile','Jamroot','INSTALL','README','LICENSE','Thumbs.db','TODO','NEWS','configure','sublibs','Changelog']:
return True
return False
def rename_one(name,namespace,newdir):
if is_cpp(name):
print "Processing file %s" % name
fin=file(name)
buffer=StringIO.StringIO()
ren=renamer(fin,buffer,namespace,newdir)
ren.rename()
fin.close()
fout=file(name,'w')
buffer.seek(0)
line=buffer.readline()
while line:
fout.write(line)
line=buffer.readline()
fout.close()
elif is_ign(name):
pass
else:
print "Warning!!!!!!!!!! Unlnown file type %s" % name
print "--------------------Ignoring----------------"
rep=file('warning.log','a')
        rep.write('Unknown file type %s\n' % name)
rep.close()
def rename_recursively(dir,namespace,newdir):
for root,dirs,files in os.walk(dir):
for file in files:
rename_one(os.path.join(root,file),namespace,newdir)
if __name__=='__main__':
if len(sys.argv)<3:
print "Usage rename.py path newnamespace"
print "for example: rename.py boost_1_39_0 mybst"
sys.exit(1)
path=sys.argv[1]
namespace=sys.argv[2]
if namespace.lower()!=namespace:
print "Namespace should be lowercase"
sys.exit(1)
newdir=namespace
ren=rename_recursively(path,namespace,newdir)
boost_dir=os.path.join(path,'boost')
new_dir=os.path.join(path,newdir)
if os.path.isdir(boost_dir):
os.rename(boost_dir,new_dir)
|
slackbot/management/commands/sendtestslack.py | mepsd/CLAC | 126 | 12622634 |
import djclick as click
from django.conf import settings
from django.core.management.base import CommandError
from slackbot.bot import sendmsg
from calc.site_utils import absolutify_url
@click.command()
def command() -> None:
url = absolutify_url('/')
if not settings.SLACKBOT_WEBHOOK_URL:
raise CommandError("SLACKBOT_WEBHOOK_URL must be configured.")
if not sendmsg(f"Hi, this is a test message from <{url}|CALC>!"):
raise CommandError("Sending test Slack message failed.")
print("Test Slack message sent successfully!")
|
zincbase/web/__init__.py | complexdb/zincbase | 174 | 12622638 | class GraphCaster:
def __init__(self, redis_address='redis://'):
"""Create a graph cast, so that a KB can be displayed
on localhost:5000. It won't work if you installed basic
Zincbase -- try `pip install zincbase[web]`.
:param str redis_address: URL of the redis instance
the graph cast should use. For local use, let the
default stand, provided you have `docker run -p 6379:6379 -d redis`
"""
try:
from flask_socketio import SocketIO, emit
except ImportError:
print('Please install zincbase[web]')
return False
self.redis = redis_address
self.socketio = SocketIO(message_queue=self.redis)
self.socketio.emit('reset')
self.node_update_queue = []
self.edge_update_queue = []
def add_node(self, node):
"""Add a node to the graph cast.
:param Node node: A node in the KB.
"""
attrs = { 'id': str(node) }
attrs.update(node.attrs)
self.socketio.emit('addNode', attrs, json=True)
def add_edge(self, from_node, to_node, attributes):
"""Add an edge to the graph cast.
"""
attrs = { 'source': str(from_node), 'target': str(to_node) }
attrs.update(attributes)
self.socketio.emit('addLink', attrs, json=True)
def update_node(self, node, defer=False):
"""Update a node in the graph cast with its current attributes in the KB.
:param Node node: The node to update
:param bool defer: If False, send immediately (and cause immediate
re-rendering on the client.) If True, batch and wait until `batch_update()`
to send updates together and re-render only once.
"""
attrs = { 'id': str(node) }
attrs.update({ 'attributes': node.attrs })
if not defer:
self.socketio.emit('updateNode', attrs, json=True)
else:
self.node_update_queue.append(attrs)
def update_edge(self, edge, defer=False):
"""Update an edge in the graph cast with its current attributes in the KB.
:param str sub, pred, obj: The edge, described as subject, predicate, object
:param KB kb: The knowledge base in which the edge exists
:param bool defer: If False, send immediately (and cause immediate
re-rendering on the client.) If True, batch and wait until `batch_update()`
to send updates together and re-render only once.
"""
attrs = { 'attributes': edge.attrs }
attrs.update({'from': edge._sub, 'pred': edge._pred, 'to': edge._ob})
if not defer:
self.socketio.emit('updateEdge', attrs, json=True)
else:
self.edge_update_queue.append(attrs)
def batch_update(self):
"""Perform a batch update. Any `update_node` or `update_edge` calls that
were made with `defer=True` will now be sent to the frontend.
"""
self.socketio.emit('batchUpdateNode', self.node_update_queue, json=True)
self.socketio.emit('batchUpdateEdge', self.edge_update_queue, json=True)
self.node_update_queue = []
self.edge_update_queue = []
def reset(self):
"""If any web client was already listening, reset it"""
self.socketio.emit('reset')
def render(self, node_color=0x11bb88, node_size=10, node_opacity=0.9,
node_label='id', node_visibility=True, edge_label='pred',
edge_opacity=1, edge_color=0x333333, edge_size=0,
edge_visibility=True, arrow_size=0, arrow_color=0x000001,
label_node=False, label_node_color='black', label_node_height=3,
label_node_offset=1, label_edge=False, label_edge_color='black',
label_edge_height=3, label_edge_offset=1,
bg_color=0xffffff, engine='d3'):
"""Perform the initial setup/rendering of the current graph.
:param node_color: Either a 24bit RGB int (such as 0xFF001A) or a string
containing a Javascript function which takes `node` as an argument, for
example `node => node.color`
:param node_size: Either a number >= 0 or a string containing a Javascript
function, for example `node => Math.log(node.enormity)`
:param node_label: Either a string representing a property of the node
to display (on hover) as its label, or a Javascript function returning a string.
All nodes have a property called `id` which is their name/string repr.
:param node_visibility: Either a string representing a property of the node
which evaluates truthy/falsy (in Javascript) to determine whether to display
the node, or a JS function that returns true or false, or True/False.
:param label_node: If True, nodes will be labeled with `node_label`. Unlike
`node_label`, which only displays on hover, this is a permanent text. Note
that the value updates when the value of `node[node_label]` changes (in Python).
:param label_node_color: RGB value for the color of a node's permanent label
:param label_node_height: Text height for the node's permanent label
:param label_node_offset: Integer specifying how far out from the node the
label should appear. Default is 1 unit on the z-axis.
:param edge_visibility: Either a string representing a property of the edge
which evaluates truthy/falsy (in Javascript) to determine whether to display
the edge, or a JS function that returns true or false, or True/False.
:param edge_label: Either a string representing a property of an edge
to display (on hover) as its label, or a Javascript function returning a string.
Defaults to the predicate.
:param float edge_opacity: Opacity of the edges, from 0-1
:param edge_color: Either a 24bit RGB int or a string containing a Javascript
function which takes `edge` as an argument, for example `edge => edge.color`.
:param edge_size: The width of an edge. Either a number >= 0 (where 0 means 1px)
or a string containing a Javascript function.
:param label_edge: If True, nodes will be labeled with `edge_label`. Unlike
`edge_label`, which only displays on hover, this is a permanent text. Note
that the value updates when the value of `edge[edge_label]` changes (in Python).
:param label_edge_color: RGB value for the color of a edge's permanent label
:param label_edge_height: Text height for the edge's permanent label
:param label_edge_offset: Integer specifying how far out from the edge the
label should appear. Default is 1 unit on the z-axis.
:param int arrow_size: If >0, display directional arrows on edges of that size.
:param int arrow_color: Color of arrows (if arrow_size > 0)
:param int bg_color: Hex background color for the graph, e.g. 0xFF0000 is red.
:param str engine: Specify d3 or ngraph. ngraph is faster but can be buggy, and
is only really suitable for static graphs. The layouts can look different also.
"""
if label_node:
label_node = {
                'color': label_node_color,
                'height': label_node_height,
'offset': node_size + label_node_offset
}
if label_edge:
label_edge = {
                'color': label_edge_color,
                'height': label_edge_height,
'offset': edge_size + label_edge_offset
}
attributes = { 'node_color': node_color, 'node_size': node_size,
'node_opacity': node_opacity, 'node_label': node_label,
'node_visibility': node_visibility, 'edge_visibility': edge_visibility,
'edge_opacity': edge_opacity, 'edge_color': edge_color,
'edge_size': edge_size, 'edge_label': edge_label,
'arrow_size': arrow_size, 'arrow_color': arrow_color,
'label_node': label_node, 'label_edge': label_edge,
'engine': engine, 'bg_color': bg_color }
self.socketio.emit('render', attributes, json=True)
def from_kb(self, kb):
# TODO Permit this to work with subsampling the KB
for node in kb.G.nodes:
self.add_node(kb.node(node))
for from_node, to_node, edge_attributes in kb.G.edges.data():
self.add_edge(from_node=from_node, to_node=to_node, attributes=edge_attributes) |
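# Usage sketch (illustrative, not part of the original file): assuming a zincbase
# KB with a few facts asserted, a GraphCaster mirrors it to the browser view; the
# KB API calls shown here are assumptions about the surrounding package.
#
#   from zincbase import KB
#   kb = KB()
#   kb.store('knows(alice, bob)')
#   cast = GraphCaster()                 # needs a local redis instance running
#   cast.reset()
#   cast.from_kb(kb)
#   cast.render(node_size=8, arrow_size=2, label_node=True)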
ParlAI/tests/test_params.py | UmaTaru/run | 163 | 12622648 | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Test ParlaiParser and other opt/params.py code."""
import os
import json
import unittest
from parlai.core.params import ParlaiParser
import parlai.core.agents as agents
import parlai.core.testing_utils as testing_utils
class _ExampleUpgradeOptAgent(agents.Agent):
def __init__(self, opt, shared=None):
super().__init__(opt)
assert 'is_upgraded' in opt
assert opt['is_upgraded'] is True
@classmethod
def upgrade_opt(cls, opt):
opt = super(_ExampleUpgradeOptAgent, cls).upgrade_opt(opt)
assert 'is_upgraded' not in opt
opt['is_upgraded'] = True
return opt
class TestParlaiParser(unittest.TestCase):
"""Test ParlaiParser."""
def test_upgrade_opt(self):
"""Test whether upgrade_opt works."""
with testing_utils.tempdir() as tmp:
with testing_utils.capture_output() as _:
modfn = os.path.join(tmp, 'model')
with open(modfn, 'w') as f:
f.write('Test.')
optfn = modfn + '.opt'
base_opt = {
'model': 'tests.test_params:_ExampleUpgradeOptAgent',
'dict_file': modfn + '.dict',
'model_file': modfn,
}
with open(optfn, 'w') as f:
json.dump(base_opt, f)
pp = ParlaiParser(True, True)
opt = pp.parse_args(['--model-file', modfn])
agents.create_agent(opt)
if __name__ == '__main__':
unittest.main()
|
crfnet/model/losses_neural_filter.py | XiaoJake/CameraRadarFusionNet | 256 | 12622681 |
##### NEURAL FILTER LOSSES #####
import tensorflow as tf
def binary_focal_loss(loss_fn, threshold=0.5, alpha=0.2, gamma=2.0):
"""
    Compared to the multi-class focal loss, this wrapper applies a focal weight
    to an arbitrary binary base loss, so that well-classified examples contribute
    less to the total loss than hard ones.
    :param loss_fn: Base loss function taking (y_true, y_pred).
    :param threshold: Value above which a target is treated as a positive example.
    :param alpha: Scale the focal weight with alpha.
    :param gamma: Take the power of the focal weight with gamma.
"""
def _binary_focal_loss(y_true, y_pred):
# apply threshold to get clearly positive and negative predictions
y_true_binary = tf.keras.backend.greater(y_true, threshold)
# compute the focal loss
alpha_factor = tf.keras.backend.ones_like(y_true, dtype=tf.float32) * alpha # create an array with alpha values, same shape as y_true
alpha_factor = tf.where(y_true_binary, alpha_factor, 1 - alpha_factor) # alpha on true, 1-alpha on false
alpha_factor = alpha_factor * 2 # we don't want to half the learning rate
focal_weight = tf.where(y_true_binary, 1 - y_pred, y_pred)
# this is needed, because the output contains 0.0 after applying to the input grid
focal_weight = tf.clip_by_value(focal_weight, tf.keras.backend.epsilon(), 1.0)
focal_weight = alpha_factor * focal_weight**gamma
focal_weight = tf.squeeze(focal_weight, axis=-1)
focal_weight = tf.identity(focal_weight, name="focal_weight")
cls_loss = focal_weight * loss_fn(y_true, y_pred)
cls_loss = tf.identity(cls_loss, name="cls_loss")
# compute the normalizer: the number of positive anchors
normalizer = tf.where(y_true_binary)
normalizer = tf.keras.backend.cast(tf.keras.backend.shape(normalizer)[0], tf.keras.backend.floatx())
normalizer = tf.keras.backend.maximum(tf.keras.backend.cast_to_floatx(1), normalizer)
cls_loss_sum = tf.keras.backend.sum(cls_loss)
loss = cls_loss_sum / normalizer
loss = tf.identity(loss, name="focal_loss")
return loss #tf.keras.backend.sum(cls_loss) / normalizer
return _binary_focal_loss
def roc_auc_score(y_true, y_pred):
""" ROC AUC Score.
Source: https://github.com/tflearn/tflearn/blob/master/tflearn/objectives.py
Modifications: argument order y_pred and y_true
Approximates the Area Under Curve score, using approximation based on
the Wilcoxon-Mann-Whitney U statistic.
<NAME>., <NAME>., <NAME>., & <NAME>. (2003).
Optimizing Classifier Performance via an Approximation to the Wilcoxon-Mann-Whitney Statistic.
Measures overall performance for a full range of threshold levels.
Arguments:
y_true: `Tensor` . Targets (labels), a probability distribution.
y_pred: `Tensor`. Predicted values.
"""
with tf.name_scope("RocAucScore"):
pos = tf.boolean_mask(y_pred, tf.cast(y_true, tf.bool))
neg = tf.boolean_mask(y_pred, ~tf.cast(y_true, tf.bool))
pos = tf.expand_dims(pos, 0)
neg = tf.expand_dims(neg, 1)
# original paper suggests performance is robust to exact parameter choice
gamma = 0.2
p = 3
difference = tf.zeros_like(pos * neg) + pos - neg - gamma
masked = tf.boolean_mask(difference, difference < 0.0)
return tf.reduce_sum(tf.pow(-masked, p))
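# Usage sketch (illustrative, not part of the original module): the focal wrapper
# returns a Keras-compatible loss; the base loss chosen here is an assumption,
# any callable taking (y_true, y_pred) works.
#
#   focal = binary_focal_loss(tf.keras.losses.binary_crossentropy,
#                             threshold=0.5, alpha=0.2, gamma=2.0)
#   model.compile(optimizer='adam', loss=focal)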
|
sdk/agrifood/azure-agrifood-farming/azure/agrifood/farming/operations/_planting_data_operations.py | rsdoherty/azure-sdk-for-python | 2,728 | 12622685 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, List, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PlantingDataOperations(object):
"""PlantingDataOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.agrifood.farming.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_farmer_id(
self,
farmer_id, # type: str
min_avg_planting_rate=None, # type: Optional[float]
max_avg_planting_rate=None, # type: Optional[float]
min_total_material=None, # type: Optional[float]
max_total_material=None, # type: Optional[float]
min_avg_material=None, # type: Optional[float]
max_avg_material=None, # type: Optional[float]
sources=None, # type: Optional[List[str]]
associated_boundary_ids=None, # type: Optional[List[str]]
operation_boundary_ids=None, # type: Optional[List[str]]
min_operation_start_date_time=None, # type: Optional[datetime.datetime]
max_operation_start_date_time=None, # type: Optional[datetime.datetime]
min_operation_end_date_time=None, # type: Optional[datetime.datetime]
max_operation_end_date_time=None, # type: Optional[datetime.datetime]
min_operation_modified_date_time=None, # type: Optional[datetime.datetime]
max_operation_modified_date_time=None, # type: Optional[datetime.datetime]
min_area=None, # type: Optional[float]
max_area=None, # type: Optional[float]
ids=None, # type: Optional[List[str]]
names=None, # type: Optional[List[str]]
property_filters=None, # type: Optional[List[str]]
statuses=None, # type: Optional[List[str]]
min_created_date_time=None, # type: Optional[datetime.datetime]
max_created_date_time=None, # type: Optional[datetime.datetime]
min_last_modified_date_time=None, # type: Optional[datetime.datetime]
max_last_modified_date_time=None, # type: Optional[datetime.datetime]
max_page_size=50, # type: Optional[int]
skip_token=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PlantingDataListResponse"]
"""Returns a paginated list of planting data resources under a particular farm.
:param farmer_id: ID of the associated farmer.
:type farmer_id: str
:param min_avg_planting_rate: Minimum AvgPlantingRate value(inclusive).
:type min_avg_planting_rate: float
:param max_avg_planting_rate: Maximum AvgPlantingRate value (inclusive).
:type max_avg_planting_rate: float
:param min_total_material: Minimum TotalMaterial value(inclusive).
:type min_total_material: float
:param max_total_material: Maximum TotalMaterial value (inclusive).
:type max_total_material: float
:param min_avg_material: Minimum AvgMaterial value(inclusive).
:type min_avg_material: float
:param max_avg_material: Maximum AvgMaterial value (inclusive).
:type max_avg_material: float
:param sources: Sources of the operation data.
:type sources: list[str]
:param associated_boundary_ids: Boundary IDs associated with operation data.
:type associated_boundary_ids: list[str]
:param operation_boundary_ids: Operation boundary IDs associated with operation data.
:type operation_boundary_ids: list[str]
:param min_operation_start_date_time: Minimum start date-time of the operation data, sample
format: yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type min_operation_start_date_time: ~datetime.datetime
:param max_operation_start_date_time: Maximum start date-time of the operation data, sample
format: yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type max_operation_start_date_time: ~datetime.datetime
:param min_operation_end_date_time: Minimum end date-time of the operation data, sample format:
yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type min_operation_end_date_time: ~datetime.datetime
:param max_operation_end_date_time: Maximum end date-time of the operation data, sample format:
yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type max_operation_end_date_time: ~datetime.datetime
:param min_operation_modified_date_time: Minimum modified date-time of the operation data,
sample format: yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type min_operation_modified_date_time: ~datetime.datetime
:param max_operation_modified_date_time: Maximum modified date-time of the operation data,
sample format: yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type max_operation_modified_date_time: ~datetime.datetime
:param min_area: Minimum area for which operation was applied (inclusive).
:type min_area: float
:param max_area: Maximum area for which operation was applied (inclusive).
:type max_area: float
:param ids: Ids of the resource.
:type ids: list[str]
:param names: Names of the resource.
:type names: list[str]
:param property_filters: Filters on key-value pairs within the Properties object.
eg. "{testKey} eq {testValue}".
:type property_filters: list[str]
:param statuses: Statuses of the resource.
:type statuses: list[str]
:param min_created_date_time: Minimum creation date of resource (inclusive).
:type min_created_date_time: ~datetime.datetime
:param max_created_date_time: Maximum creation date of resource (inclusive).
:type max_created_date_time: ~datetime.datetime
:param min_last_modified_date_time: Minimum last modified date of resource (inclusive).
:type min_last_modified_date_time: ~datetime.datetime
:param max_last_modified_date_time: Maximum last modified date of resource (inclusive).
:type max_last_modified_date_time: ~datetime.datetime
:param max_page_size: Maximum number of items needed (inclusive).
Minimum = 10, Maximum = 1000, Default value = 50.
:type max_page_size: int
:param skip_token: Skip token for getting next set of results.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PlantingDataListResponse or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.agrifood.farming.models.PlantingDataListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PlantingDataListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_farmer_id.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'farmerId': self._serialize.url("farmer_id", farmer_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if min_avg_planting_rate is not None:
query_parameters['minAvgPlantingRate'] = self._serialize.query("min_avg_planting_rate", min_avg_planting_rate, 'float')
if max_avg_planting_rate is not None:
query_parameters['maxAvgPlantingRate'] = self._serialize.query("max_avg_planting_rate", max_avg_planting_rate, 'float')
if min_total_material is not None:
query_parameters['minTotalMaterial'] = self._serialize.query("min_total_material", min_total_material, 'float')
if max_total_material is not None:
query_parameters['maxTotalMaterial'] = self._serialize.query("max_total_material", max_total_material, 'float')
if min_avg_material is not None:
query_parameters['minAvgMaterial'] = self._serialize.query("min_avg_material", min_avg_material, 'float')
if max_avg_material is not None:
query_parameters['maxAvgMaterial'] = self._serialize.query("max_avg_material", max_avg_material, 'float')
if sources is not None:
query_parameters['sources'] = [self._serialize.query("sources", q, 'str') if q is not None else '' for q in sources]
if associated_boundary_ids is not None:
query_parameters['associatedBoundaryIds'] = [self._serialize.query("associated_boundary_ids", q, 'str') if q is not None else '' for q in associated_boundary_ids]
if operation_boundary_ids is not None:
query_parameters['operationBoundaryIds'] = [self._serialize.query("operation_boundary_ids", q, 'str') if q is not None else '' for q in operation_boundary_ids]
if min_operation_start_date_time is not None:
query_parameters['minOperationStartDateTime'] = self._serialize.query("min_operation_start_date_time", min_operation_start_date_time, 'iso-8601')
if max_operation_start_date_time is not None:
query_parameters['maxOperationStartDateTime'] = self._serialize.query("max_operation_start_date_time", max_operation_start_date_time, 'iso-8601')
if min_operation_end_date_time is not None:
query_parameters['minOperationEndDateTime'] = self._serialize.query("min_operation_end_date_time", min_operation_end_date_time, 'iso-8601')
if max_operation_end_date_time is not None:
query_parameters['maxOperationEndDateTime'] = self._serialize.query("max_operation_end_date_time", max_operation_end_date_time, 'iso-8601')
if min_operation_modified_date_time is not None:
query_parameters['minOperationModifiedDateTime'] = self._serialize.query("min_operation_modified_date_time", min_operation_modified_date_time, 'iso-8601')
if max_operation_modified_date_time is not None:
query_parameters['maxOperationModifiedDateTime'] = self._serialize.query("max_operation_modified_date_time", max_operation_modified_date_time, 'iso-8601')
if min_area is not None:
query_parameters['minArea'] = self._serialize.query("min_area", min_area, 'float')
if max_area is not None:
query_parameters['maxArea'] = self._serialize.query("max_area", max_area, 'float')
if ids is not None:
query_parameters['ids'] = [self._serialize.query("ids", q, 'str') if q is not None else '' for q in ids]
if names is not None:
query_parameters['names'] = [self._serialize.query("names", q, 'str') if q is not None else '' for q in names]
if property_filters is not None:
query_parameters['propertyFilters'] = [self._serialize.query("property_filters", q, 'str') if q is not None else '' for q in property_filters]
if statuses is not None:
query_parameters['statuses'] = [self._serialize.query("statuses", q, 'str') if q is not None else '' for q in statuses]
if min_created_date_time is not None:
query_parameters['minCreatedDateTime'] = self._serialize.query("min_created_date_time", min_created_date_time, 'iso-8601')
if max_created_date_time is not None:
query_parameters['maxCreatedDateTime'] = self._serialize.query("max_created_date_time", max_created_date_time, 'iso-8601')
if min_last_modified_date_time is not None:
query_parameters['minLastModifiedDateTime'] = self._serialize.query("min_last_modified_date_time", min_last_modified_date_time, 'iso-8601')
if max_last_modified_date_time is not None:
query_parameters['maxLastModifiedDateTime'] = self._serialize.query("max_last_modified_date_time", max_last_modified_date_time, 'iso-8601')
if max_page_size is not None:
query_parameters['$maxPageSize'] = self._serialize.query("max_page_size", max_page_size, 'int', maximum=1000, minimum=10)
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'farmerId': self._serialize.url("farmer_id", farmer_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PlantingDataListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_farmer_id.metadata = {'url': '/farmers/{farmerId}/planting-data'} # type: ignore
def list(
self,
min_avg_planting_rate=None, # type: Optional[float]
max_avg_planting_rate=None, # type: Optional[float]
min_total_material=None, # type: Optional[float]
max_total_material=None, # type: Optional[float]
min_avg_material=None, # type: Optional[float]
max_avg_material=None, # type: Optional[float]
sources=None, # type: Optional[List[str]]
associated_boundary_ids=None, # type: Optional[List[str]]
operation_boundary_ids=None, # type: Optional[List[str]]
min_operation_start_date_time=None, # type: Optional[datetime.datetime]
max_operation_start_date_time=None, # type: Optional[datetime.datetime]
min_operation_end_date_time=None, # type: Optional[datetime.datetime]
max_operation_end_date_time=None, # type: Optional[datetime.datetime]
min_operation_modified_date_time=None, # type: Optional[datetime.datetime]
max_operation_modified_date_time=None, # type: Optional[datetime.datetime]
min_area=None, # type: Optional[float]
max_area=None, # type: Optional[float]
ids=None, # type: Optional[List[str]]
names=None, # type: Optional[List[str]]
property_filters=None, # type: Optional[List[str]]
statuses=None, # type: Optional[List[str]]
min_created_date_time=None, # type: Optional[datetime.datetime]
max_created_date_time=None, # type: Optional[datetime.datetime]
min_last_modified_date_time=None, # type: Optional[datetime.datetime]
max_last_modified_date_time=None, # type: Optional[datetime.datetime]
max_page_size=50, # type: Optional[int]
skip_token=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.PlantingDataListResponse"]
"""Returns a paginated list of planting data resources across all farmers.
:param min_avg_planting_rate: Minimum AvgPlantingRate value(inclusive).
:type min_avg_planting_rate: float
:param max_avg_planting_rate: Maximum AvgPlantingRate value (inclusive).
:type max_avg_planting_rate: float
:param min_total_material: Minimum TotalMaterial value(inclusive).
:type min_total_material: float
:param max_total_material: Maximum TotalMaterial value (inclusive).
:type max_total_material: float
:param min_avg_material: Minimum AvgMaterial value(inclusive).
:type min_avg_material: float
:param max_avg_material: Maximum AvgMaterial value (inclusive).
:type max_avg_material: float
:param sources: Sources of the operation data.
:type sources: list[str]
:param associated_boundary_ids: Boundary IDs associated with operation data.
:type associated_boundary_ids: list[str]
:param operation_boundary_ids: Operation boundary IDs associated with operation data.
:type operation_boundary_ids: list[str]
:param min_operation_start_date_time: Minimum start date-time of the operation data, sample
format: yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type min_operation_start_date_time: ~datetime.datetime
:param max_operation_start_date_time: Maximum start date-time of the operation data, sample
format: yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type max_operation_start_date_time: ~datetime.datetime
:param min_operation_end_date_time: Minimum end date-time of the operation data, sample format:
yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type min_operation_end_date_time: ~datetime.datetime
:param max_operation_end_date_time: Maximum end date-time of the operation data, sample format:
yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type max_operation_end_date_time: ~datetime.datetime
:param min_operation_modified_date_time: Minimum modified date-time of the operation data,
sample format: yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type min_operation_modified_date_time: ~datetime.datetime
:param max_operation_modified_date_time: Maximum modified date-time of the operation data,
sample format: yyyy-MM-ddTHH:mm:ssZ (inclusive).
:type max_operation_modified_date_time: ~datetime.datetime
:param min_area: Minimum area for which operation was applied (inclusive).
:type min_area: float
:param max_area: Maximum area for which operation was applied (inclusive).
:type max_area: float
:param ids: Ids of the resource.
:type ids: list[str]
:param names: Names of the resource.
:type names: list[str]
:param property_filters: Filters on key-value pairs within the Properties object.
eg. "{testKey} eq {testValue}".
:type property_filters: list[str]
:param statuses: Statuses of the resource.
:type statuses: list[str]
:param min_created_date_time: Minimum creation date of resource (inclusive).
:type min_created_date_time: ~datetime.datetime
:param max_created_date_time: Maximum creation date of resource (inclusive).
:type max_created_date_time: ~datetime.datetime
:param min_last_modified_date_time: Minimum last modified date of resource (inclusive).
:type min_last_modified_date_time: ~datetime.datetime
:param max_last_modified_date_time: Maximum last modified date of resource (inclusive).
:type max_last_modified_date_time: ~datetime.datetime
:param max_page_size: Maximum number of items needed (inclusive).
Minimum = 10, Maximum = 1000, Default value = 50.
:type max_page_size: int
:param skip_token: Skip token for getting next set of results.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PlantingDataListResponse or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.agrifood.farming.models.PlantingDataListResponse]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PlantingDataListResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if min_avg_planting_rate is not None:
query_parameters['minAvgPlantingRate'] = self._serialize.query("min_avg_planting_rate", min_avg_planting_rate, 'float')
if max_avg_planting_rate is not None:
query_parameters['maxAvgPlantingRate'] = self._serialize.query("max_avg_planting_rate", max_avg_planting_rate, 'float')
if min_total_material is not None:
query_parameters['minTotalMaterial'] = self._serialize.query("min_total_material", min_total_material, 'float')
if max_total_material is not None:
query_parameters['maxTotalMaterial'] = self._serialize.query("max_total_material", max_total_material, 'float')
if min_avg_material is not None:
query_parameters['minAvgMaterial'] = self._serialize.query("min_avg_material", min_avg_material, 'float')
if max_avg_material is not None:
query_parameters['maxAvgMaterial'] = self._serialize.query("max_avg_material", max_avg_material, 'float')
if sources is not None:
query_parameters['sources'] = [self._serialize.query("sources", q, 'str') if q is not None else '' for q in sources]
if associated_boundary_ids is not None:
query_parameters['associatedBoundaryIds'] = [self._serialize.query("associated_boundary_ids", q, 'str') if q is not None else '' for q in associated_boundary_ids]
if operation_boundary_ids is not None:
query_parameters['operationBoundaryIds'] = [self._serialize.query("operation_boundary_ids", q, 'str') if q is not None else '' for q in operation_boundary_ids]
if min_operation_start_date_time is not None:
query_parameters['minOperationStartDateTime'] = self._serialize.query("min_operation_start_date_time", min_operation_start_date_time, 'iso-8601')
if max_operation_start_date_time is not None:
query_parameters['maxOperationStartDateTime'] = self._serialize.query("max_operation_start_date_time", max_operation_start_date_time, 'iso-8601')
if min_operation_end_date_time is not None:
query_parameters['minOperationEndDateTime'] = self._serialize.query("min_operation_end_date_time", min_operation_end_date_time, 'iso-8601')
if max_operation_end_date_time is not None:
query_parameters['maxOperationEndDateTime'] = self._serialize.query("max_operation_end_date_time", max_operation_end_date_time, 'iso-8601')
if min_operation_modified_date_time is not None:
query_parameters['minOperationModifiedDateTime'] = self._serialize.query("min_operation_modified_date_time", min_operation_modified_date_time, 'iso-8601')
if max_operation_modified_date_time is not None:
query_parameters['maxOperationModifiedDateTime'] = self._serialize.query("max_operation_modified_date_time", max_operation_modified_date_time, 'iso-8601')
if min_area is not None:
query_parameters['minArea'] = self._serialize.query("min_area", min_area, 'float')
if max_area is not None:
query_parameters['maxArea'] = self._serialize.query("max_area", max_area, 'float')
if ids is not None:
query_parameters['ids'] = [self._serialize.query("ids", q, 'str') if q is not None else '' for q in ids]
if names is not None:
query_parameters['names'] = [self._serialize.query("names", q, 'str') if q is not None else '' for q in names]
if property_filters is not None:
query_parameters['propertyFilters'] = [self._serialize.query("property_filters", q, 'str') if q is not None else '' for q in property_filters]
if statuses is not None:
query_parameters['statuses'] = [self._serialize.query("statuses", q, 'str') if q is not None else '' for q in statuses]
if min_created_date_time is not None:
query_parameters['minCreatedDateTime'] = self._serialize.query("min_created_date_time", min_created_date_time, 'iso-8601')
if max_created_date_time is not None:
query_parameters['maxCreatedDateTime'] = self._serialize.query("max_created_date_time", max_created_date_time, 'iso-8601')
if min_last_modified_date_time is not None:
query_parameters['minLastModifiedDateTime'] = self._serialize.query("min_last_modified_date_time", min_last_modified_date_time, 'iso-8601')
if max_last_modified_date_time is not None:
query_parameters['maxLastModifiedDateTime'] = self._serialize.query("max_last_modified_date_time", max_last_modified_date_time, 'iso-8601')
if max_page_size is not None:
query_parameters['$maxPageSize'] = self._serialize.query("max_page_size", max_page_size, 'int', maximum=1000, minimum=10)
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('PlantingDataListResponse', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/planting-data'} # type: ignore
def get(
self,
farmer_id, # type: str
planting_data_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.PlantingData"
"""Get a specified planting data resource under a particular farmer.
:param farmer_id: ID of the associated farmer resource.
:type farmer_id: str
:param planting_data_id: ID of the planting data resource.
:type planting_data_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PlantingData, or the result of cls(response)
:rtype: ~azure.agrifood.farming.models.PlantingData
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PlantingData"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'farmerId': self._serialize.url("farmer_id", farmer_id, 'str'),
'plantingDataId': self._serialize.url("planting_data_id", planting_data_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('PlantingData', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/farmers/{farmerId}/planting-data/{plantingDataId}'} # type: ignore
def create_or_update(
self,
farmer_id, # type: str
planting_data_id, # type: str
planting_data=None, # type: Optional["_models.PlantingData"]
**kwargs # type: Any
):
# type: (...) -> "_models.PlantingData"
"""Creates or updates an planting data resource under a particular farmer.
:param farmer_id: ID of the associated farmer.
:type farmer_id: str
:param planting_data_id: ID of the planting data resource.
:type planting_data_id: str
:param planting_data: Planting data resource payload to create or update.
:type planting_data: ~azure.agrifood.farming.models.PlantingData
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PlantingData, or the result of cls(response)
:rtype: ~azure.agrifood.farming.models.PlantingData
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PlantingData"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
content_type = kwargs.pop("content_type", "application/merge-patch+json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'farmerId': self._serialize.url("farmer_id", farmer_id, 'str'),
'plantingDataId': self._serialize.url("planting_data_id", planting_data_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if planting_data is not None:
body_content = self._serialize.body(planting_data, 'PlantingData')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if response.status_code == 200:
deserialized = self._deserialize('PlantingData', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PlantingData', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/farmers/{farmerId}/planting-data/{plantingDataId}'} # type: ignore
def delete(
self,
farmer_id, # type: str
planting_data_id, # type: str
**kwargs # type: Any
):
# type: (...) -> None
"""Deletes a specified planting data resource under a particular farmer.
:param farmer_id: ID of the associated farmer resource.
:type farmer_id: str
:param planting_data_id: ID of the planting data.
:type planting_data_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-31-preview"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'Endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
'farmerId': self._serialize.url("farmer_id", farmer_id, 'str'),
'plantingDataId': self._serialize.url("planting_data_id", planting_data_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/farmers/{farmerId}/planting-data/{plantingDataId}'} # type: ignore
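# Illustrative client-side sketch (not part of the generated file): these
# operations are reached through the service client's `planting_data` attribute.
# The client and credential classes below are assumptions about the surrounding
# package, not something defined in this file.
#
#   from azure.agrifood.farming import FarmBeatsClient
#   from azure.identity import DefaultAzureCredential
#
#   client = FarmBeatsClient(endpoint="https://<resource>.farmbeats.azure.net",
#                            credential=DefaultAzureCredential())
#   for planting in client.planting_data.list_by_farmer_id(farmer_id="farmer-1"):
#       print(planting.id)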
|
translators/objects/archive_assembly.py | Mango-3/blenderseed | 256 | 12622706 | #
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2019 <NAME>, The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import appleseed as asr
from ..assethandlers import AssetType
from ..translator import Translator
from ...logger import get_logger
logger = get_logger()
class ArchiveAssemblyTranslator(Translator):
def __init__(self, archive_obj, asset_handler):
logger.debug(f"appleseed: Creating archive asset translator for {archive_obj.name_full}")
super().__init__(archive_obj, asset_handler)
self.__instance_lib = asr.BlTransformLibrary()
self.__ass = None
self.__ass_name = None
self._bl_obj.appleseed.obj_name = self._bl_obj.name_full
@property
def orig_name(self):
return self._bl_obj.appleseed.obj_name
@property
def instances_size(self):
return len(self.__instance_lib)
def create_entities(self, bl_scene, context=None):
logger.debug(f"appleseed: Creating archive asset entity for {self.orig_name}")
self.__ass_name = f"{self.orig_name}_ass"
file_path = self._asset_handler.process_path(self._bl_obj.appleseed.archive_path,
AssetType.ARCHIVE_ASSET)
ass_options = {'filename': file_path}
self.__ass = asr.Assembly("archive_assembly", self.__ass_name, ass_options)
def add_instance_step(self, time, instance_id, bl_matrix):
self.__instance_lib.add_xform_step(time, instance_id, self._convert_matrix(bl_matrix))
def set_deform_key(self, time, depsgraph, index):
pass
def flush_entities(self, as_scene, as_main_assembly, as_project):
logger.debug(f"appleseed: Flushing archive asset entity for {self.orig_name} to project")
as_main_assembly.assemblies().insert(self.__ass)
self.__ass = as_main_assembly.assemblies().get_by_name(self.__ass_name)
self.flush_instances(as_main_assembly)
def flush_instances(self, as_main_assembly):
self.__instance_lib.flush_instances(as_main_assembly, self.__ass_name)
def update_archive_ass(self, depsgraph):
logger.debug(f"appleseed: Updating archive asset entity for {self.orig_name}")
file_path = self._asset_handler.process_path(self._bl_obj.appleseed.archive_path,
AssetType.ARCHIVE_ASSET)
ass_options = {'filename': file_path}
self.__ass.set_parameters(ass_options)
def clear_instances(self, as_main_assembly):
self.__instance_lib.clear_instances(as_main_assembly)
def delete_object(self, as_main_assembly):
logger.debug(f"appleseed: Deleting archive asset entity for {self.orig_name}")
self.clear_instances(as_main_assembly)
as_main_assembly.assemblies().remove(self.__ass)
self.__ass = None
|
modules/ssl/update_policies.py | kulikjak/httpd | 2,529 | 12622710 | <gh_stars>1000+
#!/usr/bin/env python
import json
import os
import sys
from httplib import HTTPSConnection
# The location were Mozilla defines the *current* TLS Security in JSON format
#
MOZ_TLS_CONF_SERVER = "statics.tls.security.mozilla.org"
MOZ_TLS_CONF_PATH = "/server-side-tls-conf.json"
MOZ_TLS_CONF_URL = "https://%s%s" % (MOZ_TLS_CONF_SERVER, MOZ_TLS_CONF_PATH)
# The version we already know. Accept nothing less.
#
MOZ_TLS_CONF_VERSION_MIN = 4.0
# keys inside the JSON document
#
KEY_CONF = 'configurations'
KEY_HREF = 'href'
KEY_OSSL_CIPHERS = 'openssl_ciphersuites'
KEY_TLS_VERSIONS = 'tls_versions'
KEY_VERSION = 'version'
# TLS Versions we know how to handle
#
TLS_VERSIONS = {
'TLSv1.3' : "SSL_PROTOCOL_TLSV1_3",
# Mozilla does not list TLSv1.3 yet, but we want it in there!
'TLSv1.2' : "(SSL_PROTOCOL_TLSV1_2|SSL_PROTOCOL_TLSV1_3)",
#'TLSv1.2' : "SSL_PROTOCOL_TLSV1_2",
'TLSv1.1' : "SSL_PROTOCOL_TLSV1_1",
'TLSv1' : "SSL_PROTOCOL_TLSV1",
'SSLv3' : "SSL_PROTOCOL_CONSTANTS_SSLV3",
}
TLS_1_X_VERSIONS = [ 'TLSv1.2', 'TLSv1.3' ]
# the Security configurations to extract
POLICY_NAMES = [ 'modern', 'intermediate', 'old' ]
def fail(msg):
sys.stderr.write(msg)
sys.exit(1)
def proto_string(tls_version):
if tls_version in TLS_VERSIONS:
return TLS_VERSIONS[tls_version]
fail("Unknown TLS protocol '%s'" % tls_version)
def proto_conf(tls_versions):
if len(TLS_VERSIONS) < len(tls_versions):
fail("more TLS versions used than we know: %s" % tls_versions)
if len(tls_versions) == 1:
return proto_string(tls_versions[0])
missing = []
for tls in TLS_VERSIONS:
if not tls in tls_versions:
missing.append(proto_string(tls))
if len(missing):
return "(SSL_PROTOCOL_ALL & ~(%s))" % "|".join(missing)
return "SSL_PROTOCOL_ALL"
# return an #ifdef required for a policy or None
#
def required_ifdef(conf):
for tlsv in conf[KEY_TLS_VERSIONS]:
# if it has a non-1_X protocol, it works without OpenSSL 1.0.2
if not tlsv in TLS_1_X_VERSIONS:
return None
return "HAVE_TLSV1_X"
def getPolicyDef():
c = HTTPSConnection(MOZ_TLS_CONF_SERVER)
c.request('GET', MOZ_TLS_CONF_PATH)
data = c.getresponse().read()
c.close()
return data
def printPolicies(doc):
print "#define SSL_POLICY_MOZILLA_VERSION %s" % doc[KEY_VERSION]
print ""
for pname in POLICY_NAMES:
prefix = "SSL_POLICY_%s" % pname.upper()
if not pname in doc[KEY_CONF]:
      print "#define %s 0" % prefix
      print ""
continue
p = doc[KEY_CONF][pname]
ifdef = required_ifdef(p)
if ifdef:
print "#ifdef %s" % ifdef
print "#define %s 1" % prefix
print "#define %s_SSL_CIPHERS \"%s\"" % (prefix, p[KEY_OSSL_CIPHERS])
# Mozilla has not specced this yet
print "#define %s_TLS13_CIPHERS NULL" % (prefix)
print "#define %s_PROTOCOLS %s" % (prefix, proto_conf(p[KEY_TLS_VERSIONS]))
if ifdef:
print "#else /* ifdef %s */" % ifdef
print "#define %s 0" % prefix
print "#endif /* ifdef %s, else part */" % ifdef
print ""
def main(argv):
data = getPolicyDef()
doc = json.loads(data)
if MOZ_TLS_CONF_URL != doc[KEY_HREF]:
fail("ERROR: Unexpected href in policy document: %s\n" % doc[KEY_HREF])
if doc[KEY_VERSION] < MOZ_TLS_CONF_VERSION_MIN:
fail("ERROR: Expected at least version %s, but policy document has %s\n" \
% (MOZ_TLS_CONF_VERSION_MIN, doc[KEY_VERSION]))
if 1 == len(argv):
printPolicies(doc)
elif 2 == len(argv):
with open(argv[1]) as f:
for line in f:
if line == "@MOZILLA_SECURITY_POLICIES@\n":
printPolicies(doc)
else:
sys.stdout.write(line)
else:
fail("usage: %s [file] \nDownload and print/replace the Mozilla TLS Security policies" % argv[0])
if __name__ == "__main__":
main(sys.argv)
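# Illustrative invocation sketch (not part of the original script): run without
# arguments it prints the generated #defines to stdout; given a template file it
# echoes the file, replacing the @MOZILLA_SECURITY_POLICIES@ line. File names
# here are assumptions.
#
#   ./update_policies.py > mozilla_policies.h
#   ./update_policies.py ssl_policies.h.in > ssl_policies.h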
|
test/native/test_query.py | hugovk/pg8000 | 337 | 12622815 | import pytest
from pg8000.native import DatabaseError, to_statement
# Tests relating to the basic operation of the database driver, driven by the
# pg8000 custom interface.
@pytest.fixture
def db_table(request, con):
con.run(
"CREATE TEMPORARY TABLE t1 (f1 int primary key, "
"f2 bigint not null, f3 varchar(50) null) "
)
def fin():
try:
con.run("drop table t1")
except DatabaseError:
pass
request.addfinalizer(fin)
return con
def test_database_error(con):
with pytest.raises(DatabaseError):
con.run("INSERT INTO t99 VALUES (1, 2, 3)")
# Run a query on a table, alter the structure of the table, then run the
# original query again.
def test_alter(db_table):
db_table.run("select * from t1")
db_table.run("alter table t1 drop column f3")
db_table.run("select * from t1")
# Run a query on a table, drop then re-create the table, then run the
# original query again.
def test_create(db_table):
db_table.run("select * from t1")
db_table.run("drop table t1")
db_table.run("create temporary table t1 (f1 int primary key)")
db_table.run("select * from t1")
def test_parametrized(db_table):
res = db_table.run("SELECT f1, f2, f3 FROM t1 WHERE f1 > :f1", f1=3)
for row in res:
f1, f2, f3 = row
def test_insert_returning(db_table):
db_table.run("CREATE TEMPORARY TABLE t2 (id serial, data text)")
# Test INSERT ... RETURNING with one row...
res = db_table.run("INSERT INTO t2 (data) VALUES (:v) RETURNING id", v="test1")
row_id = res[0][0]
res = db_table.run("SELECT data FROM t2 WHERE id = :v", v=row_id)
assert "test1" == res[0][0]
assert db_table.row_count == 1
# Test with multiple rows...
res = db_table.run(
"INSERT INTO t2 (data) VALUES (:v1), (:v2), (:v3) " "RETURNING id",
v1="test2",
v2="test3",
v3="test4",
)
assert db_table.row_count == 3
ids = [x[0] for x in res]
assert len(ids) == 3
def test_row_count_select(db_table):
expected_count = 57
for i in range(expected_count):
db_table.run(
"INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=i, v2=i, v3=None
)
db_table.run("SELECT * FROM t1")
# Check row_count
assert expected_count == db_table.row_count
# Should be -1 for a command with no results
db_table.run("DROP TABLE t1")
assert -1 == db_table.row_count
def test_row_count_delete(db_table):
db_table.run(
"INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=1, v2=1, v3=None
)
db_table.run("DELETE FROM t1")
assert db_table.row_count == 1
def test_row_count_update(db_table):
db_table.run(
"INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=1, v2=1, v3=None
)
db_table.run(
"INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=2, v2=10, v3=None
)
db_table.run(
"INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=3, v2=100, v3=None
)
db_table.run(
"INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=4, v2=1000, v3=None
)
db_table.run(
"INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=5, v2=10000, v3=None
)
db_table.run("UPDATE t1 SET f3 = :v1 WHERE f2 > 101", v1="Hello!")
assert db_table.row_count == 2
def test_int_oid(con):
# https://bugs.launchpad.net/pg8000/+bug/230796
con.run("SELECT typname FROM pg_type WHERE oid = :v", v=100)
def test_unicode_query(con):
con.run(
"CREATE TEMPORARY TABLE \u043c\u0435\u0441\u0442\u043e "
"(\u0438\u043c\u044f VARCHAR(50), "
"\u0430\u0434\u0440\u0435\u0441 VARCHAR(250))"
)
def test_transactions(db_table):
db_table.run("start transaction")
db_table.run(
"INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=1, v2=1, v3="Zombie"
)
db_table.run("rollback")
db_table.run("select * from t1")
assert db_table.row_count == 0
def test_in(con):
ret = con.run("SELECT typname FROM pg_type WHERE oid = any(:v)", v=[16, 23])
assert ret[0][0] == "bool"
# An empty query should raise a ProgrammingError
def test_empty_query(con):
with pytest.raises(DatabaseError):
con.run("")
def test_rollback_no_transaction(con):
# Remove any existing notices
con.notices.clear()
# First, verify that a raw rollback does produce a notice
con.run("rollback")
assert 1 == len(con.notices)
    # 25P01 is the code for no_active_sql_transaction. It has
# a message and severity name, but those might be
# localized/depend on the server version.
assert con.notices.pop().get(b"C") == b"25P01"
def test_close_prepared_statement(con):
ps = con.prepare("select 1")
ps.run()
res = con.run("select count(*) from pg_prepared_statements")
assert res[0][0] == 1 # Should have one prepared statement
ps.close()
res = con.run("select count(*) from pg_prepared_statements")
assert res[0][0] == 0 # Should have no prepared statements
def test_no_data(con):
assert con.run("START TRANSACTION") is None
def test_multiple_statements(con):
statements = "SELECT 5; SELECT '<NAME>';"
assert con.run(statements) == [[5], ["<NAME>"]]
def test_unexecuted_connection_row_count(con):
assert con.row_count is None
def test_unexecuted_connection_columns(con):
assert con.columns is None
def test_sql_prepared_statement(con):
con.run("PREPARE gen_series AS SELECT generate_series(1, 10);")
con.run("EXECUTE gen_series")
def test_to_statement():
new_query, _ = to_statement(
"SELECT sum(x)::decimal(5, 2) :f_2, :f1 FROM t WHERE a=:f_2"
)
expected = "SELECT sum(x)::decimal(5, 2) $1, $2 FROM t WHERE a=$1"
assert new_query == expected
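The tests above get their connection from the db_table/con fixtures defined elsewhere in the suite. As a rough standalone sketch of the same run / :param / row_count API — assuming pg8000's native interface, with placeholder connection details and table schema that do not come from this file:
import pg8000.native

con = pg8000.native.Connection("postgres", password="secret", database="test")  # assumed credentials
con.run("CREATE TEMPORARY TABLE t1 (f1 int primary key, f2 int, f3 varchar(50))")
con.run("INSERT INTO t1 (f1, f2, f3) VALUES (:v1, :v2, :v3)", v1=1, v2=10, v3=None)
rows = con.run("SELECT f1, f2, f3 FROM t1 WHERE f1 >= :f1", f1=1)  # named :params bound by keyword
print(rows, con.row_count)  # row_count reflects the last executed statement
con.close()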
|
tests/test_pyenv_feature_help.py | jmscraig/pyenv-win | 1,454 | 12622821 | <reponame>jmscraig/pyenv-win
from test_pyenv import TestPyenvBase
from test_pyenv_helpers import run_pyenv_test
class TestPyenvFeatureGlobal(TestPyenvBase):
def test_help(self, setup):
def commands(ctx):
stdout, stderr = ctx.pyenv("help")
stdout = "\r\n".join(stdout.splitlines()[:2])
assert (stdout.strip(), stderr) == ("Usage: pyenv <command> [<args>]", "")
run_pyenv_test({}, commands)
|
brew/selection/pruning/base.py | va26/brew | 344 | 12622839 | <gh_stars>100-1000
class Prunner(object):
def __init__(self):
pass
def fit(self, ensemble, X, y):
self.ensemble = ensemble  # keep a reference so get() can slice the fitted ensemble
return self
def get(self, p=0.1):
return self.ensemble[:int(p * len(self.ensemble))]
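A hedged sketch of how this base pruner might be driven, assuming fit stores the ensemble as above; clf_a..clf_d stand in for already-ranked classifiers (concrete subclasses would normally reorder the ensemble during fit):
pruner = Prunner()
pruner.fit(ensemble=[clf_a, clf_b, clf_c, clf_d], X=None, y=None)  # placeholder models and data
kept = pruner.get(p=0.5)  # keeps the first half of the (assumed pre-ranked) ensemble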
|
sktime_dl/utils/__init__.py | talhaanwarch/sktime-dl | 510 | 12622843 | __all__ = [
"check_and_clean_data",
"check_and_clean_validation_data",
"check_is_fitted",
"save_trained_model"
]
from sktime_dl.utils._data import check_and_clean_data
from sktime_dl.utils._data import check_and_clean_validation_data
from sktime_dl.utils._models import check_is_fitted
from sktime_dl.utils._models import save_trained_model
|
predict.py | mariomeissner/lightning-transformers | 451 | 12622865 | <gh_stars>100-1000
"""The shell entry point `$ pl-transformers-predict` is also available."""
import hydra
from omegaconf import DictConfig
from lightning_transformers.cli.predict import main
@hydra.main(config_path="./conf", config_name="config")
def hydra_entry(cfg: DictConfig) -> None:
main(cfg)
if __name__ == "__main__":
hydra_entry()
|
slac_tf/slac/agents/slac/ablation_model_distribution_network.py | alfaevc/vlRLstack | 147 | 12622884 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import gin
import numpy as np
from slac.agents.slac.model_distribution_network import Bernoulli
from slac.agents.slac.model_distribution_network import Compressor
from slac.agents.slac.model_distribution_network import ConstantMultivariateNormalDiag
from slac.agents.slac.model_distribution_network import Decoder
from slac.agents.slac.model_distribution_network import MultivariateNormalDiag
from slac.agents.slac.model_distribution_network import Normal
from slac.utils import nest_utils
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.trajectories import time_step as ts
tfd = tfp.distributions
@gin.configurable
class SlacModelDistributionNetwork(tf.Module):
"""Equivalent to model_distribution_network.ModelDistributionNetwork.
We keep the implementations separate to minimize cluttering the implementation
of the main method.
"""
def __init__(self,
observation_spec,
action_spec,
latent1_first_prior_distribution_ctor=ConstantMultivariateNormalDiag,
latent1_prior_distribution_ctor=MultivariateNormalDiag,
latent1_posterior_distribution_ctor=MultivariateNormalDiag,
latent2_prior_distribution_ctor=MultivariateNormalDiag,
latent2_posterior_distribution_ctor=MultivariateNormalDiag,
base_depth=32,
latent1_size=32,
latent2_size=256,
kl_analytic=True,
skip_first_kl=False,
sequential_latent1_prior=True,
sequential_latent2_prior=True,
sequential_latent1_posterior=True,
sequential_latent2_posterior=True,
model_reward=False,
model_discount=False,
decoder_stddev=np.sqrt(0.1, dtype=np.float32),
reward_stddev=None,
name=None):
super(SlacModelDistributionNetwork, self).__init__(name=name)
self.observation_spec = observation_spec
self.action_spec = action_spec
self.base_depth = base_depth
self.latent1_size = latent1_size
self.latent2_size = latent2_size
self.kl_analytic = kl_analytic
self.skip_first_kl = skip_first_kl
self.model_reward = model_reward
self.model_discount = model_discount
# p(z_1^1)
self.latent1_first_prior = latent1_first_prior_distribution_ctor(latent1_size)
# p(z_1^2 | z_1^1)
self.latent2_first_prior = latent2_prior_distribution_ctor(8 * base_depth, latent2_size)
if sequential_latent1_prior:
# p(z_{t+1}^1 | z_t^2, a_t)
self.latent1_prior = latent1_prior_distribution_ctor(8 * base_depth, latent1_size)
else:
# p(z_{t+1}^1)
self.latent1_prior = lambda prev_latent, prev_action: self.latent1_first_prior(prev_latent[..., 0]) # prev_latent is only used to determine the batch shape
if sequential_latent2_prior:
# p(z_{t+1}^2 | z_{t+1}^1, z_t^2, a_t)
self.latent2_prior = latent2_prior_distribution_ctor(8 * base_depth, latent2_size)
else:
# p(z_{t+1}^2 | z_{t+1}^1)
self.latent2_prior = lambda latent1, prev_latent2, prev_action: self.latent2_first_prior(latent1)
# q(z_1^1 | x_1)
self.latent1_first_posterior = latent1_posterior_distribution_ctor(8 * base_depth, latent1_size)
# q(z_1^2 | z_1^1) = p(z_1^2 | z_1^1)
if latent2_posterior_distribution_ctor == latent2_prior_distribution_ctor:
self.latent2_first_posterior = self.latent2_first_prior # share
else:
self.latent2_first_posterior = latent2_posterior_distribution_ctor(8 * base_depth, latent2_size)
if sequential_latent1_posterior:
# q(z_{t+1}^1 | x_{t+1}, z_t^2, a_t)
self.latent1_posterior = latent1_posterior_distribution_ctor(8 * base_depth, latent1_size)
else:
# q(z_{t+1}^1 | x_{t+1})
self.latent1_posterior = lambda feature, prev_latent2, prev_action: self.latent1_first_posterior(feature)
if sequential_latent2_posterior:
# q(z_{t+1}^2 | z_{t+1}^1, z_t^2, a_t) = p(z_{t+1}^2 | z_{t+1}^1, z_t^2, a_t)
if latent2_posterior_distribution_ctor == latent2_prior_distribution_ctor:
self.latent2_posterior = self.latent2_prior
else:
self.latent2_posterior = latent2_posterior_distribution_ctor(8 * base_depth, latent2_size)
else:
# q(z_{t+1}^2 | z_{t+1}^1) = p(z_{t+1}^2 | z_{t+1}^1)
self.latent2_posterior = lambda latent1, prev_latent2, prev_action: self.latent2_first_posterior(latent1)
# compresses x_t into a vector
self.compressor = Compressor(base_depth, 8 * base_depth)
# p(x_t | z_t^1, z_t^2)
self.decoder = Decoder(base_depth, scale=decoder_stddev)
if self.model_reward:
# p(r_t | z_t^1, z_t^2, a_t, z_{t+1}^1, z_{t+1}^2)
self.reward_predictor = Normal(8 * base_depth, scale=reward_stddev)
else:
self.reward_predictor = None
if self.model_discount:
# p(d_t | z_{t+1}^1, z_{t+1}^2)
self.discount_predictor = Bernoulli(8 * base_depth)
else:
self.discount_predictor = None
@property
def state_size(self):
return self.latent1_size + self.latent2_size
def compute_loss(self, images, actions, step_types, rewards=None, discounts=None, latent_posterior_samples_and_dists=None):
sequence_length = step_types.shape[1].value - 1
if latent_posterior_samples_and_dists is None:
latent_posterior_samples_and_dists = self.sample_posterior(images, actions, step_types)
(latent1_posterior_samples, latent2_posterior_samples), (latent1_posterior_dists, latent2_posterior_dists) = (
latent_posterior_samples_and_dists)
(latent1_prior_samples, latent2_prior_samples), _ = self.sample_prior_or_posterior(actions, step_types) # for visualization
(latent1_conditional_prior_samples, latent2_conditional_prior_samples), _ = self.sample_prior_or_posterior(
actions, step_types, images=images[:, :1]) # for visualization. condition on first image only
def where_and_concat(reset_masks, first_prior_tensors, after_first_prior_tensors):
after_first_prior_tensors = tf.where(reset_masks[:, 1:], first_prior_tensors[:, 1:], after_first_prior_tensors)
prior_tensors = tf.concat([first_prior_tensors[:, 0:1], after_first_prior_tensors], axis=1)
return prior_tensors
reset_masks = tf.concat([tf.ones_like(step_types[:, 0:1], dtype=tf.bool),
tf.equal(step_types[:, 1:], ts.StepType.FIRST)], axis=1)
latent1_reset_masks = tf.tile(reset_masks[:, :, None], [1, 1, self.latent1_size])
latent1_first_prior_dists = self.latent1_first_prior(step_types)
# these distributions start at t=1 and the inputs are from t-1
latent1_after_first_prior_dists = self.latent1_prior(
latent2_posterior_samples[:, :sequence_length],
actions[:, :sequence_length])
latent1_prior_dists = nest_utils.map_distribution_structure(
functools.partial(where_and_concat, latent1_reset_masks),
latent1_first_prior_dists,
latent1_after_first_prior_dists)
latent2_reset_masks = tf.tile(reset_masks[:, :, None], [1, 1, self.latent2_size])
latent2_first_prior_dists = self.latent2_first_prior(latent1_posterior_samples)
# these distributions start at t=1 and the last 2 inputs are from t-1
latent2_after_first_prior_dists = self.latent2_prior(
latent1_posterior_samples[:, 1:sequence_length+1],
latent2_posterior_samples[:, :sequence_length],
actions[:, :sequence_length])
latent2_prior_dists = nest_utils.map_distribution_structure(
functools.partial(where_and_concat, latent2_reset_masks),
latent2_first_prior_dists,
latent2_after_first_prior_dists)
outputs = {}
if self.kl_analytic:
latent1_kl_divergences = tfd.kl_divergence(latent1_posterior_dists, latent1_prior_dists)
else:
latent1_kl_divergences = (latent1_posterior_dists.log_prob(latent1_posterior_samples)
- latent1_prior_dists.log_prob(latent1_posterior_samples))
if self.skip_first_kl:
latent1_kl_divergences = latent1_kl_divergences[:, 1:]
latent1_kl_divergences = tf.reduce_sum(latent1_kl_divergences, axis=1)
outputs.update({
'latent1_kl_divergence': tf.reduce_mean(latent1_kl_divergences),
})
if self.latent2_posterior == self.latent2_prior:
latent2_kl_divergences = 0.0
else:
if self.kl_analytic:
latent2_kl_divergences = tfd.kl_divergence(latent2_posterior_dists, latent2_prior_dists)
else:
latent2_kl_divergences = (latent2_posterior_dists.log_prob(latent2_posterior_samples)
- latent2_prior_dists.log_prob(latent2_posterior_samples))
if self.skip_first_kl:
latent2_kl_divergences = latent2_kl_divergences[:, 1:]
latent2_kl_divergences = tf.reduce_sum(latent2_kl_divergences, axis=1)
outputs.update({
'latent2_kl_divergence': tf.reduce_mean(latent2_kl_divergences),
})
outputs.update({
'kl_divergence': tf.reduce_mean(latent1_kl_divergences + latent2_kl_divergences),
})
likelihood_dists = self.decoder(latent1_posterior_samples, latent2_posterior_samples)
likelihood_log_probs = likelihood_dists.log_prob(images)
likelihood_log_probs = tf.reduce_sum(likelihood_log_probs, axis=1)
reconstruction_error = tf.reduce_sum(tf.square(images - likelihood_dists.distribution.loc),
axis=list(range(-len(likelihood_dists.event_shape), 0)))
reconstruction_error = tf.reduce_sum(reconstruction_error, axis=1)
outputs.update({
'log_likelihood': tf.reduce_mean(likelihood_log_probs),
'reconstruction_error': tf.reduce_mean(reconstruction_error),
})
# summed over the time dimension
elbo = likelihood_log_probs - latent1_kl_divergences - latent2_kl_divergences
if self.model_reward:
reward_dists = self.reward_predictor(
latent1_posterior_samples[:, :sequence_length],
latent2_posterior_samples[:, :sequence_length],
actions[:, :sequence_length],
latent1_posterior_samples[:, 1:sequence_length + 1],
latent2_posterior_samples[:, 1:sequence_length + 1])
reward_valid_mask = tf.cast(tf.not_equal(step_types[:, :sequence_length], ts.StepType.LAST), tf.float32)
reward_log_probs = reward_dists.log_prob(rewards[:, :sequence_length])
reward_log_probs = tf.reduce_sum(reward_log_probs * reward_valid_mask, axis=1)
reward_reconstruction_error = tf.square(rewards[:, :sequence_length] - reward_dists.loc)
reward_reconstruction_error = tf.reduce_sum(reward_reconstruction_error * reward_valid_mask, axis=1)
outputs.update({
'reward_log_likelihood': tf.reduce_mean(reward_log_probs),
'reward_reconstruction_error': tf.reduce_mean(reward_reconstruction_error),
})
elbo += reward_log_probs
if self.model_discount:
discount_dists = self.discount_predictor(
latent1_posterior_samples[:, 1:sequence_length + 1],
latent2_posterior_samples[:, 1:sequence_length + 1])
discount_log_probs = discount_dists.log_prob(discounts[:, :sequence_length])
discount_log_probs = tf.reduce_sum(discount_log_probs, axis=1)
discount_accuracy = tf.cast(
tf.equal(tf.cast(discount_dists.mode(), tf.float32), discounts[:, :sequence_length]), tf.float32)
discount_accuracy = tf.reduce_sum(discount_accuracy, axis=1)
outputs.update({
'discount_log_likelihood': tf.reduce_mean(discount_log_probs),
'discount_accuracy': tf.reduce_mean(discount_accuracy),
})
elbo += discount_log_probs
# average over the batch dimension
loss = -tf.reduce_mean(elbo)
posterior_images = likelihood_dists.mean()
prior_images = self.decoder(latent1_prior_samples, latent2_prior_samples).mean()
conditional_prior_images = self.decoder(latent1_conditional_prior_samples, latent2_conditional_prior_samples).mean()
outputs.update({
'elbo': tf.reduce_mean(elbo),
'images': images,
'posterior_images': posterior_images,
'prior_images': prior_images,
'conditional_prior_images': conditional_prior_images,
})
return loss, outputs
def sample_prior_or_posterior(self, actions, step_types=None, images=None):
"""Samples from the prior, except for the first time steps in which conditioning images are given."""
if step_types is None:
batch_size = tf.shape(actions)[0]
sequence_length = actions.shape[1].value # should be statically defined
step_types = tf.fill(
[batch_size, sequence_length + 1], ts.StepType.MID)
else:
sequence_length = step_types.shape[1].value - 1
actions = actions[:, :sequence_length]
if images is not None:
features = self.compressor(images)
# swap batch and time axes
actions = tf.transpose(actions, [1, 0, 2])
step_types = tf.transpose(step_types, [1, 0])
if images is not None:
features = tf.transpose(features, [1, 0, 2])
latent1_dists = []
latent1_samples = []
latent2_dists = []
latent2_samples = []
for t in range(sequence_length + 1):
is_conditional = images is not None and (t < images.shape[1].value)
if t == 0:
if is_conditional:
latent1_dist = self.latent1_first_posterior(features[t])
else:
latent1_dist = self.latent1_first_prior(step_types[t]) # step_types is only used to infer batch_size
latent1_sample = latent1_dist.sample()
if is_conditional:
latent2_dist = self.latent2_first_posterior(latent1_sample)
else:
latent2_dist = self.latent2_first_prior(latent1_sample)
latent2_sample = latent2_dist.sample()
else:
reset_mask = tf.equal(step_types[t], ts.StepType.FIRST)
if is_conditional:
latent1_first_dist = self.latent1_first_posterior(features[t])
latent1_dist = self.latent1_posterior(features[t], latent2_samples[t-1], actions[t-1])
else:
latent1_first_dist = self.latent1_first_prior(step_types[t])
latent1_dist = self.latent1_prior(latent2_samples[t-1], actions[t-1])
latent1_dist = nest_utils.map_distribution_structure(
functools.partial(tf.where, reset_mask), latent1_first_dist, latent1_dist)
latent1_sample = latent1_dist.sample()
if is_conditional:
latent2_first_dist = self.latent2_first_posterior(latent1_sample)
latent2_dist = self.latent2_posterior(latent1_sample, latent2_samples[t-1], actions[t-1])
else:
latent2_first_dist = self.latent2_first_prior(latent1_sample)
latent2_dist = self.latent2_prior(latent1_sample, latent2_samples[t-1], actions[t-1])
latent2_dist = nest_utils.map_distribution_structure(
functools.partial(tf.where, reset_mask), latent2_first_dist, latent2_dist)
latent2_sample = latent2_dist.sample()
latent1_dists.append(latent1_dist)
latent1_samples.append(latent1_sample)
latent2_dists.append(latent2_dist)
latent2_samples.append(latent2_sample)
try:
latent1_dists = nest_utils.map_distribution_structure(lambda *x: tf.stack(x, axis=1), *latent1_dists)
except:
latent1_dists = None
latent1_samples = tf.stack(latent1_samples, axis=1)
try:
latent2_dists = nest_utils.map_distribution_structure(lambda *x: tf.stack(x, axis=1), *latent2_dists)
except:
latent2_dists = None
latent2_samples = tf.stack(latent2_samples, axis=1)
return (latent1_samples, latent2_samples), (latent1_dists, latent2_dists)
def sample_posterior(self, images, actions, step_types, features=None):
sequence_length = step_types.shape[1].value - 1
actions = actions[:, :sequence_length]
if features is None:
features = self.compressor(images)
# swap batch and time axes
features = tf.transpose(features, [1, 0, 2])
actions = tf.transpose(actions, [1, 0, 2])
step_types = tf.transpose(step_types, [1, 0])
latent1_dists = []
latent1_samples = []
latent2_dists = []
latent2_samples = []
for t in range(sequence_length + 1):
if t == 0:
latent1_dist = self.latent1_first_posterior(features[t])
latent1_sample = latent1_dist.sample()
latent2_dist = self.latent2_first_posterior(latent1_sample)
latent2_sample = latent2_dist.sample()
else:
prev_latent2_sample = latent2_samples[t-1]
reset_mask = tf.equal(step_types[t], ts.StepType.FIRST)
latent1_first_dist = self.latent1_first_posterior(features[t])
latent1_dist = self.latent1_posterior(features[t], prev_latent2_sample, actions[t-1])
latent1_dist = nest_utils.map_distribution_structure(
functools.partial(tf.where, reset_mask), latent1_first_dist, latent1_dist)
latent1_sample = latent1_dist.sample()
latent2_first_dist = self.latent2_first_posterior(latent1_sample)
latent2_dist = self.latent2_posterior(latent1_sample, prev_latent2_sample, actions[t-1])
latent2_dist = nest_utils.map_distribution_structure(
functools.partial(tf.where, reset_mask), latent2_first_dist, latent2_dist)
latent2_sample = latent2_dist.sample()
latent1_dists.append(latent1_dist)
latent1_samples.append(latent1_sample)
latent2_dists.append(latent2_dist)
latent2_samples.append(latent2_sample)
latent1_dists = nest_utils.map_distribution_structure(lambda *x: tf.stack(x, axis=1), *latent1_dists)
latent1_samples = tf.stack(latent1_samples, axis=1)
latent2_dists = nest_utils.map_distribution_structure(lambda *x: tf.stack(x, axis=1), *latent2_dists)
latent2_samples = tf.stack(latent2_samples, axis=1)
return (latent1_samples, latent2_samples), (latent1_dists, latent2_dists)
@gin.configurable
class SimpleModelDistributionNetwork(tf.Module):
def __init__(self,
observation_spec,
action_spec,
base_depth=32,
latent_size=256,
kl_analytic=True,
sequential_latent_prior=True,
sequential_latent_posterior=True,
model_reward=False,
model_discount=False,
decoder_stddev=np.sqrt(0.1, dtype=np.float32),
reward_stddev=None,
name=None):
super(SimpleModelDistributionNetwork, self).__init__(name=name)
self.observation_spec = observation_spec
self.action_spec = action_spec
self.base_depth = base_depth
self.latent_size = latent_size
self.kl_analytic = kl_analytic
self.model_reward = model_reward
self.model_discount = model_discount
# p(z_1)
self.latent_first_prior = ConstantMultivariateNormalDiag(latent_size)
if sequential_latent_prior:
# p(z_{t+1} | z_t, a_t)
self.latent_prior = MultivariateNormalDiag(8 * base_depth, latent_size)
else:
# p(z_{t+1})
self.latent_prior = lambda prev_latent, prev_action: self.latent_first_prior(prev_latent[..., 0]) # prev_latent is only used to determine the batch shape
# q(z_1 | x_1)
self.latent_first_posterior = MultivariateNormalDiag(8 * base_depth, latent_size)
if sequential_latent_posterior:
# q(z_{t+1} | x_{t+1}, z_t, a_t)
self.latent_posterior = MultivariateNormalDiag(8 * base_depth, latent_size)
else:
# q(z_{t+1} | x_{t+1})
self.latent_posterior = lambda feature, prev_latent, prev_action: self.latent_first_posterior(feature)
# compresses x_t into a vector
self.compressor = Compressor(base_depth, 8 * base_depth)
# p(x_t | z_t)
self.decoder = Decoder(base_depth, scale=decoder_stddev)
if self.model_reward:
# p(r_t | z_t, a_t, z_{t+1})
self.reward_predictor = Normal(8 * base_depth, scale=reward_stddev)
else:
self.reward_predictor = None
if self.model_discount:
# p(d_t | z_{t+1})
self.discount_predictor = Bernoulli(8 * base_depth)
else:
self.discount_predictor = None
@property
def state_size(self):
return self.latent_size
def compute_loss(self, images, actions, step_types, rewards=None, discounts=None, latent_posterior_samples_and_dists=None):
sequence_length = step_types.shape[1].value - 1
if latent_posterior_samples_and_dists is None:
latent_posterior_samples_and_dists = self.sample_posterior(images, actions, step_types)
latent_posterior_samples, latent_posterior_dists = latent_posterior_samples_and_dists
latent_prior_samples, _ = self.sample_prior_or_posterior(actions, step_types) # for visualization
latent_conditional_prior_samples, _ = self.sample_prior_or_posterior(
actions, step_types, images=images[:, :1]) # for visualization. condition on first image only
def where_and_concat(reset_masks, first_prior_tensors, after_first_prior_tensors):
after_first_prior_tensors = tf.where(reset_masks[:, 1:], first_prior_tensors[:, 1:], after_first_prior_tensors)
prior_tensors = tf.concat([first_prior_tensors[:, 0:1], after_first_prior_tensors], axis=1)
return prior_tensors
reset_masks = tf.concat([tf.ones_like(step_types[:, 0:1], dtype=tf.bool),
tf.equal(step_types[:, 1:], ts.StepType.FIRST)], axis=1)
latent_reset_masks = tf.tile(reset_masks[:, :, None], [1, 1, self.latent_size])
latent_first_prior_dists = self.latent_first_prior(step_types)
# these distributions start at t=1 and the inputs are from t-1
latent_after_first_prior_dists = self.latent_prior(
latent_posterior_samples[:, :sequence_length], actions[:, :sequence_length])
latent_prior_dists = nest_utils.map_distribution_structure(
functools.partial(where_and_concat, latent_reset_masks),
latent_first_prior_dists,
latent_after_first_prior_dists)
outputs = {}
if self.kl_analytic:
latent_kl_divergences = tfd.kl_divergence(latent_posterior_dists, latent_prior_dists)
else:
latent_kl_divergences = (latent_posterior_dists.log_prob(latent_posterior_samples)
- latent_prior_dists.log_prob(latent_posterior_samples))
latent_kl_divergences = tf.reduce_sum(latent_kl_divergences, axis=1)
outputs.update({
'latent_kl_divergence': tf.reduce_mean(latent_kl_divergences),
})
outputs.update({
'kl_divergence': tf.reduce_mean(latent_kl_divergences),
})
likelihood_dists = self.decoder(latent_posterior_samples)
likelihood_log_probs = likelihood_dists.log_prob(images)
likelihood_log_probs = tf.reduce_sum(likelihood_log_probs, axis=1)
reconstruction_error = tf.reduce_sum(tf.square(images - likelihood_dists.distribution.loc),
axis=list(range(-len(likelihood_dists.event_shape), 0)))
reconstruction_error = tf.reduce_sum(reconstruction_error, axis=1)
outputs.update({
'log_likelihood': tf.reduce_mean(likelihood_log_probs),
'reconstruction_error': tf.reduce_mean(reconstruction_error),
})
# summed over the time dimension
elbo = likelihood_log_probs - latent_kl_divergences
if self.model_reward:
reward_dists = self.reward_predictor(
latent_posterior_samples[:, :sequence_length],
actions[:, :sequence_length],
latent_posterior_samples[:, 1:sequence_length + 1])
reward_valid_mask = tf.cast(tf.not_equal(step_types[:, :sequence_length], ts.StepType.LAST), tf.float32)
reward_log_probs = reward_dists.log_prob(rewards[:, :sequence_length])
reward_log_probs = tf.reduce_sum(reward_log_probs * reward_valid_mask, axis=1)
reward_reconstruction_error = tf.square(rewards[:, :sequence_length] - reward_dists.loc)
reward_reconstruction_error = tf.reduce_sum(reward_reconstruction_error * reward_valid_mask, axis=1)
outputs.update({
'reward_log_likelihood': tf.reduce_mean(reward_log_probs),
'reward_reconstruction_error': tf.reduce_mean(reward_reconstruction_error),
})
elbo += reward_log_probs
if self.model_discount:
discount_dists = self.discount_predictor(
latent_posterior_samples[:, 1:sequence_length + 1])
discount_log_probs = discount_dists.log_prob(discounts[:, :sequence_length])
discount_log_probs = tf.reduce_sum(discount_log_probs, axis=1)
discount_accuracy = tf.cast(
tf.equal(tf.cast(discount_dists.mode(), tf.float32), discounts[:, :sequence_length]), tf.float32)
discount_accuracy = tf.reduce_sum(discount_accuracy, axis=1)
outputs.update({
'discount_log_likelihood': tf.reduce_mean(discount_log_probs),
'discount_accuracy': tf.reduce_mean(discount_accuracy),
})
elbo += discount_log_probs
# average over the batch dimension
loss = -tf.reduce_mean(elbo)
posterior_images = likelihood_dists.mean()
prior_images = self.decoder(latent_prior_samples).mean()
conditional_prior_images = self.decoder(latent_conditional_prior_samples).mean()
outputs.update({
'elbo': tf.reduce_mean(elbo),
'images': images,
'posterior_images': posterior_images,
'prior_images': prior_images,
'conditional_prior_images': conditional_prior_images,
})
return loss, outputs
def sample_prior_or_posterior(self, actions, step_types=None, images=None):
"""Samples from the prior, except for the first time steps in which conditioning images are given."""
if step_types is None:
batch_size = tf.shape(actions)[0]
sequence_length = actions.shape[1].value # should be statically defined
step_types = tf.fill(
[batch_size, sequence_length + 1], ts.StepType.MID)
else:
sequence_length = step_types.shape[1].value - 1
actions = actions[:, :sequence_length]
if images is not None:
features = self.compressor(images)
# swap batch and time axes
actions = tf.transpose(actions, [1, 0, 2])
step_types = tf.transpose(step_types, [1, 0])
if images is not None:
features = tf.transpose(features, [1, 0, 2])
latent_dists = []
latent_samples = []
for t in range(sequence_length + 1):
is_conditional = images is not None and (t < images.shape[1].value)
if t == 0:
if is_conditional:
latent_dist = self.latent_first_posterior(features[t])
else:
latent_dist = self.latent_first_prior(step_types[t]) # step_types is only used to infer batch_size
latent_sample = latent_dist.sample()
else:
reset_mask = tf.equal(step_types[t], ts.StepType.FIRST)
if is_conditional:
latent_first_dist = self.latent_first_posterior(features[t])
latent_dist = self.latent_posterior(features[t], latent_samples[t-1], actions[t-1])
else:
latent_first_dist = self.latent_first_prior(step_types[t])
latent_dist = self.latent_prior(latent_samples[t-1], actions[t-1])
latent_dist = nest_utils.map_distribution_structure(
functools.partial(tf.where, reset_mask), latent_first_dist, latent_dist)
latent_sample = latent_dist.sample()
latent_dists.append(latent_dist)
latent_samples.append(latent_sample)
latent_dists = nest_utils.map_distribution_structure(lambda *x: tf.stack(x, axis=1), *latent_dists)
latent_samples = tf.stack(latent_samples, axis=1)
return latent_samples, latent_dists
def sample_posterior(self, images, actions, step_types, features=None):
sequence_length = step_types.shape[1].value - 1
actions = actions[:, :sequence_length]
if features is None:
features = self.compressor(images)
# swap batch and time axes
features = tf.transpose(features, [1, 0, 2])
actions = tf.transpose(actions, [1, 0, 2])
step_types = tf.transpose(step_types, [1, 0])
latent_dists = []
latent_samples = []
for t in range(sequence_length + 1):
if t == 0:
latent_dist = self.latent_first_posterior(features[t])
latent_sample = latent_dist.sample()
else:
reset_mask = tf.equal(step_types[t], ts.StepType.FIRST)
latent_first_dist = self.latent_first_posterior(features[t])
latent_dist = self.latent_posterior(features[t], latent_samples[t-1], actions[t-1])
latent_dist = nest_utils.map_distribution_structure(
functools.partial(tf.where, reset_mask), latent_first_dist, latent_dist)
latent_sample = latent_dist.sample()
latent_dists.append(latent_dist)
latent_samples.append(latent_sample)
latent_dists = nest_utils.map_distribution_structure(lambda *x: tf.stack(x, axis=1), *latent_dists)
latent_samples = tf.stack(latent_samples, axis=1)
return latent_samples, latent_dists
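Written out as math, the objective that compute_loss returns is the negative of a sequential ELBO. The following is a summary sketch of the terms accumulated above (with z_t = (z_t^1, z_t^2) for the SLAC variant, T the sequence length, and the reward/discount terms present only when model_reward / model_discount are enabled), not an exact transcription of the TensorFlow code:
\mathcal{L} = -\,\mathrm{ELBO}, \qquad
\mathrm{ELBO} = \mathbb{E}_{q}\Big[ \sum_{t=1}^{T+1} \log p(x_t \mid z_t)
 - \sum_{t=1}^{T+1} D_{\mathrm{KL}}\big(q(z_t^1 \mid \cdot)\,\|\,p(z_t^1 \mid \cdot)\big)
 - \sum_{t=1}^{T+1} D_{\mathrm{KL}}\big(q(z_t^2 \mid \cdot)\,\|\,p(z_t^2 \mid \cdot)\big)
 + \sum_{t=1}^{T} \log p(r_t \mid z_t, a_t, z_{t+1})
 + \sum_{t=1}^{T} \log p(d_t \mid z_{t+1}) \Big]
When the z^2 posterior shares the z^2 prior, its KL term is identically zero, and skip_first_kl drops the t = 1 KL contributions; both correspond to branches in compute_loss above.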
|
00Python/day07/basic01.py | HaoZhang95/PythonAndMachineLearning | 937 | 12622885 |
"""
Default-parameter functions: parameters can have default values, just like in Kotlin.
The return type annotation can also be shortened away, i.e. the "-> int:" part may be omitted.
"""
def print_info(name, age=20):
print("姓名:%s, 年龄:%s" % (name, age))
print_info("张三", 28)
print_info("李四")
"""
Variable-length tuple [] parameters: the number of arguments is not fixed, and they are passed like positional arguments.
A single * before the parameter name marks it as a tuple of extra positional arguments; callers do not wrap the values in brackets [].
"""
def my_func01(*args):
print(type(args))
print(args[0])
my_func01(1, 3, 5)
my_func01(1, 3, 5, 7)
"""
Variable-length dict {} parameters: passed like keyword arguments of the form name=value.
Two stars ** before the parameter name mark it as a dict; callers do not write {}, and only one such dict of keyword arguments is collected.
"""
def my_func02(**kwargs):
print(type(kwargs))
print(kwargs["name"])
print(kwargs["age"])
my_func02(name="小明", age=12)
"""
A function can contain multiple return statements.
"""
def my_func03(score: int) -> str:
if score >= 70:
return "优秀"
elif score >= 30:
return "中性"
else:
return "差"
print(my_func03(50))
"""
Ways to handle multiple return values:
1- return ["小明", 28]
2- return {"name": "小明", "age": 28}
3- return value1, value2
"""
def my_func04(name, age):
return name, age
print(my_func04("张三",28)[0])
print(my_func04("张三",28)[1])
"""
Unpacking in Python (lists, dicts, multiple return values): initialize several variables in one step.
If a value is a list, a dict, or multiple return values, assigning it directly to several variables is called unpacking, which cuts down on code.
"""
num01, num02, num03, num04 = 1, 3.14, True, "Hello World"
num05, num06, num07, num08 = [1, 3.14, True, "Hello World"]
name, age = my_func04("李四", 28)
print(name, "-->", age)
"""
Unpacking lets Python swap the values of two variables quickly, with no temp variable needed.
"""
a, b = 4, 5
a, b = b, a # b's reference is bound to a and a's reference to b, swapping the values quickly
|
src/scholarutil.py | moorage/synthetic-computer-vision | 940 | 12622956 | import sys, pickle, os, time, yaml
cur_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(cur_dir, 'scholar_py'))
import scholar
cache_file = os.path.join(cur_dir, 'papers.pkl')
yaml_cache_file = os.path.join(cur_dir, 'papers_cache.yml')
def get_paper_data(querier, paper):
if type(paper) is dict:
title = paper.get('title')
cluster_id = paper.get('cluster_id')
elif type(paper) is str:
title = paper
cluster_id = None
else:
raise ValueError("Input arg paper is of an invalid format %s" % repr(paper))
if cluster_id:
print 'Query by cluster_id'
query = scholar.ClusterScholarQuery(cluster = cluster_id)
else:
print 'Query by title "%s"' % title
query = scholar.SearchScholarQuery()
query.set_phrase(title)
query.set_num_page_results(1)
# This is important: setting this to 1 reduces the possibility of getting blocked by Google
querier.send_query(query)
scholar.txt(querier, with_globals=True)
articles = querier.articles
time.sleep(1)
# for art in articles:
# print(encode(art.as_txt()) + '\n')
return articles[0] # Only return the top result
def get_scholar_data(paper_list):
querier = scholar.ScholarQuerier()
settings = scholar.ScholarSettings()
settings.set_citation_format(scholar.ScholarSettings.CITFORM_BIBTEX)
querier.apply_settings(settings)
scholar.ScholarConf.LOG_LEVEL = 3
cache = read_cache(cache_file)
assert(cache != None)
if cache.get('paper_list') == paper_list:
print 'Use cache from file %s' % cache_file
# Use the cache to reduce the number of Google Scholar requests
else:
# Update cache, instead of flushing a complete new one
print 'Get data from google scholar'
cache_paper_title = [p['title'] for p in cache['paper_list']]
missing_paper = [p for p in paper_list if p['title'] not in cache_paper_title]
missing_scholar_data = [get_paper_data(querier, v) for v in missing_paper]
# update cache
cache['paper_list'] += missing_paper
cache['scholar_data'] += missing_scholar_data
save_cache(cache_file, cache)
save_cache(cache_file, cache) # Force the cache to be flushed to disk
return cache['scholar_data']
def read_pickle_cache(cache_file):
# Use pickle to implement cache
print 'Load cache from file %s' % cache_file
if not os.path.isfile(cache_file):
empty_db = dict(paper_list = [], scholar_data = [])
return empty_db
with open(cache_file, 'r') as f:
db = pickle.load(f)
assert(db.get('paper_list'))
assert(db.get('scholar_data'))
assert(len(db['paper_list']) == len(db['scholar_data']))
return db
def save_pickle_cache(cache_file, obj):
print 'Save obj to cache %s' % cache_file
with open(cache_file, 'w') as f:
pickle.dump(obj, f)
read_cache = read_pickle_cache
save_cache = save_pickle_cache
def read_yaml_cache(cache_file):
print 'Load cache from file %s' % cache_file
if not os.path.isfile(cache_file):
return None
with open(cache_file, 'r') as f:
return yaml.load(f)
def save_yaml_cache(cache_file, obj):
print 'Save obj to cache %s' % cache_file
with open(cache_file, 'w') as f:
yaml.dump(obj, f)
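A hedged usage sketch of get_scholar_data; the paper-dict shape is inferred from get_paper_data, and the title below is a placeholder rather than a real query:
paper_list = [{'title': 'Example Paper Title', 'cluster_id': None}]  # hypothetical input
articles = get_scholar_data(paper_list)  # served from papers.pkl when the titles are already cached
for art in articles:
    print art.as_txt()  # as_txt() also appears in the commented-out loop above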
|
Tools/smoke.py | isabella232/MissilesPerfectMaster | 158 | 12622971 | <gh_stars>100-1000
import sys
import os
import math
import random
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFilter
def crop(src_pixels, dst_pixels, x, y, w, h, dx, dy):
for ky in range(h):
for kx in range(w):
p = src_pixels[x+kx, y+ky];
dst_pixels[dx+kx, dy+ky] = p
#end
#end
#end
def convert(srcimg):
w, h = srcimg.size
#print("%d,%d" % (w, h))
src_pixels = srcimg.load()
tmpimg = Image.new('RGBA', [w, h], (0x00,0x00,0x00,0x00))
tmp_pixels = tmpimg.load()
for ky in range(h):
for kx in range(w):
p = src_pixels[kx, ky];
tmp_pixels[kx, ky] = (0xff,
0xff,
0xff,
p[3])
#end
#end
tmpimg.save("out2.png")
dstimg = Image.new('RGBA', [512, 512], (0x00,0x00,0x00,0x00))
s = 0
d = 0
src_pixels = tmpimg.load()
dst_pixels = dstimg.load()
for i in range(4):
#region = srcimg.crop((0, s, 128, s+512))
#dstimg.paste(region, (d, 0))
crop(src_pixels, dst_pixels, 0, s, 128, 512, d, 0)
s += 512
d += 128
#end
return dstimg
#end
if __name__ == '__main__':
args = sys.argv
src = args[1]
srcimg = Image.open(src, 'r')
dstimg = convert(srcimg)
dstimg.save("out.png")
#EOF
|
pytorchvideo/data/encoded_video_decord.py | kevinmtian/pytorchvideo | 2,391 | 12622976 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import logging
import math
from typing import BinaryIO, Dict, Optional, TypeVar
import torch
from .utils import thwc_to_cthw
from .video import Video
logger = logging.getLogger(__name__)
try:
import decord
except ImportError:
_HAS_DECORD = False
else:
_HAS_DECORD = True
if _HAS_DECORD:
decord.bridge.set_bridge("torch")
DecordDevice = TypeVar("DecordDevice")
class EncodedVideoDecord(Video):
"""
Accessing clips from an encoded video using Decord video reading API
as the decoding backend. For more details, please refer to -
`Decord <https://github.com/dmlc/decord>`
"""
def __init__(
self,
file: BinaryIO,
video_name: Optional[str] = None,
decode_audio: bool = True,
sample_rate: int = 44100,
mono: bool = True,
width: int = -1,
height: int = -1,
num_threads: int = 0,
fault_tol: int = -1,
) -> None:
"""
Args:
file (BinaryIO): a file-like object (e.g. io.BytesIO or io.StringIO) that
contains the encoded video.
video_name (str): An optional name assigned to the video.
decode_audio (bool): If disabled, audio is not decoded.
sample_rate: int, default is -1
Desired output sample rate of the audio, unchanged if `-1` is specified.
mono: bool, default is True
Desired output channel layout of the audio. `True` is mono layout. `False`
is unchanged.
width : int, default is -1
Desired output width of the video, unchanged if `-1` is specified.
height : int, default is -1
Desired output height of the video, unchanged if `-1` is specified.
num_threads : int, default is 0
Number of decoding thread, auto if `0` is specified.
fault_tol : int, default is -1
The threshold of corrupted and recovered frames. This is to prevent silent fault
tolerance when, for example, 50% of a video's frames cannot be decoded and duplicate
frames are returned. You may find the fault-tolerance feature convenient in many
cases, but not for training models. Say `N = # recovered frames`
If `fault_tol` < 0, nothing will happen.
If 0 < `fault_tol` < 1.0, if N > `fault_tol * len(video)`,
raise `DECORDLimitReachedError`.
If 1 < `fault_tol`, if N > `fault_tol`, raise `DECORDLimitReachedError`.
"""
self._decode_audio = decode_audio
self._video_name = video_name
if not _HAS_DECORD:
raise ImportError(
"decord is required to use EncodedVideoDecord decoder. Please "
"install with 'pip install decord' for CPU-only version and refer to"
"'https://github.com/dmlc/decord' for GPU-supported version"
)
try:
if self._decode_audio:
self._av_reader = decord.AVReader(
uri=file,
ctx=decord.cpu(0),
sample_rate=sample_rate,
mono=mono,
width=width,
height=height,
num_threads=num_threads,
fault_tol=fault_tol,
)
else:
self._av_reader = decord.VideoReader(
uri=file,
ctx=decord.cpu(0),
width=width,
height=height,
num_threads=num_threads,
fault_tol=fault_tol,
)
except Exception as e:
raise RuntimeError(f"Failed to open video {video_name} with Decord. {e}")
if self._decode_audio:
self._fps = self._av_reader._AVReader__video_reader.get_avg_fps()
else:
self._fps = self._av_reader.get_avg_fps()
self._duration = float(len(self._av_reader)) / float(self._fps)
@property
def name(self) -> Optional[str]:
"""
Returns:
name: the name of the stored video if set.
"""
return self._video_name
@property
def duration(self) -> float:
"""
Returns:
duration: the video's duration/end-time in seconds.
"""
return self._duration
def close(self):
if self._av_reader is not None:
del self._av_reader
self._av_reader = None
def get_clip(
self, start_sec: float, end_sec: float
) -> Dict[str, Optional[torch.Tensor]]:
"""
Retrieves frames from the encoded video at the specified start and end times
in seconds (the video always starts at 0 seconds).
Args:
start_sec (float): the clip start time in seconds
end_sec (float): the clip end time in seconds
Returns:
clip_data:
A dictionary mapping the entries at "video" and "audio" to a tensors.
"video": A tensor of the clip's RGB frames with shape:
(channel, time, height, width). The frames are of type torch.float32 and
in the range [0 - 255].
"audio": A tensor of the clip's audio samples with shape:
(samples). The samples are of type torch.float32 and
in the range [0 - 255].
Returns None if no video or audio found within time range.
"""
if start_sec > end_sec or start_sec > self._duration:
raise RuntimeError(
f"Incorrect time window for Decord decoding for video: {self._video_name}."
)
start_idx = math.ceil(self._fps * start_sec)
end_idx = math.ceil(self._fps * end_sec)
end_idx = min(end_idx, len(self._av_reader))
frame_idxs = list(range(start_idx, end_idx))
audio = None
try:
outputs = self._av_reader.get_batch(frame_idxs)
except Exception as e:
logger.debug(f"Failed to decode video with Decord: {self._video_name}. {e}")
raise e
if self._decode_audio:
audio, video = outputs
if audio is not None:
audio = list(audio)
audio = torch.cat(audio, dim=1)
audio = torch.flatten(audio)
audio = audio.to(torch.float32)
else:
video = outputs
if video is not None:
video = video.to(torch.float32)
video = thwc_to_cthw(video)
return {
"video": video,
"audio": audio,
}
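A hedged usage sketch of the class above (the path is a placeholder, decord must be installed, and the two-second window is arbitrary):
import io

with open("/path/to/video.mp4", "rb") as f:  # hypothetical local file
    video = EncodedVideoDecord(io.BytesIO(f.read()), video_name="demo", decode_audio=False)

clip = video.get_clip(start_sec=0.0, end_sec=min(2.0, video.duration))
frames = clip["video"]  # (channel, time, height, width) float32 tensor in [0, 255], or None
video.close()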
|
Dynamic Programming/Editing Distance/python/editing_distance.py | iabhimanyu/Algorithms | 715 | 12622990 | # Uses python2
def edit_distance(s, t):
m, n = len(s), len(t)
dp = [[0]*(n + 1) for i in xrange(m + 1)]
for i in xrange(m + 1):
for j in xrange(n + 1):
if i == 0:
dp[i][j] = j
elif j == 0:
dp[i][j] = i
elif s[i - 1] == t[j - 1]:
dp[i][j] = dp[i - 1][j - 1]
else:
dp[i][j] = 1 + min(dp[i - 1][j], min(dp[i][j - 1], dp[i - 1][j - 1]))
return dp[m][n]
if __name__ == "__main__":
print(edit_distance(raw_input(), raw_input()))
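A quick sanity check of the recurrence — these are standard Levenshtein distances, shown as a hedged usage note rather than part of the original script:
# kitten -> sitting: substitute k->s, substitute e->i, insert g  =>  3 edits
assert edit_distance("kitten", "sitting") == 3
assert edit_distance("", "abc") == 3  # empty source: all insertions
assert edit_distance("same", "same") == 0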
|