ext (stringclasses 9 values) | sha (stringlengths 40) | content (stringlengths 3 to 1.04M) |
---|---|---|
py | b411f1a7c258a46a12975dc4545a4962de602b8d | import logging
import os
import types
import weakref
from collections import defaultdict
import archinfo
import cle
from cle.address_translator import AT
l = logging.getLogger("angr.project")
# This holds the default execution engine for a given CLE loader backend.
# All the builtins right now use SimEngineVEX. This may not hold for long.
def global_default(): return {'any': SimEngineVEX}
default_engines = defaultdict(global_default)
def register_default_engine(loader_backend, engine, arch='any'):
"""
Register the default execution engine to be used with a given CLE backend.
Usually this is the SimEngineVEX, but if you're operating on something that isn't
going to be lifted to VEX, you'll need to make sure the desired engine is registered here.
:param loader_backend: The loader backend (a type)
:param type engine: The engine to use for the loader backend (a type)
:param arch: The architecture to associate with this engine. Optional.
:return:
"""
if not isinstance(loader_backend, type):
raise TypeError("loader_backend must be a type")
if not isinstance(engine, type):
raise TypeError("engine must be a type")
default_engines[loader_backend][arch] = engine
def get_default_engine(loader_backend, arch='any'):
"""
Get some sort of sane default for a given loader and/or arch.
Can be set with register_default_engine()
:param loader_backend:
:param arch:
:return:
"""
matches = default_engines[loader_backend]
for k,v in matches.items():
if k == arch or k == 'any':
return v
return None
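# Illustrative usage sketch (not part of the original module): assuming a
# hypothetical CLE backend class `MyBlobBackend`, registering and looking up its
# default engine would look like the commented lines below. The fallback to the
# 'any' architecture key is exactly what get_default_engine() implements above.
#
#   register_default_engine(MyBlobBackend, SimEngineVEX)
#   assert get_default_engine(MyBlobBackend) is SimEngineVEX
#   assert get_default_engine(MyBlobBackend, arch='ARMEL') is SimEngineVEX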
projects = weakref.WeakValueDictionary()
def fake_project_unpickler(name):
if name not in projects:
raise AngrError("Project %s has not been opened." % name)
return projects[name]
fake_project_unpickler.__safe_for_unpickling__ = True
class Project(object):
"""
This is the main class of the angr module. It is meant to contain a set of binaries and the relationships between
them, and perform analyses on them.
:param thing: The path to the main executable object to analyze, or a CLE Loader object.
The following parameters are optional.
:param default_analysis_mode: The mode of analysis to use by default. Defaults to 'symbolic'.
:param ignore_functions: A list of function names that, when imported from shared libraries, should
never be stepped into in analysis (calls will return an unconstrained value).
:param use_sim_procedures: Whether to replace resolved dependencies for which simprocedures are
available with said simprocedures.
:param exclude_sim_procedures_func: A function that, when passed a function name, returns whether or not to wrap
it with a simprocedure.
:param exclude_sim_procedures_list: A list of functions to *not* wrap with simprocedures.
:param arch: The target architecture (auto-detected otherwise).
:param simos: a SimOS class to use for this project.
:param bool translation_cache: If True, cache translated basic blocks rather than re-translating them.
:param support_selfmodifying_code: Whether we aggressively support self-modifying code. When enabled, emulation
will try to read code from the current state instead of the original memory,
regardless of the current memory protections.
:type support_selfmodifying_code: bool
Any additional keyword arguments passed will be passed onto ``cle.Loader``.
:ivar analyses: The available analyses.
:type analyses: angr.analysis.Analyses
:ivar entry: The program entrypoint.
:ivar factory: Provides access to important analysis elements such as path groups and symbolic execution results.
:type factory: AngrObjectFactory
:ivar filename: The filename of the executable.
:ivar loader: The program loader.
:type loader: cle.Loader
:ivar surveyors: The available surveyors.
:type surveyors: angr.surveyors.surveyor.Surveyors
"""
def __init__(self, thing,
default_analysis_mode=None,
ignore_functions=None,
use_sim_procedures=True,
exclude_sim_procedures_func=None,
exclude_sim_procedures_list=(),
arch=None, simos=None,
load_options=None,
translation_cache=True,
support_selfmodifying_code=False,
**kwargs):
# Step 1: Load the binary
if load_options is None: load_options = {}
load_options.update(kwargs)
if isinstance(thing, cle.Loader):
if load_options:
l.warning("You provided CLE options to angr but you also provided a completed cle.Loader object!")
self.loader = thing
self.filename = self.loader.main_object.binary
elif hasattr(thing, 'read') and hasattr(thing, 'seek'):
l.info("Loading binary from stream")
self.filename = None
self.loader = cle.Loader(thing, **load_options)
elif not isinstance(thing, (unicode, str)) or not os.path.exists(thing) or not os.path.isfile(thing):
raise Exception("Not a valid binary file: %s" % repr(thing))
else:
# use angr's loader, provided by cle
l.info("Loading binary %s", thing)
self.filename = thing
self.loader = cle.Loader(self.filename, **load_options)
# Step 2: determine its CPU architecture, ideally falling back to CLE's guess
if isinstance(arch, str):
self.arch = archinfo.arch_from_id(arch) # may raise ArchError, let the user see this
elif isinstance(arch, archinfo.Arch):
self.arch = arch
elif arch is None:
self.arch = self.loader.main_object.arch
else:
raise ValueError("Invalid arch specification.")
# Step 3: Set some defaults and set the public and private properties
if not default_analysis_mode:
default_analysis_mode = 'symbolic'
if not ignore_functions:
ignore_functions = []
if isinstance(exclude_sim_procedures_func, types.LambdaType):
l.warning("Passing a lambda type as the exclude_sim_procedures_func argument to Project causes the resulting object to be un-serializable.")
self._sim_procedures = {}
self._default_analysis_mode = default_analysis_mode
self._exclude_sim_procedures_func = exclude_sim_procedures_func
self._exclude_sim_procedures_list = exclude_sim_procedures_list
self._should_use_sim_procedures = use_sim_procedures
self._support_selfmodifying_code = support_selfmodifying_code
self._ignore_functions = ignore_functions
self._executing = False # this is a flag for the convenience API, exec() and terminate_execution() below
if self._support_selfmodifying_code:
if translation_cache is True:
translation_cache = False
l.warning("Disabling IRSB translation cache because support for self-modifying code is enabled.")
# Look up the default engine.
engine_cls = get_default_engine(type(self.loader.main_object))
if not engine_cls:
raise AngrError("No engine associated with loader %s" % str(type(self.loader.main_object)))
engine = engine_cls(
stop_points=self._sim_procedures,
use_cache=translation_cache,
support_selfmodifying_code=support_selfmodifying_code)
procedure_engine = SimEngineProcedure()
hook_engine = SimEngineHook(self)
failure_engine = SimEngineFailure(self)
syscall_engine = SimEngineSyscall(self)
unicorn_engine = SimEngineUnicorn(self._sim_procedures)
self.entry = self.loader.main_object.entry
self.factory = AngrObjectFactory(
self,
engine,
procedure_engine,
[failure_engine, syscall_engine, hook_engine, unicorn_engine, engine])
self.analyses = Analyses(self)
self.surveyors = Surveyors(self)
self.kb = KnowledgeBase(self, self.loader.main_object)
if self.filename is not None:
projects[self.filename] = self
# Step 4: determine the guest OS
if isinstance(simos, type) and issubclass(simos, SimOS):
self._simos = simos(self) #pylint:disable=invalid-name
elif simos is None:
self._simos = os_mapping[self.loader.main_object.os](self)
else:
raise ValueError("Invalid OS specification or non-matching architecture.")
# Step 5: Register simprocedures as appropriate for library functions
for obj in self.loader.initial_load_objects:
self._register_object(obj)
# Step 6: Run OS-specific configuration
self._simos.configure_project()
def _register_object(self, obj):
"""
        This scans through an object's imports and hooks them with SimProcedures from our library whenever possible
"""
# Step 1: get the set of libraries we are allowed to use to resolve unresolved symbols
missing_libs = []
for lib_name in self.loader.missing_dependencies:
try:
missing_libs.append(SIM_LIBRARIES[lib_name])
except KeyError:
l.info("There are no simprocedures for missing library %s :(", lib_name)
# Step 2: Categorize every "import" symbol in each object.
# If it's IGNORED, mark it for stubbing
# If it's blacklisted, don't process it
# If it matches a simprocedure we have, replace it
for reloc in obj.imports.itervalues():
# Step 2.1: Quick filter on symbols we really don't care about
func = reloc.symbol
if func is None:
continue
if not func.is_function:
continue
if not reloc.resolved:
l.debug("Ignoring unresolved import '%s' from %s ...?", func.name, reloc.owner_obj)
continue
if self.is_hooked(reloc.symbol.resolvedby.rebased_addr):
l.debug("Already hooked %s (%s)", func.name, reloc.owner_obj)
continue
# Step 2.2: If this function has been resolved by a static dependency,
# check if we actually can and want to replace it with a SimProcedure.
# We opt out of this step if it is blacklisted by ignore_functions, which
# will cause it to be replaced by ReturnUnconstrained later.
if func.resolved and func.resolvedby.owner_obj is not self.loader.extern_object and \
func.name not in self._ignore_functions:
if self._check_user_blacklists(func.name):
continue
owner_name = func.resolvedby.owner_obj.provides
if isinstance(self.loader.main_object, cle.backends.pe.PE):
owner_name = owner_name.lower()
if owner_name not in SIM_LIBRARIES:
continue
sim_lib = SIM_LIBRARIES[owner_name]
if not sim_lib.has_implementation(func.name):
continue
l.info("Using builtin SimProcedure for %s from %s", func.name, sim_lib.name)
self.hook_symbol(func.name, sim_lib.get(func.name, self.arch))
# Step 2.3: If 2.2 didn't work, check if the symbol wants to be resolved
# by a library we already know something about. Resolve it appropriately.
# Note that _check_user_blacklists also includes _ignore_functions.
# An important consideration is that even if we're stubbing a function out,
# we still want to try as hard as we can to figure out where it comes from
# so we can get the calling convention as close to right as possible.
elif reloc.resolvewith is not None and reloc.resolvewith in SIM_LIBRARIES:
sim_lib = SIM_LIBRARIES[reloc.resolvewith]
if self._check_user_blacklists(func.name):
if not func.is_weak:
l.info("Using stub SimProcedure for unresolved %s from %s", func.name, sim_lib.name)
self.hook_symbol(func.name, sim_lib.get_stub(func.name, self.arch))
else:
l.info("Using builtin SimProcedure for unresolved %s from %s", func.name, sim_lib.name)
self.hook_symbol(func.name, sim_lib.get(func.name, self.arch))
# Step 2.4: If 2.3 didn't work (the symbol didn't request a provider we know of), try
# looking through each of the SimLibraries we're using to resolve unresolved
# functions. If any of them know anything specifically about this function,
# resolve it with that. As a final fallback, just ask any old SimLibrary
# to resolve it.
elif missing_libs:
for sim_lib in missing_libs:
if sim_lib.has_metadata(func.name):
if self._check_user_blacklists(func.name):
if not func.is_weak:
l.info("Using stub SimProcedure for unresolved %s from %s", func.name, sim_lib.name)
self.hook_symbol(func.name, sim_lib.get_stub(func.name, self.arch))
else:
l.info("Using builtin SimProcedure for unresolved %s from %s", func.name, sim_lib.name)
self.hook_symbol(func.name, sim_lib.get(func.name, self.arch))
break
else:
if not func.is_weak:
l.info("Using stub SimProcedure for unresolved %s", func.name)
self.hook_symbol(func.name, missing_libs[0].get(func.name, self.arch))
# Step 2.5: If 2.4 didn't work (we have NO SimLibraries to work with), just
# use the vanilla ReturnUnconstrained, assuming that this isn't a weak func
elif not func.is_weak:
l.info("Using stub SimProcedure for unresolved %s", func.name)
self.hook_symbol(func.name, SIM_PROCEDURES['stubs']['ReturnUnconstrained']())
def _check_user_blacklists(self, f):
"""
Has symbol name `f` been marked for exclusion by any of the user
parameters?
"""
return not self._should_use_sim_procedures or \
f in self._exclude_sim_procedures_list or \
f in self._ignore_functions or \
(self._exclude_sim_procedures_func is not None and self._exclude_sim_procedures_func(f))
#
# Public methods
# They're all related to hooking!
#
def hook(self, addr, hook=None, length=0, kwargs=None):
"""
Hook a section of code with a custom function. This is used internally to provide symbolic
summaries of library functions, and can be used to instrument execution or to modify
control flow.
When hook is not specified, it returns a function decorator that allows easy hooking.
Usage::
# Assuming proj is an instance of angr.Project, we will add a custom hook at the entry
# point of the project.
@proj.hook(proj.entry)
def my_hook(state):
print "Welcome to execution!"
:param addr: The address to hook.
:param hook: A :class:`angr.project.Hook` describing a procedure to run at the
given address. You may also pass in a SimProcedure class or a function
directly and it will be wrapped in a Hook object for you.
:param length: If you provide a function for the hook, this is the number of bytes
that will be skipped by executing the hook by default.
:param kwargs: If you provide a SimProcedure for the hook, these are the keyword
arguments that will be passed to the procedure's `run` method
eventually.
"""
if hook is None:
# if we haven't been passed a thing to hook with, assume we're being used as a decorator
return self._hook_decorator(addr, length=length, kwargs=kwargs)
if kwargs is None: kwargs = {}
l.debug('hooking %#x with %s', addr, hook)
if self.is_hooked(addr):
l.warning("Address is already hooked [hook(%#x, %s)]. Not re-hooking.", addr, hook)
return
if isinstance(hook, type):
if once("hook_instance_warning"):
l.critical("Hooking with a SimProcedure instance is deprecated! Please hook with an instance.")
hook = hook(**kwargs)
if callable(hook):
hook = SIM_PROCEDURES['stubs']['UserHook'](user_func=hook, length=length, **kwargs)
self._sim_procedures[addr] = hook
def is_hooked(self, addr):
"""
Returns True if `addr` is hooked.
:param addr: An address.
:returns: True if addr is hooked, False otherwise.
"""
return addr in self._sim_procedures
def hooked_by(self, addr):
"""
Returns the current hook for `addr`.
:param addr: An address.
:returns: None if the address is not hooked.
"""
if not self.is_hooked(addr):
l.warning("Address %#x is not hooked", addr)
return None
return self._sim_procedures[addr]
def unhook(self, addr):
"""
Remove a hook.
:param addr: The address of the hook.
"""
if not self.is_hooked(addr):
l.warning("Address %#x not hooked", addr)
return
del self._sim_procedures[addr]
def hook_symbol(self, symbol_name, obj, kwargs=None):
"""
Resolve a dependency in a binary. Uses the "externs object" (project.loader.extern_object) to
allocate an address for a new symbol in the binary, and then tells the loader to re-perform
the relocation process, taking into account the new symbol.
:param symbol_name: The name of the dependency to resolve.
:param obj: The thing with which to satisfy the dependency.
:param kwargs: If you provide a SimProcedure for the hook, these are the keyword
arguments that will be passed to the procedure's `run` method
eventually.
:returns: The address of the new symbol.
:rtype: int
"""
if type(obj) in (int, long):
# this is pretty intensely sketchy
l.info("Instructing the loader to re-point symbol %s at address %#x", symbol_name, obj)
self.loader.provide_symbol(self.loader.extern_object, symbol_name, AT.from_mva(obj, self.loader.extern_object).to_rva())
return obj
sym = self.loader.find_symbol(symbol_name)
if sym is None:
l.error("Could not find symbol %s", symbol_name)
return None
hook_addr, _ = self._simos.prepare_function_symbol(symbol_name, basic_addr=sym.rebased_addr)
if self.is_hooked(hook_addr):
l.warning("Re-hooking symbol %s", symbol_name)
self.unhook(hook_addr)
self.hook(hook_addr, obj, kwargs=kwargs)
return hook_addr
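    # Illustrative sketch (not from the original source): assuming `proj` is a
    # Project whose binary imports rand(), and `MyRand` is a user-defined
    # SimProcedure subclass, redirecting the import would look like:
    #
    #   addr = proj.hook_symbol('rand', MyRand())
    #   assert proj.is_hooked(addr)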
def hook_symbol_batch(self, hooks):
"""
Hook many symbols at once.
:param dict hooks: A mapping from symbol name to hook
"""
if once("hook_symbol_batch warning"):
l.critical("Due to advances in technology, hook_symbol_batch is no longer necessary for performance. Please use hook_symbol several times.")
for x in hooks:
self.hook_symbol(x, hooks[x])
def is_symbol_hooked(self, symbol_name):
"""
Check if a symbol is already hooked.
:param str symbol_name: Name of the symbol.
:return: True if the symbol can be resolved and is hooked, False otherwise.
:rtype: bool
"""
sym = self.loader.find_symbol(symbol_name)
if sym is None:
l.warning("Could not find symbol %s", symbol_name)
return False
hook_addr, _ = self._simos.prepare_function_symbol(symbol_name, basic_addr=sym.rebased_addr)
return self.is_hooked(hook_addr)
#
# A convenience API (in the style of triton and manticore) for symbolic execution.
#
def execute(self, *args, **kwargs):
"""
This function is a symbolic execution helper in the simple style
        supported by triton and manticore. It is designed to be run after
setting up hooks (see Project.hook), in which the symbolic state
can be checked.
This function can be run in three different ways:
- When run with no parameters, this function begins symbolic execution
from the entrypoint.
- It can also be run with a "state" parameter specifying a SimState to
begin symbolic execution from.
- Finally, it can accept any arbitrary keyword arguments, which are all
passed to project.factory.full_init_state.
If symbolic execution finishes, this function returns the resulting
simulation manager.
"""
if args:
state = args[0]
else:
state = self.factory.full_init_state(**kwargs)
pg = self.factory.simgr(state)
self._executing = True
return pg.step(until=lambda lpg: not self._executing)
def terminate_execution(self):
"""
Terminates a symbolic execution that was started with Project.execute().
"""
self._executing = False
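    # Illustrative use of the convenience API above (the address and hook below
    # are hypothetical, not from the original source):
    #
    #   @proj.hook(0x400690)
    #   def stop_here(state):
    #       proj.terminate_execution()
    #
    #   simgr = proj.execute()   # steps from the entry point until terminated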
#
# Private methods related to hooking
#
def _hook_decorator(self, addr, length=0, kwargs=None):
"""
Return a function decorator that allows easy hooking. Please refer to hook() for its usage.
:return: The function decorator.
"""
def hook_decorator(func):
self.hook(addr, func, length=length, kwargs=kwargs)
return hook_decorator
#
# Pickling
#
def __getstate__(self):
try:
analyses, surveyors = self.analyses, self.surveyors
self.analyses, self.surveyors = None, None
return dict(self.__dict__)
finally:
self.analyses, self.surveyors = analyses, surveyors
def __setstate__(self, s):
self.__dict__.update(s)
self.analyses = Analyses(self)
self.surveyors = Surveyors(self)
from .errors import AngrError
from .factory import AngrObjectFactory
from .simos import SimOS, os_mapping
from .analyses.analysis import Analyses
from .surveyors import Surveyors
from .knowledge_base import KnowledgeBase
from .engines import SimEngineFailure, SimEngineSyscall, SimEngineProcedure, SimEngineVEX, SimEngineUnicorn, SimEngineHook
from .misc.ux import once
from .procedures import SIM_PROCEDURES, SIM_LIBRARIES
|
py | b411f28669768a4090fa8a640a85d49b54a3e36d | # This file is a part of Arjuna
# Copyright 2015-2020 Rahul Verma
# Website: www.RahulVerma.net
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. |
py | b411f3d15d4d96fcdaccaadf46fbafd7a4d0370c | """ Optimizers class """
import torch
import torch.optim as optim
from torch.nn.utils import clip_grad_norm_
import operator
import functools
from copy import copy
from math import sqrt
import types
import importlib
from onmt.utils.misc import fn_args
from onmt.constants import ModelTask, TrainMode
def build_torch_optimizer(model, opt, ac_optim_opt=None):
"""Builds the PyTorch optimizer.
We use the default parameters for Adam that are suggested by
the original paper https://arxiv.org/pdf/1412.6980.pdf
These values are also used by other established implementations,
e.g. https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer
https://keras.io/optimizers/
    Recently, slightly different values have been used in the paper
    "Attention is all you need"
    https://arxiv.org/pdf/1706.03762.pdf; in particular, beta2=0.98 was
    used there. However, beta2=0.999 is still arguably the more
    established value, so we use that here as well.
Args:
model: The model to optimize.
      opt: The dictionary of options.
Returns:
A ``torch.optim.Optimizer`` instance.
"""
params = [p for p in model.parameters() if p.requires_grad]
betas = [opt.adam_beta1, opt.adam_beta2]
if opt.optim == 'sgd':
optimizer = optim.SGD(params, lr=opt.learning_rate)
elif opt.optim == 'adagrad':
optimizer = optim.Adagrad(
params,
lr=opt.learning_rate,
initial_accumulator_value=opt.adagrad_accumulator_init)
elif opt.optim == 'adadelta':
optimizer = optim.Adadelta(params, lr=opt.learning_rate)
elif opt.optim == 'adafactor':
optimizer = AdaFactor(
params,
non_constant_decay=True,
enable_factorization=True,
weight_decay=0)
elif opt.optim == 'adam':
if ac_optim_opt is None or opt.train_mode == TrainMode.ACTOR:
lr = opt.learning_rate
elif ac_optim_opt == 'actor':
lr = opt.actor_learning_rate
elif ac_optim_opt == 'critic':
lr = opt.critic_learning_rate
optimizer = optim.Adam(
params,
lr=lr,
betas=betas,
eps=1e-9)
elif opt.optim == 'sharedadam':
if ac_optim_opt is None or opt.train_mode == TrainMode.ACTOR:
lr = opt.learning_rate
elif ac_optim_opt == 'actor':
lr = opt.actor_learning_rate
elif ac_optim_opt == 'critic':
lr = opt.critic_learning_rate
optimizer = SharedAdam(
params,
lr=lr,
betas=betas)
elif opt.optim == 'sparseadam':
dense = []
sparse = []
for name, param in model.named_parameters():
if not param.requires_grad:
continue
# TODO: Find a better way to check for sparse gradients.
if 'embed' in name:
sparse.append(param)
else:
dense.append(param)
optimizer = MultipleOptimizer(
[optim.Adam(
dense,
lr=opt.learning_rate,
betas=betas,
eps=1e-8),
optim.SparseAdam(
sparse,
lr=opt.learning_rate,
betas=betas,
eps=1e-8)])
elif opt.optim == 'fusedadam':
# we use here a FusedAdam() copy of an old Apex repo
optimizer = FusedAdam(
params,
lr=opt.learning_rate,
betas=betas)
if opt.model_dtype == 'fp16':
import apex
# In this case use the old FusedAdam with FP16_optimizer wrapper
static_loss_scale = opt.loss_scale
dynamic_loss_scale = opt.loss_scale == 0
optimizer = apex.contrib.optimizers.FP16_Optimizer(
optimizer,
static_loss_scale=static_loss_scale,
dynamic_loss_scale=dynamic_loss_scale)
else:
raise ValueError('Invalid optimizer type: ' + opt.optim)
return optimizer
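# Minimal usage sketch (illustrative only; the attribute names mirror the ones
# read above, and `model` stands in for any nn.Module):
#
#   from argparse import Namespace
#   opt = Namespace(optim='adam', learning_rate=2.0,
#                   adam_beta1=0.9, adam_beta2=0.998, model_dtype='fp32')
#   torch_optim = build_torch_optimizer(model, opt)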
def make_learning_rate_decay_fn(opt):
"""Returns the learning decay function from options."""
if opt.decay_method == 'noam':
return functools.partial(
noam_decay,
warmup_steps=opt.warmup_steps,
model_size=opt.rnn_size)
elif opt.decay_method == 'noamwd':
return functools.partial(
noamwd_decay,
warmup_steps=opt.warmup_steps,
model_size=opt.rnn_size,
rate=opt.learning_rate_decay,
decay_steps=opt.decay_steps,
start_step=opt.start_decay_steps)
elif opt.decay_method == 'rsqrt':
return functools.partial(
rsqrt_decay, warmup_steps=opt.warmup_steps)
elif opt.start_decay_steps is not None:
return functools.partial(
exponential_decay,
rate=opt.learning_rate_decay,
decay_steps=opt.decay_steps,
start_step=opt.start_decay_steps)
def noam_decay(step, warmup_steps, model_size):
"""Learning rate schedule described in
https://arxiv.org/pdf/1706.03762.pdf.
"""
return (
model_size ** (-0.5) *
min(step ** (-0.5), step * warmup_steps**(-1.5)))
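# Worked example (illustrative): with model_size=512 and warmup_steps=4000 the
# factor grows linearly up to step 4000, where both terms of the min() are
# equal and it peaks at 512**-0.5 * 4000**-0.5, roughly 7.0e-4, then decays as
# step**-0.5. The Optimizer below multiplies this factor by the base learning
# rate.
#
#   noam_decay(4000, warmup_steps=4000, model_size=512)   # ~= 0.0007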
def noamwd_decay(step, warmup_steps,
model_size, rate, decay_steps, start_step=0):
"""Learning rate schedule optimized for huge batches
"""
return (
model_size ** (-0.5) *
min(step ** (-0.5), step * warmup_steps**(-1.5)) *
rate ** (max(step - start_step + decay_steps, 0) // decay_steps))
def exponential_decay(step, rate, decay_steps, start_step=0):
"""A standard exponential decay, scaling the learning rate by :obj:`rate`
every :obj:`decay_steps` steps.
"""
return rate ** (max(step - start_step + decay_steps, 0) // decay_steps)
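# Worked example (illustrative): with rate=0.5, decay_steps=1000 and
# start_step=5000 the factor is 1.0 through step 4999, becomes 0.5 at step
# 5000, 0.25 at step 6000, and so on -- i.e. the learning rate halves every
# 1000 steps once decay has started.
#
#   exponential_decay(6000, rate=0.5, decay_steps=1000, start_step=5000)  # 0.25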
def rsqrt_decay(step, warmup_steps):
"""Decay based on the reciprocal of the step square root."""
return 1.0 / sqrt(max(step, warmup_steps))
class MultipleOptimizer(object):
""" Implement multiple optimizers needed for sparse adam """
def __init__(self, op):
""" ? """
self.optimizers = op
@property
def param_groups(self):
param_groups = []
for optimizer in self.optimizers:
param_groups.extend(optimizer.param_groups)
return param_groups
def zero_grad(self):
""" ? """
for op in self.optimizers:
op.zero_grad()
def step(self):
""" ? """
for op in self.optimizers:
op.step()
@property
def state(self):
""" ? """
return {k: v for op in self.optimizers for k, v in op.state.items()}
def state_dict(self):
""" ? """
return [op.state_dict() for op in self.optimizers]
def load_state_dict(self, state_dicts):
""" ? """
assert len(state_dicts) == len(self.optimizers)
for i in range(len(state_dicts)):
self.optimizers[i].load_state_dict(state_dicts[i])
class Optimizer(object):
"""
Controller class for optimization. Mostly a thin
wrapper for `optim`, but also useful for implementing
rate scheduling beyond what is currently available.
Also implements necessary methods for training RNNs such
as grad manipulations.
"""
def __init__(self,
optimizer,
learning_rate,
learning_rate_decay_fn=None,
max_grad_norm=None):
"""Initializes the controller.
Args:
optimizer: A ``torch.optim.Optimizer`` instance.
learning_rate: The initial learning rate.
          learning_rate_decay_fn: An optional callable taking the current step
            as argument and returning a learning rate scaling factor.
max_grad_norm: Clip gradients to this global norm.
"""
self._optimizer = optimizer
self._learning_rate = learning_rate
self._learning_rate_decay_fn = learning_rate_decay_fn
self._max_grad_norm = max_grad_norm or 0
self._training_step = 1
self._decay_step = 1
self._fp16 = None
self._scaler = None
@classmethod
def from_opt(cls, model, opt, checkpoint=None, ac_optim_opt=None):
"""Builds the optimizer from options.
Args:
cls: The ``Optimizer`` class to instantiate.
model: The model to optimize.
opt: The dict of user options.
checkpoint: An optional checkpoint to load states from.
          ac_optim_opt: An optional flag selecting whether to build the optimizer
            for the actor or the critic (only relevant when loading from a checkpoint).
Returns:
An ``Optimizer`` instance.
"""
optim_opt = opt
optim_state_dict = None
if opt.train_from and checkpoint is not None:
            if ac_optim_opt is None:
optim = checkpoint['optim']
elif ac_optim_opt == 'actor':
optim = checkpoint['actor_optim']
elif ac_optim_opt == 'critic':
optim = checkpoint['critic_optim']
ckpt_opt = checkpoint['opt']
ckpt_state_dict = {}
if isinstance(optim, Optimizer): # Backward compatibility.
ckpt_state_dict['training_step'] = optim._step + 1
ckpt_state_dict['decay_step'] = optim._step + 1
ckpt_state_dict['optimizer'] = optim.optimizer.state_dict()
else:
ckpt_state_dict = optim
if opt.reset_optim == 'none':
# Load everything from the checkpoint.
optim_opt = ckpt_opt
optim_state_dict = ckpt_state_dict
elif opt.reset_optim == 'all':
# Build everything from scratch.
pass
elif opt.reset_optim == 'states':
# Reset optimizer, keep options.
optim_opt = ckpt_opt
optim_state_dict = ckpt_state_dict
del optim_state_dict['optimizer']
elif opt.reset_optim == 'keep_states':
# Reset options, keep optimizer.
optim_state_dict = ckpt_state_dict
# # Debugging
# if opt.model_task == ModelTask.AC and opt.async:
# optim_opt = opt
# optim_state_dict = None
if ac_optim_opt is None or opt.train_mode == TrainMode.ACTOR:
lr = opt.learning_rate
elif ac_optim_opt == 'actor':
lr = opt.actor_learning_rate
elif ac_optim_opt == 'critic':
lr = opt.critic_learning_rate
optimizer = cls(
build_torch_optimizer(model, optim_opt, ac_optim_opt),
learning_rate=lr,
learning_rate_decay_fn=make_learning_rate_decay_fn(optim_opt),
max_grad_norm=optim_opt.max_grad_norm)
if opt.model_dtype == "fp16":
if opt.optim == "fusedadam":
optimizer._fp16 = "legacy"
else:
optimizer._fp16 = "amp"
from torch.cuda.amp import GradScaler
optimizer._scaler = GradScaler()
if optim_state_dict:
optimizer.load_state_dict(optim_state_dict)
return optimizer
@property
def training_step(self):
"""The current training step."""
return self._training_step
@property
def amp(self):
"""True if use torch amp mix precision training."""
return self._fp16 == "amp"
def learning_rate(self):
"""Returns the current learning rate."""
if self._learning_rate_decay_fn is None:
return self._learning_rate
scale = self._learning_rate_decay_fn(self._decay_step)
return scale * self._learning_rate
def state_dict(self):
return {
'training_step': self._training_step,
'decay_step': self._decay_step,
'optimizer': self._optimizer.state_dict()
}
def load_state_dict(self, state_dict):
self._training_step = state_dict['training_step']
# State can be partially restored.
if 'decay_step' in state_dict:
self._decay_step = state_dict['decay_step']
if 'optimizer' in state_dict:
self._optimizer.load_state_dict(state_dict['optimizer'])
def zero_grad(self):
"""Zero the gradients of optimized parameters."""
self._optimizer.zero_grad()
def backward(self, loss):
"""Wrapper for backward pass. Some optimizer requires ownership of the
backward pass."""
if self.amp:
self._scaler.scale(loss).backward()
elif self._fp16 == "legacy":
kwargs = {}
if "update_master_grads" in fn_args(self._optimizer.backward):
kwargs["update_master_grads"] = True
self._optimizer.backward(loss, **kwargs)
else:
loss.backward()
def step(self):
"""Update the model parameters based on current gradients.
Optionally, will employ gradient modification or update learning
rate.
"""
learning_rate = self.learning_rate()
if self.amp:
self._scaler.unscale_(self._optimizer)
elif self._fp16 == "legacy":
if hasattr(self._optimizer, "update_master_grads"):
self._optimizer.update_master_grads()
if hasattr(self._optimizer, "clip_master_grads") and \
self._max_grad_norm > 0:
self._optimizer.clip_master_grads(self._max_grad_norm)
for group in self._optimizer.param_groups:
group['lr'] = learning_rate
if self._max_grad_norm > 0 and self._fp16 != "legacy":
clip_grad_norm_(group['params'], self._max_grad_norm)
if self.amp:
            # The gradients were already unscaled above, so scaler.step() will not
            # unscale again; it skips optimizer.step() if gradients contain infs/NaNs.
self._scaler.step(self._optimizer)
# Updates the scale for next iteration.
self._scaler.update()
else:
self._optimizer.step()
self._decay_step += 1
self._training_step += 1
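# Typical training-loop usage of the wrapper above (illustrative sketch only;
# `model`, `opt`, `data` and `batch_loss` are placeholders for the caller's
# objects):
#
#   optim = Optimizer.from_opt(model, opt)
#   for batch in data:
#       optim.zero_grad()
#       loss = batch_loss(model, batch)
#       optim.backward(loss)
#       optim.step()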
# Code below is an implementation of https://arxiv.org/pdf/1804.04235.pdf
# inspired but modified from https://github.com/DeadAt0m/adafactor-pytorch
class AdaFactor(torch.optim.Optimizer):
def __init__(self, params, lr=None, beta1=0.9, beta2=0.999, eps1=1e-30,
eps2=1e-3, cliping_threshold=1, non_constant_decay=True,
enable_factorization=True, ams_grad=True, weight_decay=0):
enable_momentum = beta1 != 0
if non_constant_decay:
ams_grad = False
defaults = dict(lr=lr, beta1=beta1, beta2=beta2, eps1=eps1,
eps2=eps2, cliping_threshold=cliping_threshold,
weight_decay=weight_decay, ams_grad=ams_grad,
enable_factorization=enable_factorization,
enable_momentum=enable_momentum,
non_constant_decay=non_constant_decay)
super(AdaFactor, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdaFactor, self).__setstate__(state)
def _experimental_reshape(self, shape):
temp_shape = shape[2:]
if len(temp_shape) == 1:
new_shape = (shape[0], shape[1]*shape[2])
else:
tmp_div = len(temp_shape) // 2 + len(temp_shape) % 2
new_shape = (shape[0]*functools.reduce(operator.mul,
temp_shape[tmp_div:], 1),
shape[1]*functools.reduce(operator.mul,
temp_shape[:tmp_div], 1))
return new_shape, copy(shape)
    def _check_shape(self, shape):
        '''
        output1 - True if the factored (matrix) algorithm applies, False for a vector;
        output2 - True if the tensor needs to be reshaped first
        '''
        if len(shape) > 2:
            return True, True
        elif len(shape) == 2 and (shape[0] == 1 or shape[1] == 1):
            # a 2-D tensor with a singleton dimension is treated as a vector
            return False, False
        elif len(shape) == 2:
            return True, False
        else:
            return False, False
def _rms(self, x):
return sqrt(torch.mean(x.pow(2)))
def step(self, closure=None):
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse \
gradients, use SparseAdam instead')
is_matrix, is_need_reshape = self._check_shape(grad.size())
new_shape = p.data.size()
if is_need_reshape and group['enable_factorization']:
new_shape, old_shape = \
self._experimental_reshape(p.data.size())
grad = grad.view(new_shape)
state = self.state[p]
if len(state) == 0:
state['step'] = 0
if group['enable_momentum']:
state['exp_avg'] = torch.zeros(new_shape,
dtype=torch.float32,
device=p.grad.device)
if is_matrix and group['enable_factorization']:
state['exp_avg_sq_R'] = \
torch.zeros((1, new_shape[1]),
dtype=torch.float32,
device=p.grad.device)
state['exp_avg_sq_C'] = \
torch.zeros((new_shape[0], 1),
dtype=torch.float32,
device=p.grad.device)
else:
state['exp_avg_sq'] = torch.zeros(new_shape,
dtype=torch.float32,
device=p.grad.device)
if group['ams_grad']:
state['exp_avg_sq_hat'] = \
torch.zeros(new_shape, dtype=torch.float32,
device=p.grad.device)
if group['enable_momentum']:
exp_avg = state['exp_avg']
if is_matrix and group['enable_factorization']:
exp_avg_sq_r = state['exp_avg_sq_R']
exp_avg_sq_c = state['exp_avg_sq_C']
else:
exp_avg_sq = state['exp_avg_sq']
if group['ams_grad']:
exp_avg_sq_hat = state['exp_avg_sq_hat']
state['step'] += 1
lr_t = group['lr']
lr_t *= max(group['eps2'], self._rms(p.data))
if group['enable_momentum']:
if group['non_constant_decay']:
beta1_t = group['beta1'] * \
(1 - group['beta1'] ** (state['step'] - 1)) \
/ (1 - group['beta1'] ** state['step'])
else:
beta1_t = group['beta1']
exp_avg.mul_(beta1_t).add_(1 - beta1_t, grad)
if group['non_constant_decay']:
beta2_t = group['beta2'] * \
(1 - group['beta2'] ** (state['step'] - 1)) / \
(1 - group['beta2'] ** state['step'])
else:
beta2_t = group['beta2']
if is_matrix and group['enable_factorization']:
exp_avg_sq_r.mul_(beta2_t). \
add_(1 - beta2_t, torch.sum(torch.mul(grad, grad).
add_(group['eps1']),
dim=0, keepdim=True))
exp_avg_sq_c.mul_(beta2_t). \
add_(1 - beta2_t, torch.sum(torch.mul(grad, grad).
add_(group['eps1']),
dim=1, keepdim=True))
v = torch.mul(exp_avg_sq_c,
exp_avg_sq_r).div_(torch.sum(exp_avg_sq_r))
else:
exp_avg_sq.mul_(beta2_t). \
addcmul_(1 - beta2_t, grad, grad). \
add_((1 - beta2_t)*group['eps1'])
v = exp_avg_sq
g = grad
if group['enable_momentum']:
g = torch.div(exp_avg, 1 - beta1_t ** state['step'])
if group['ams_grad']:
torch.max(exp_avg_sq_hat, v, out=exp_avg_sq_hat)
v = exp_avg_sq_hat
u = torch.div(g, (torch.div(v, 1 - beta2_t **
state['step'])).sqrt().add_(group['eps1']))
else:
u = torch.div(g, v.sqrt())
u.div_(max(1, self._rms(u) / group['cliping_threshold']))
p.data.add_(-lr_t * (u.view(old_shape) if is_need_reshape and
group['enable_factorization'] else u))
if group['weight_decay'] != 0:
p.data.add_(-group['weight_decay'] * lr_t, p.data)
return loss
class SharedAdam(torch.optim.Adam):
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-9, weight_decay=0):
super(SharedAdam, self).__init__(params, lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
for group in self.param_groups:
for p in group['params']:
state = self.state[p]
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p.data)
state['exp_avg_sq'] = torch.zeros_like(p.data)
state['exp_avg'].share_memory_()
state['exp_avg_sq'].share_memory_()
# def load_state(self, state_dict):
#
# self.load_state_dict(state_dict)
#
# for group in self.param_groups:
# for p in group['params']:
# state = self.state[p]
# if len(state) == 0:
# state['step'] = 0
# state['exp_avg'] = torch.zeros_like(p.data)
# state['exp_avg_sq'] = torch.zeros_like(p.data)
#
# state['exp_avg'].share_memory_()
# state['exp_avg_sq'].share_memory_()
class FusedAdam(torch.optim.Optimizer):
"""Implements Adam algorithm. Currently GPU-only.
Requires Apex to be installed via
``python setup.py install --cuda_ext --cpp_ext``.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square.
(default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in FusedAdam!
eps_inside_sqrt (boolean, optional): in the 'update parameters' step,
adds eps to the bias-corrected second moment estimate before
evaluating square root instead of adding it to the square root of
second moment estimate as in the original paper. (default: False)
.. _Adam: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params,
lr=1e-3, bias_correction=True,
betas=(0.9, 0.999), eps=1e-8, eps_inside_sqrt=False,
weight_decay=0., max_grad_norm=0., amsgrad=False):
global fused_adam_cuda
fused_adam_cuda = importlib.import_module("fused_adam_cuda")
if amsgrad:
raise RuntimeError('AMSGrad variant not supported.')
defaults = dict(lr=lr, bias_correction=bias_correction,
betas=betas, eps=eps, weight_decay=weight_decay,
max_grad_norm=max_grad_norm)
super(FusedAdam, self).__init__(params, defaults)
self.eps_mode = 0 if eps_inside_sqrt else 1
def step(self, closure=None, grads=None, output_params=None,
scale=1., grad_norms=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
grads (list of tensors, optional): weight gradient to use for the
optimizer update. If gradients have type torch.half, parameters
are expected to be in type torch.float. (default: None)
output params (list of tensors, optional): A reduced precision copy
of the updated weights written out in addition to the regular
updated weights. Have to be of same type as gradients.
(default: None)
scale (float, optional): factor to divide gradient tensor values
by before applying to weights. (default: 1)
"""
loss = None
if closure is not None:
loss = closure()
if grads is None:
grads_group = [None]*len(self.param_groups)
# backward compatibility
# assuming a list/generator of parameter means single group
elif isinstance(grads, types.GeneratorType):
grads_group = [grads]
elif type(grads[0]) != list:
grads_group = [grads]
else:
grads_group = grads
if output_params is None:
output_params_group = [None]*len(self.param_groups)
elif isinstance(output_params, types.GeneratorType):
output_params_group = [output_params]
elif type(output_params[0]) != list:
output_params_group = [output_params]
else:
output_params_group = output_params
if grad_norms is None:
grad_norms = [None]*len(self.param_groups)
for group, grads_this_group, output_params_this_group, \
grad_norm in zip(self.param_groups, grads_group,
output_params_group, grad_norms):
if grads_this_group is None:
grads_this_group = [None]*len(group['params'])
if output_params_this_group is None:
output_params_this_group = [None]*len(group['params'])
# compute combined scale factor for this group
combined_scale = scale
if group['max_grad_norm'] > 0:
# norm is in fact norm*scale
clip = ((grad_norm / scale) + 1e-6) / group['max_grad_norm']
if clip > 1:
combined_scale = clip * scale
bias_correction = 1 if group['bias_correction'] else 0
for p, grad, output_param in zip(group['params'],
grads_this_group,
output_params_this_group):
# note: p.grad should not ever be set for correct operation of
# mixed precision optimizer that sometimes sends None gradients
if p.grad is None and grad is None:
continue
if grad is None:
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('FusedAdam does not support sparse \
gradients, please consider \
SparseAdam instead')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
out_p = torch.tensor([], dtype=torch.float) if output_param \
is None else output_param
fused_adam_cuda.adam(p.data,
out_p,
exp_avg,
exp_avg_sq,
grad,
group['lr'],
beta1,
beta2,
group['eps'],
combined_scale,
state['step'],
self.eps_mode,
bias_correction,
group['weight_decay'])
return loss
|
py | b411f3f84633f430aaeddfce64490b4d9ec6976d | from marshmallow import Schema, fields
from sqlalchemy import Column, ForeignKey
from sqlalchemy.orm import relationship
from src.api import db
from src.shared.entity import Base
from ..users.entities import UserSchema, User
class Project(Base, db.Model):
__tablename__ = "projet"
id_p = Column(db.Integer, primary_key=True)
code_p = Column(db.Integer, unique=True, nullable=False)
nom_p = Column(db.String(250), unique=True, nullable=False)
statut_p = Column(db.Boolean(250), default=False)
id_u = Column(db.Integer, ForeignKey('utilisateur.id_u'))
responsable = relationship(User)
def __init__(self, code_p, nom_p, statut_p, id_u, id_p='', responsable=''):
if id_p != '':
self.id_p = id_p
if responsable != '':
self.responsable = responsable
self.code_p = code_p
self.nom_p = nom_p
self.statut_p = statut_p
self.id_u = id_u
class ProjectSchema(Schema):
id_p = fields.Integer()
code_p = fields.Integer()
nom_p = fields.Str()
statut_p = fields.Bool()
id_u = fields.Integer()
responsable = fields.Nested(UserSchema(exclude=['password_u']))
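# Illustrative serialization sketch (not part of the original module): given a
# Project instance `p` loaded from the database, dumping it with the schema
# yields a plain dict ready for a JSON response; the keys follow the French
# column names defined above.
#
#   schema = ProjectSchema()
#   payload = schema.dump(p)   # {'id_p': 1, 'code_p': 1234, 'nom_p': ..., ...}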
|
py | b411f59dd0e13956299db074bc637c60abc21665 | """
eZmax API Definition
    This API exposes all the functionalities for the eZmax and eZsign applications. # noqa: E501
The version of the OpenAPI document: 1.1.3
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import eZmaxApi
from eZmaxApi.model.multilingual_apikey_description import MultilingualApikeyDescription
class TestMultilingualApikeyDescription(unittest.TestCase):
"""MultilingualApikeyDescription unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testMultilingualApikeyDescription(self):
"""Test MultilingualApikeyDescription"""
# FIXME: construct object with mandatory attributes with example values
# model = MultilingualApikeyDescription() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | b411f65c6fede812e625c79dc101ead546000f7c | '''
Test two outbound start delayed hooks
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
Test.Summary = '''
Test different combinations of TLS handshake hooks to ensure they are applied consistently.
'''
Test.SkipUnless(Condition.HasProgram("grep", "grep needs to be installed on system for this test to work"))
ts = Test.MakeATSProcess("ts", select_ports=False)
server = Test.MakeOriginServer("server", ssl=True)
request_header = {"headers": "GET / HTTP/1.1\r\nHost: www.example.com\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
# desired response form the origin server
response_header = {"headers": "HTTP/1.1 200 OK\r\nConnection: close\r\n\r\n", "timestamp": "1469733493.993", "body": ""}
server.addResponse("sessionlog.json", request_header, response_header)
ts.addSSLfile("ssl/server.pem")
ts.addSSLfile("ssl/server.key")
ts.Variables.ssl_port = 4443
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 1,
'proxy.config.diags.debug.tags': 'ssl_hook_test',
'proxy.config.ssl.server.cert.path': '{0}'.format(ts.Variables.SSLDir),
'proxy.config.ssl.server.private_key.path': '{0}'.format(ts.Variables.SSLDir),
# enable ssl port
'proxy.config.http.server_ports': '{0}:ssl'.format(ts.Variables.ssl_port),
'proxy.config.ssl.client.verify.server': 0,
'proxy.config.ssl.server.cipher_suite': 'ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-SHA256:ECDHE-RSA-AES256-SHA384:AES128-GCM-SHA256:AES256-GCM-SHA384:ECDHE-RSA-RC4-SHA:ECDHE-RSA-AES128-SHA:ECDHE-RSA-AES256-SHA:RC4-SHA:RC4-MD5:AES128-SHA:AES256-SHA:DES-CBC3-SHA!SRP:!DSS:!PSK:!aNULL:!eNULL:!SSLv2',
})
ts.Disk.ssl_multicert_config.AddLine(
'dest_ip=* ssl_cert_name=server.pem ssl_key_name=server.key'
)
ts.Disk.remap_config.AddLine(
'map https://example.com:4443 https://127.0.0.1:{0}'.format(server.Variables.Port)
)
Test.PreparePlugin(os.path.join(Test.Variables.AtsTestToolsDir, 'plugins', 'ssl_hook_test.cc'), ts, '-out_start_delay=2')
tr = Test.AddTestRun("Test outbound delay start")
tr.Processes.Default.StartBefore(server)
tr.Processes.Default.StartBefore(Test.Processes.ts, ready=When.PortOpen(ts.Variables.ssl_port))
tr.StillRunningAfter = ts
tr.StillRunningAfter = server
tr.Processes.Default.Command = 'curl -k -H \'host:example.com:{0}\' https://127.0.0.1:{0}'.format(ts.Variables.ssl_port)
tr.Processes.Default.ReturnCode = 0
ts.Streams.stderr = "gold/ts-out-delay-start-2.gold"
tr.Processes.Default.TimeOut = 5
tr.TimeOut = 5
|
py | b411f6d73f4a6f2899f3d3a8c8aa6fb9ac1737c5 | """
Unit and regression test for the {{cookiecutter.repo_name}} package.
"""
# Import package, test suite, and other packages as needed
import {{cookiecutter.repo_name}}
import pytest
import sys
def test_{{cookiecutter.repo_name}}_imported():
"""Sample test, will always pass so long as import statement worked."""
assert "{{cookiecutter.repo_name}}" in sys.modules
|
py | b411f7214b4ea25da9b6e807422003a276168099 | # 2020-03-04 Lyon
import tensorflow as tf
from tensorflow import keras
class LeNetModel(tf.keras.Model):
"""构建LeNet模型——通过自定义子类化Model实现(subclassing the Model class)"""
def __init__(self):
super(LeNetModel, self).__init__()
self.conv1 = keras.layers.Conv2D(filters=6, kernel_size=5, activation='relu', input_shape=(28,28,1))
self.pool = keras.layers.MaxPool2D(pool_size=2, strides=2)
self.conv2 = keras.layers.Conv2D(filters=16, kernel_size=5, activation = 'relu')
self.flatten = keras.layers.Flatten()
self.dense1 = keras.layers.Dense(120, activation='relu')
self.dense2 = keras.layers.Dense(84, activation='relu')
self.dense3 = keras.layers.Dense(10, activation='softmax')
self.dropout = keras.layers.Dropout(0.25)
def call(self, inputs, training=False):
x = self.dense1(self.flatten(self.pool(self.conv2(self.pool(self.conv1(inputs))))))
if training:
x = self.dropout(self.dense2(self.dropout(x, training=training)))
else:
x = self.dense2(x)
return self.dense3(x)
def build_sequential_model():
    """Build the LeNet model with the keras Sequential() API."""
net = keras.models.Sequential([
        # Conv2D is a 2-D convolution layer; input_shape=(28,28,1) means 28*28-pixel single-channel images, filters=6 means 6 convolution kernels, and kernel_size=5 means each kernel is 5*5
keras.layers.Conv2D(filters=6, kernel_size=5, activation='relu', input_shape=(28,28,1)),
        # MaxPool2D is a (max) pooling layer with a 2*2 pool and stride 2, which halves the spatial size of the input
keras.layers.MaxPool2D(pool_size=2, strides=2),
keras.layers.Conv2D(filters=16, kernel_size=5, activation = 'relu'),
keras.layers.MaxPool2D(pool_size=2, strides=2),
        # Flatten() flattens the previous layer into a 1-D vector so the fully connected Dense layers can follow
keras.layers.Flatten(),
keras.layers.Dense(120, activation='relu'),
keras.layers.Dropout(0.25),
keras.layers.Dense(84, activation='relu'),
keras.layers.Dropout(0.25),
keras.layers.Dense(10, activation='softmax')])
return net
def build_functional_model():
"""构建LeNet模型——functional API实现"""
inputs = keras.layers.Input([28,28,1])
conv1 = keras.layers.Conv2D(filters=6, kernel_size=5, activation='relu', input_shape=(28,28,1))(inputs)
pool1 = keras.layers.MaxPool2D(pool_size=2, strides=2)(conv1)
conv2 = keras.layers.Conv2D(filters=16, kernel_size=5, activation = 'relu')(pool1)
flatten = keras.layers.Flatten()(conv2)
dense1 = keras.layers.Dense(120, activation='relu')(flatten)
dropout1 = keras.layers.Dropout(0.25)(dense1)
dense2 = keras.layers.Dense(84, activation='relu')(dropout1)
dropout2 = keras.layers.Dropout(0.25)(dense2)
dense3 = keras.layers.Dense(10, activation=None)(dropout2)
outputs = tf.nn.softmax(dense3)
net = keras.Model(inputs=inputs, outputs=outputs)
return net
def build_lenet(keyword='sequential'):
if keyword=='sequential':
        return build_sequential_model()
if keyword=='functional':
return build_functional_model()
if keyword=='subclass':
return LeNetModel()
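# Illustrative usage sketch (assumed workflow, not part of the original file):
# any of the three constructions can be compiled and trained the same way on
# 28x28x1 inputs such as MNIST; x_train/y_train etc. are placeholders.
#
#   model = build_lenet('sequential')
#   model.compile(optimizer='adam',
#                 loss='sparse_categorical_crossentropy',
#                 metrics=['accuracy'])
#   model.fit(x_train, y_train, epochs=5, validation_data=(x_test, y_test))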
|
py | b411f7694926e085bce1a41cf0755bc72e4869fc | import math
import random
class State:
chromosome = None
fitness = None
def __eq__(self, other):
return self.chromosome == other.chromosome and self.fitness == other.fitness
def calculate_fitness(distances, chromosome):
f = 0
problem_length = len(chromosome)
for i in range(problem_length - 1):
f += distances[chromosome[i]][chromosome[i + 1]]
return -f
def create_initial_state(problem_length):
chromosome = []
while len(chromosome) != problem_length:
rand = random.randint(0, problem_length - 1)
if rand not in chromosome:
chromosome.append(rand)
s = State()
s.chromosome = chromosome
return s
def read_distances(file):
distances = []
file = open(file + '_dist.txt')
for i in range(7):
next(file)
for line in file:
line = line.rstrip().lstrip().split(" ")
for num in line:
try:
distances.append(int(num))
except:
x = 0
n = int(math.sqrt(len(distances)))
l = distances
return [l[i:i + n] for i in range(0, len(l), n)]
def read_coordinates(file):
coordinates = []
file = open(file + '_xy.txt')
for i in range(7):
next(file)
for line in file:
line = ' '.join(line.lstrip().rstrip().split(' ')).split()
coordinates.append((float(line[0]), float(line[1])))
return coordinates
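# Illustrative sketch (the 'p01' file prefix is hypothetical): read a distance
# matrix, build a random tour and score it. Fitness is the negated tour length,
# so values closer to zero are better.
#
#   distances = read_distances('p01')
#   state = create_initial_state(len(distances))
#   state.fitness = calculate_fitness(distances, state.chromosome)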
|
py | b411f77427866de57abd060b602eedabb5a3bd5f | __author__ = 'nc4sq'
import os.path
from Crypto.Hash import SHA256
from Crypto import Random
from Crypto.PublicKey import RSA
from Crypto.Cipher import AES
def pad(s):
return s + b"\0" * (AES.block_size - len(s) % AES.block_size)
def secret_string(input_message, enc_input_key):
message = input_message.encode()
public_key = enc_input_key.publickey()
enc_data = public_key.encrypt(message, 32)
return enc_data
# print(enc_data)
# print(enc_input_key.decrypt(enc_data))
def encrypt(message, enc_input_key, key_size=256):
key = str(enc_input_key)
message = pad(message)
iv = Random.new().read(AES.block_size)
cipher = AES.new(key, AES.MODE_CBC, iv)
return iv + cipher.encrypt(message)
def decrypt(ciphertext, dec_input_key):
key = str(dec_input_key)
iv = ciphertext[:AES.block_size]
cipher = AES.new(key, AES.MODE_CBC, iv)
plaintext = cipher.decrypt(ciphertext[AES.block_size:])
return plaintext.rstrip(b"\0")
def encrypt_file(enc_input_filename, enc_file_input_key):
    try:
        # probe the file first so a missing file is reported cleanly
        file = open(enc_input_filename, 'rb')
        file.close()
    except IOError:
        print('There was an error opening the file!')
        return False
newKey = str(enc_file_input_key)
if len(str(enc_file_input_key)) < 16:
string_val = 'abcdabcdabcdabcd'
newKey = enc_file_input_key + string_val
key16 = newKey[0:16]
# key = str.encode(key16)
key = key16
with open(enc_input_filename, 'rb') as f:
plaintext = f.read()
enc = encrypt(plaintext, key)
with open(enc_input_filename + ".enc", 'wb') as f:
f.write(enc)
return True
return False
def decrypt_file(dec_input_filename, dec_file_input_key):
if not os.path.isfile(dec_input_filename):
print('There was an error opening the file!')
return False
else:
newKey = str(dec_file_input_key)
if len(str(dec_file_input_key)) < 16:
string_val = 'abcdabcdabcdabcd'
newKey = dec_file_input_key + string_val
key16 = newKey[0:16]
# key = str.encode(key16)
key = key16
with open(dec_input_filename, 'rb') as f:
ciphertext = f.read()
dec = decrypt(ciphertext, key)
with open("DEC_" + dec_input_filename[:-4], 'wb') as f:
f.write(dec)
return True
return False
# if __name__ == '__main__':
random_generator = Random.new().read
key = RSA.generate(1024, random_generator)
print("Testing secret_string\n")
print(secret_string('my name is deepak', key))
print("Testing encrypt_file\n")
print(encrypt_file('test.txt', key))
print("Testing decrypt_file\n")
print(decrypt_file('test.txt.enc', key)) |
py | b411f7b096118d9ba78a5759b6ad6fe1da24861d | import random
def insertion(ulist):
n = len(ulist)
for i in range(1,n):
currentVal = ulist[i]
j = i - 1
while j >= 0 and ulist[j] > currentVal:
ulist[j+1] = ulist[j]
j = j - 1
ulist[j+1] = currentVal
return ulist
print("Lista Desordenada")
unordered = list(range(10))
random.shuffle(unordered)
print(unordered)
ordered = insertion(unordered)
print(ordered) |
py | b411f7dd822819c0cab27dad5a110505730dfec6 | from ..player_view import PlayerView
from random import sample
def data_zero(max_number, pieces_per_player):
"""
    Force player 0 to have at least half of their pieces bearing the zero number, including the double zero.
Randomly distribute pieces among the other players.
Valid pieces are all integer tuples of the form:
(i, j) 0 <= i <= j <= max_number
Each player will have `pieces_per_player`.
"""
data_number = 0
cant = (min(max_number + 1, pieces_per_player) // 2)
hand = [(data_number, i) for i in sample(list(range(max_number + 1)), cant)]
if not (0, 0) in hand:
hand.pop()
hand.append((0, 0))
pieces = [(i, j) for i in range(max_number + 1) for j in range(max_number + 1) if i <= j and (i, j) not in hand]
assert 4 * pieces_per_player <= len(pieces) + len(hand)
hand.extend(sample(pieces, 4 * pieces_per_player - len(hand)))
hands = [hand[i:i+pieces_per_player] for i in range(0, 4 * pieces_per_player, pieces_per_player)]
return [PlayerView(h) for h in hands]
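# Illustrative call (assumed double-six set): with max_number=6 and
# pieces_per_player=7 the full 28-piece set is dealt into four hands of 7, and
# player 0's hand is guaranteed to contain the double zero plus the other
# zero-ended pieces selected above.
#
#   views = data_zero(6, 7)
#   assert len(views) == 4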
|
py | b411f809cd02ea990341291ad57d0f455532c7da | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
import frappe
from frappe.model.document import Document
from frappe.utils import cint, has_gravatar, format_datetime, now_datetime, get_formatted_email, today
from frappe import throw, msgprint, _
from frappe.utils.password import update_password as _update_password
from frappe.desk.notifications import clear_notifications
from frappe.desk.doctype.notification_settings.notification_settings import create_notification_settings
from frappe.utils.user import get_system_managers
from bs4 import BeautifulSoup
import frappe.permissions
import frappe.share
import re
import json
from frappe.website.utils import is_signup_enabled
from frappe.utils.background_jobs import enqueue
STANDARD_USERS = ("Guest", "Administrator")
class MaxUsersReachedError(frappe.ValidationError): pass
class User(Document):
__new_password = None
def __setup__(self):
# because it is handled separately
self.flags.ignore_save_passwords = ['new_password']
def autoname(self):
"""set name as Email Address"""
if self.get("is_admin") or self.get("is_guest"):
self.name = self.first_name
else:
self.email = self.email.strip().lower()
self.name = self.email
def onload(self):
from frappe.config import get_modules_from_all_apps
self.set_onload('all_modules',
[m.get("module_name") for m in get_modules_from_all_apps()])
def before_insert(self):
self.flags.in_insert = True
throttle_user_creation()
def after_insert(self):
create_notification_settings(self.name)
def validate(self):
self.check_demo()
# clear new password
self.__new_password = self.new_password
self.new_password = ""
if not frappe.flags.in_test:
self.password_strength_test()
if self.name not in STANDARD_USERS:
self.validate_email_type(self.email)
self.validate_email_type(self.name)
self.add_system_manager_role()
self.set_system_user()
self.set_full_name()
self.check_enable_disable()
self.ensure_unique_roles()
self.remove_all_roles_for_guest()
self.validate_username()
self.remove_disabled_roles()
self.validate_user_email_inbox()
ask_pass_update()
self.validate_roles()
self.validate_user_image()
if self.language == "Loading...":
self.language = None
if (self.name not in ["Administrator", "Guest"]) and (not self.get_social_login_userid("frappe")):
self.set_social_login_userid("frappe", frappe.generate_hash(length=39))
def validate_roles(self):
if self.role_profile_name:
role_profile = frappe.get_doc('Role Profile', self.role_profile_name)
self.set('roles', [])
self.append_roles(*[role.role for role in role_profile.roles])
def validate_user_image(self):
if self.user_image and len(self.user_image) > 2000:
frappe.throw(_("Not a valid User Image."))
def on_update(self):
# clear new password
self.share_with_self()
clear_notifications(user=self.name)
frappe.clear_cache(user=self.name)
self.send_password_notification(self.__new_password)
frappe.enqueue(
'frappe.core.doctype.user.user.create_contact',
user=self,
ignore_mandatory=True
)
if self.name not in ('Administrator', 'Guest') and not self.user_image:
frappe.enqueue('frappe.core.doctype.user.user.update_gravatar', name=self.name)
def has_website_permission(self, ptype, user, verbose=False):
"""Returns true if current user is the session user"""
return self.name == frappe.session.user
def check_demo(self):
if frappe.session.user == '[email protected]':
frappe.throw(_('Cannot change user details in demo. Please signup for a new account at https://erpnext.com'), title=_('Not Allowed'))
def set_full_name(self):
self.full_name = " ".join(filter(None, [self.first_name, self.last_name]))
def check_enable_disable(self):
# do not allow disabling administrator/guest
if not cint(self.enabled) and self.name in STANDARD_USERS:
frappe.throw(_("User {0} cannot be disabled").format(self.name))
if not cint(self.enabled):
self.a_system_manager_should_exist()
# clear sessions if disabled
if not cint(self.enabled) and getattr(frappe.local, "login_manager", None):
frappe.local.login_manager.logout(user=self.name)
def add_system_manager_role(self):
# if the user is disabled or already has the System Manager role, do nothing
if not cint(self.enabled) or ("System Manager" in [user_role.role for user_role in
self.get("roles")]):
return
if (self.name not in STANDARD_USERS and self.user_type == "System User" and not self.get_other_system_managers()
and cint(frappe.db.get_single_value('System Settings', 'setup_complete'))):
msgprint(_("Adding System Manager to this User as there must be atleast one System Manager"))
self.append("roles", {
"doctype": "Has Role",
"role": "System Manager"
})
if self.name == 'Administrator':
# Administrator should always have System Manager Role
self.extend("roles", [
{
"doctype": "Has Role",
"role": "System Manager"
},
{
"doctype": "Has Role",
"role": "Administrator"
}
])
def email_new_password(self, new_password=None):
if new_password and not self.flags.in_insert:
_update_password(user=self.name, pwd=new_password, logout_all_sessions=self.logout_all_sessions)
def set_system_user(self):
'''Set as System User if any of the given roles has desk_access'''
if self.has_desk_access() or self.name == 'Administrator':
self.user_type = 'System User'
else:
self.user_type = 'Website User'
def has_desk_access(self):
'''Return true if any of the set roles has desk access'''
if not self.roles:
return False
return len(frappe.db.sql("""select name
from `tabRole` where desk_access=1
and name in ({0}) limit 1""".format(', '.join(['%s'] * len(self.roles))),
[d.role for d in self.roles]))
def share_with_self(self):
if self.user_type=="System User":
frappe.share.add(self.doctype, self.name, self.name, write=1, share=1,
flags={"ignore_share_permission": True})
else:
frappe.share.remove(self.doctype, self.name, self.name,
flags={"ignore_share_permission": True, "ignore_permissions": True})
def validate_share(self, docshare):
if docshare.user == self.name:
if self.user_type=="System User":
if docshare.share != 1:
frappe.throw(_("Sorry! User should have complete access to their own record."))
else:
frappe.throw(_("Sorry! Sharing with Website User is prohibited."))
def send_password_notification(self, new_password):
try:
if self.flags.in_insert:
if self.name not in STANDARD_USERS:
if new_password:
# new password given, no email required
_update_password(user=self.name, pwd=new_password,
logout_all_sessions=self.logout_all_sessions)
if not self.flags.no_welcome_mail and self.send_welcome_email:
self.send_welcome_mail_to_user()
self.flags.email_sent = 1
if frappe.session.user != 'Guest':
msgprint(_("Welcome email sent"))
return
else:
self.email_new_password(new_password)
except frappe.OutgoingEmailError:
print(frappe.get_traceback())
pass # email server not set, don't send email
@Document.hook
def validate_reset_password(self):
pass
def reset_password(self, send_email=False, password_expired=False):
from frappe.utils import random_string, get_url
key = random_string(32)
self.db_set("reset_password_key", key)
url = "/update-password?key=" + key
if password_expired:
url = "/update-password?key=" + key + '&password_expired=true'
link = get_url(url)
if send_email:
self.password_reset_mail(link)
return link
def get_other_system_managers(self):
return frappe.db.sql("""select distinct `user`.`name` from `tabHas Role` as `user_role`, `tabUser` as `user`
where user_role.role='System Manager'
and `user`.docstatus<2
and `user`.enabled=1
and `user_role`.parent = `user`.name
and `user_role`.parent not in ('Administrator', %s) limit 1""", (self.name,))
def get_fullname(self):
"""get first_name space last_name"""
return (self.first_name or '') + \
(self.first_name and " " or '') + (self.last_name or '')
def password_reset_mail(self, link):
self.send_login_mail(_("Password Reset"),
"password_reset", {"link": link}, now=True)
def send_welcome_mail_to_user(self):
from frappe.utils import get_url
link = self.reset_password()
subject = None
method = frappe.get_hooks("welcome_email")
if method:
subject = frappe.get_attr(method[-1])()
if not subject:
site_name = frappe.db.get_default('site_name') or frappe.get_conf().get("site_name")
if site_name:
subject = _("Welcome to {0}").format(site_name)
else:
subject = _("Complete Registration")
self.send_login_mail(subject, "new_user",
dict(
link=link,
site_url=get_url(),
))
def send_login_mail(self, subject, template, add_args, now=None):
"""send mail with login details"""
from frappe.utils.user import get_user_fullname
from frappe.utils import get_url
full_name = get_user_fullname(frappe.session['user'])
if full_name == "Guest":
full_name = "Administrator"
args = {
'first_name': self.first_name or self.last_name or "user",
'user': self.name,
'title': subject,
'login_url': get_url(),
'user_fullname': full_name
}
args.update(add_args)
sender = frappe.session.user not in STANDARD_USERS and get_formatted_email(frappe.session.user) or None
frappe.sendmail(recipients=self.email, sender=sender, subject=subject,
template=template, args=args, header=[subject, "green"],
delayed=(not now) if now!=None else self.flags.delay_emails, retry=3)
def a_system_manager_should_exist(self):
if not self.get_other_system_managers():
throw(_("There should remain at least one System Manager"))
def on_trash(self):
frappe.clear_cache(user=self.name)
if self.name in STANDARD_USERS:
throw(_("User {0} cannot be deleted").format(self.name))
self.a_system_manager_should_exist()
# disable the user and log him/her out
self.enabled = 0
if getattr(frappe.local, "login_manager", None):
frappe.local.login_manager.logout(user=self.name)
# delete todos
frappe.db.sql("""DELETE FROM `tabToDo` WHERE `owner`=%s""", (self.name,))
frappe.db.sql("""UPDATE `tabToDo` SET `assigned_by`=NULL WHERE `assigned_by`=%s""",
(self.name,))
# delete events
frappe.db.sql("""delete from `tabEvent` where owner=%s
and event_type='Private'""", (self.name,))
# delete shares
frappe.db.sql("""delete from `tabDocShare` where user=%s""", self.name)
# delete messages
frappe.db.sql("""delete from `tabCommunication`
where communication_type in ('Chat', 'Notification')
and reference_doctype='User'
and (reference_name=%s or owner=%s)""", (self.name, self.name))
# unlink contact
frappe.db.sql("""update `tabContact`
set `user`=null
where `user`=%s""", (self.name))
def before_rename(self, old_name, new_name, merge=False):
self.check_demo()
frappe.clear_cache(user=old_name)
self.validate_rename(old_name, new_name)
def validate_rename(self, old_name, new_name):
# do not allow renaming administrator and guest
if old_name in STANDARD_USERS:
throw(_("User {0} cannot be renamed").format(self.name))
self.validate_email_type(new_name)
def validate_email_type(self, email):
from frappe.utils import validate_email_address
validate_email_address(email.strip(), True)
def after_rename(self, old_name, new_name, merge=False):
tables = frappe.db.get_tables()
for tab in tables:
desc = frappe.db.get_table_columns_description(tab)
has_fields = []
for d in desc:
if d.get('name') in ['owner', 'modified_by']:
has_fields.append(d.get('name'))
for field in has_fields:
frappe.db.sql("""UPDATE `%s`
SET `%s` = %s
WHERE `%s` = %s""" %
(tab, field, '%s', field, '%s'), (new_name, old_name))
if frappe.db.exists("Chat Profile", old_name):
frappe.rename_doc("Chat Profile", old_name, new_name, force=True, show_alert=False)
if frappe.db.exists("Notification Settings", old_name):
frappe.rename_doc("Notification Settings", old_name, new_name, force=True, show_alert=False)
# set email
frappe.db.sql("""UPDATE `tabUser`
SET email = %s
WHERE name = %s""", (new_name, new_name))
def append_roles(self, *roles):
"""Add roles to user"""
current_roles = [d.role for d in self.get("roles")]
for role in roles:
if role in current_roles:
continue
self.append("roles", {"role": role})
def add_roles(self, *roles):
"""Add roles to user and save"""
self.append_roles(*roles)
self.save()
def remove_roles(self, *roles):
existing_roles = dict((d.role, d) for d in self.get("roles"))
for role in roles:
if role in existing_roles:
self.get("roles").remove(existing_roles[role])
self.save()
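# Hypothetical usage sketch (the document name is illustrative):
#   user = frappe.get_doc("User", "jane@example.com")
#   user.add_roles("Sales User")      # append_roles() + save()
#   user.remove_roles("Sales User")   # removes the matching Has Role row and saves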
def remove_all_roles_for_guest(self):
if self.name == "Guest":
self.set("roles", list(set(d for d in self.get("roles") if d.role == "Guest")))
def remove_disabled_roles(self):
disabled_roles = [d.name for d in frappe.get_all("Role", filters={"disabled":1})]
for role in list(self.get('roles')):
if role.role in disabled_roles:
self.get('roles').remove(role)
def ensure_unique_roles(self):
exists = []
for i, d in enumerate(self.get("roles")):
if (not d.role) or (d.role in exists):
self.get("roles").remove(d)
else:
exists.append(d.role)
def validate_username(self):
if not self.username and self.is_new() and self.first_name:
self.username = frappe.scrub(self.first_name)
if not self.username:
return
# strip space and @
self.username = self.username.strip(" @")
if self.username_exists():
if self.user_type == 'System User':
frappe.msgprint(_("Username {0} already exists").format(self.username))
self.suggest_username()
self.username = ""
def password_strength_test(self):
""" test password strength """
if self.flags.ignore_password_policy:
return
if self.__new_password:
user_data = (self.first_name, self.middle_name, self.last_name, self.email, self.birth_date)
result = test_password_strength(self.__new_password, '', None, user_data)
feedback = result.get("feedback", None)
if feedback and not feedback.get('password_policy_validation_passed', False):
handle_password_test_fail(result)
def suggest_username(self):
def _check_suggestion(suggestion):
if self.username != suggestion and not self.username_exists(suggestion):
return suggestion
return None
# @firstname
username = _check_suggestion(frappe.scrub(self.first_name))
if not username:
# @firstname_last_name
username = _check_suggestion(frappe.scrub("{0} {1}".format(self.first_name, self.last_name or "")))
if username:
frappe.msgprint(_("Suggested Username: {0}").format(username))
return username
def username_exists(self, username=None):
return frappe.db.get_value("User", {"username": username or self.username, "name": ("!=", self.name)})
def get_blocked_modules(self):
"""Returns list of modules blocked for that user"""
return [d.module for d in self.block_modules] if self.block_modules else []
def validate_user_email_inbox(self):
""" check if same email account added in User Emails twice """
email_accounts = [ user_email.email_account for user_email in self.user_emails ]
if len(email_accounts) != len(set(email_accounts)):
frappe.throw(_("Email Account added multiple times"))
def get_social_login_userid(self, provider):
try:
for p in self.social_logins:
if p.provider == provider:
return p.userid
except:
return None
def set_social_login_userid(self, provider, userid, username=None):
social_logins = {
"provider": provider,
"userid": userid
}
if username:
social_logins["username"] = username
self.append("social_logins", social_logins)
def get_restricted_ip_list(self):
if not self.restrict_ip:
return
return [i.strip() for i in self.restrict_ip.split(",")]
@frappe.whitelist()
def get_timezones():
import pytz
return {
"timezones": pytz.all_timezones
}
@frappe.whitelist()
def get_all_roles(arg=None):
"""return all roles"""
active_domains = frappe.get_active_domains()
roles = frappe.get_all("Role", filters={
"name": ("not in", "Administrator,Guest,All"),
"disabled": 0
}, or_filters={
"ifnull(restrict_to_domain, '')": "",
"restrict_to_domain": ("in", active_domains)
}, order_by="name")
return [ role.get("name") for role in roles ]
@frappe.whitelist()
def get_roles(arg=None):
"""get roles for a user"""
return frappe.get_roles(frappe.form_dict['uid'])
@frappe.whitelist()
def get_perm_info(role):
"""get permission info"""
from frappe.permissions import get_all_perms
return get_all_perms(role)
@frappe.whitelist(allow_guest=True)
def update_password(new_password, logout_all_sessions=0, key=None, old_password=None):
result = test_password_strength(new_password, key, old_password)
feedback = result.get("feedback", None)
if feedback and not feedback.get('password_policy_validation_passed', False):
handle_password_test_fail(result)
res = _get_user_for_update_password(key, old_password)
if res.get('message'):
return res['message']
else:
user = res['user']
_update_password(user, new_password, logout_all_sessions=int(logout_all_sessions))
user_doc, redirect_url = reset_user_data(user)
# get redirect url from cache
redirect_to = frappe.cache().hget('redirect_after_login', user)
if redirect_to:
redirect_url = redirect_to
frappe.cache().hdel('redirect_after_login', user)
frappe.local.login_manager.login_as(user)
frappe.db.set_value("User", user, "last_password_reset_date", today())
frappe.db.set_value("User", user, "reset_password_key", "")
if user_doc.user_type == "System User":
return "/desk"
else:
return redirect_url if redirect_url else "/"
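# Hypothetical client-side sketch: as a whitelisted method, update_password is reachable through
# Frappe's /api/method convention (field values below are illustrative):
#   POST /api/method/frappe.core.doctype.user.user.update_password
#   {"key": "<reset key from the emailed link>", "new_password": "s3cret-Passw0rd"}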
@frappe.whitelist(allow_guest=True)
def test_password_strength(new_password, key=None, old_password=None, user_data=[]):
from frappe.utils.password_strength import test_password_strength as _test_password_strength
password_policy = frappe.db.get_value("System Settings", None,
["enable_password_policy", "minimum_password_score"], as_dict=True) or {}
enable_password_policy = cint(password_policy.get("enable_password_policy", 0))
minimum_password_score = cint(password_policy.get("minimum_password_score", 0))
if not enable_password_policy:
return {}
if not user_data:
user_data = frappe.db.get_value('User', frappe.session.user,
['first_name', 'middle_name', 'last_name', 'email', 'birth_date'])
if new_password:
result = _test_password_strength(new_password, user_inputs=user_data)
password_policy_validation_passed = False
# score must be nonzero and at least minimum_password_score
if result.get('score') and result.get('score') >= minimum_password_score:
password_policy_validation_passed = True
result['feedback']['password_policy_validation_passed'] = password_policy_validation_passed
return result
#for login
@frappe.whitelist()
def has_email_account(email):
return frappe.get_list("Email Account", filters={"email_id": email})
@frappe.whitelist(allow_guest=False)
def get_email_awaiting(user):
waiting = frappe.db.sql("""select email_account,email_id
from `tabUser Email`
where awaiting_password = 1
and parent = %(user)s""", {"user":user}, as_dict=1)
if waiting:
return waiting
else:
frappe.db.sql("""update `tabUser Email`
set awaiting_password =0
where parent = %(user)s""",{"user":user})
return False
@frappe.whitelist(allow_guest=False)
def set_email_password(email_account, user, password):
account = frappe.get_doc("Email Account", email_account)
if account.awaiting_password:
account.awaiting_password = 0
account.password = password
try:
account.save(ignore_permissions=True)
except Exception:
frappe.db.rollback()
return False
return True
def setup_user_email_inbox(email_account, awaiting_password, email_id, enable_outgoing):
""" setup email inbox for user """
def add_user_email(user):
user = frappe.get_doc("User", user)
row = user.append("user_emails", {})
row.email_id = email_id
row.email_account = email_account
row.awaiting_password = awaiting_password or 0
row.enable_outgoing = enable_outgoing or 0
user.save(ignore_permissions=True)
update_user_email_settings = False
if not all([email_account, email_id]):
return
user_names = frappe.db.get_values("User", { "email": email_id }, as_dict=True)
if not user_names:
return
for user in user_names:
user_name = user.get("name")
# check if inbox is already configured
user_inbox = frappe.db.get_value("User Email", {
"email_account": email_account,
"parent": user_name
}, ["name"]) or None
if not user_inbox:
add_user_email(user_name)
else:
# update awaiting password for email account
update_user_email_settings = True
if update_user_email_settings:
frappe.db.sql("""UPDATE `tabUser Email` SET awaiting_password = %(awaiting_password)s,
enable_outgoing = %(enable_outgoing)s WHERE email_account = %(email_account)s""", {
"email_account": email_account,
"enable_outgoing": enable_outgoing,
"awaiting_password": awaiting_password or 0
})
else:
users = " and ".join([frappe.bold(user.get("name")) for user in user_names])
frappe.msgprint(_("Enabled email inbox for user {0}").format(users))
ask_pass_update()
def remove_user_email_inbox(email_account):
""" remove user email inbox settings if email account is deleted """
if not email_account:
return
users = frappe.get_all("User Email", filters={
"email_account": email_account
}, fields=["parent as name"])
for user in users:
doc = frappe.get_doc("User", user.get("name"))
to_remove = [ row for row in doc.user_emails if row.email_account == email_account ]
[ doc.remove(row) for row in to_remove ]
doc.save(ignore_permissions=True)
def ask_pass_update():
# update the system defaults with the list of users still awaiting a password
from frappe.utils import set_default
users = frappe.db.sql("""SELECT DISTINCT(parent) as user FROM `tabUser Email`
WHERE awaiting_password = 1""", as_dict=True)
password_list = [ user.get("user") for user in users ]
set_default("email_user_password", u','.join(password_list))
def _get_user_for_update_password(key, old_password):
# verify old password
if key:
user = frappe.db.get_value("User", {"reset_password_key": key})
if not user:
return {
'message': _("Cannot Update: Incorrect / Expired Link.")
}
elif old_password:
# verify old password
frappe.local.login_manager.check_password(frappe.session.user, old_password)
user = frappe.session.user
else:
return
return {
'user': user
}
def reset_user_data(user):
user_doc = frappe.get_doc("User", user)
redirect_url = user_doc.redirect_url
user_doc.reset_password_key = ''
user_doc.redirect_url = ''
user_doc.save(ignore_permissions=True)
return user_doc, redirect_url
@frappe.whitelist()
def verify_password(password):
frappe.local.login_manager.check_password(frappe.session.user, password)
@frappe.whitelist(allow_guest=True)
def sign_up(email, full_name, redirect_to):
if not is_signup_enabled():
frappe.throw(_('Sign Up is disabled'), title='Not Allowed')
user = frappe.db.get("User", {"email": email})
if user:
if user.disabled:
return 0, _("Registered but disabled")
else:
return 0, _("Already Registered")
else:
if frappe.db.sql("""select count(*) from tabUser where
HOUR(TIMEDIFF(CURRENT_TIMESTAMP, TIMESTAMP(modified)))=1""")[0][0] > 300:
frappe.respond_as_web_page(_('Temporarily Disabled'),
_('Too many users signed up recently, so the registration is disabled. Please try back in an hour'),
http_status_code=429)
from frappe.utils import random_string
user = frappe.get_doc({
"doctype":"User",
"email": email,
"first_name": full_name,
"enabled": 1,
"new_password": random_string(10),
"user_type": "Website User"
})
user.flags.ignore_permissions = True
user.flags.ignore_password_policy = True
user.insert()
# set default signup role as per Portal Settings
default_role = frappe.db.get_value("Portal Settings", None, "default_role")
if default_role:
user.add_roles(default_role)
if redirect_to:
frappe.cache().hset('redirect_after_login', user.name, redirect_to)
if user.flags.email_sent:
return 1, _("Please check your email for verification")
else:
return 2, _("Please ask your administrator to verify your sign-up")
@frappe.whitelist(allow_guest=True)
def reset_password(user):
if user=="Administrator":
return 'not allowed'
try:
user = frappe.get_doc("User", user)
if not user.enabled:
return 'disabled'
user.validate_reset_password()
user.reset_password(send_email=True)
return frappe.msgprint(_("Password reset instructions have been sent to your email"))
except frappe.DoesNotExistError:
frappe.clear_messages()
return 'not found'
def user_query(doctype, txt, searchfield, start, page_len, filters):
from frappe.desk.reportview import get_match_cond
user_type_condition = "and user_type = 'System User'"
if filters and filters.get('ignore_user_type'):
user_type_condition = ''
txt = "%{}%".format(txt)
return frappe.db.sql("""SELECT `name`, CONCAT_WS(' ', first_name, middle_name, last_name)
FROM `tabUser`
WHERE `enabled`=1
{user_type_condition}
AND `docstatus` < 2
AND `name` NOT IN ({standard_users})
AND ({key} LIKE %(txt)s
OR CONCAT_WS(' ', first_name, middle_name, last_name) LIKE %(txt)s)
{mcond}
ORDER BY
CASE WHEN `name` LIKE %(txt)s THEN 0 ELSE 1 END,
CASE WHEN concat_ws(' ', first_name, middle_name, last_name) LIKE %(txt)s
THEN 0 ELSE 1 END,
NAME asc
LIMIT %(page_len)s OFFSET %(start)s""".format(
user_type_condition = user_type_condition,
standard_users=", ".join([frappe.db.escape(u) for u in STANDARD_USERS]),
key=searchfield, mcond=get_match_cond(doctype)),
dict(start=start, page_len=page_len, txt=txt))
def get_total_users():
"""Returns total no. of system users"""
return frappe.db.sql('''SELECT SUM(`simultaneous_sessions`)
FROM `tabUser`
WHERE `enabled` = 1
AND `user_type` = 'System User'
AND `name` NOT IN ({})'''.format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS)[0][0]
def get_system_users(exclude_users=None, limit=None):
if not exclude_users:
exclude_users = []
elif not isinstance(exclude_users, (list, tuple)):
exclude_users = [exclude_users]
limit_cond = ''
if limit:
limit_cond = 'limit {0}'.format(limit)
exclude_users += list(STANDARD_USERS)
system_users = frappe.db.sql_list("""select name from `tabUser`
where enabled=1 and user_type != 'Website User'
and name not in ({}) {}""".format(", ".join(["%s"]*len(exclude_users)), limit_cond),
exclude_users)
return system_users
def get_active_users():
"""Returns No. of system users who logged in, in the last 3 days"""
return frappe.db.sql("""select count(*) from `tabUser`
where enabled = 1 and user_type != 'Website User'
and name not in ({})
and hour(timediff(now(), last_active)) < 72""".format(", ".join(["%s"]*len(STANDARD_USERS))), STANDARD_USERS)[0][0]
def get_website_users():
"""Returns total no. of website users"""
return frappe.db.sql("""select count(*) from `tabUser`
where enabled = 1 and user_type = 'Website User'""")[0][0]
def get_active_website_users():
"""Returns No. of website users who logged in, in the last 3 days"""
return frappe.db.sql("""select count(*) from `tabUser`
where enabled = 1 and user_type = 'Website User'
and hour(timediff(now(), last_active)) < 72""")[0][0]
def get_permission_query_conditions(user):
if user=="Administrator":
return ""
else:
return """(`tabUser`.name not in ({standard_users}))""".format(
standard_users = ", ".join(frappe.db.escape(user) for user in STANDARD_USERS))
def has_permission(doc, user):
if (user != "Administrator") and (doc.name in STANDARD_USERS):
# dont allow non Administrator user to view / edit Administrator user
return False
def notify_admin_access_to_system_manager(login_manager=None):
if (login_manager
and login_manager.user == "Administrator"
and frappe.local.conf.notify_admin_access_to_system_manager):
site = '<a href="{0}" target="_blank">{0}</a>'.format(frappe.local.request.host_url)
date_and_time = '<b>{0}</b>'.format(format_datetime(now_datetime(), format_string="medium"))
ip_address = frappe.local.request_ip
access_message = _('Administrator accessed {0} on {1} via IP Address {2}.').format(
site, date_and_time, ip_address)
frappe.sendmail(
recipients=get_system_managers(),
subject=_("Administrator Logged In"),
template="administrator_logged_in",
args={'access_message': access_message},
header=['Access Notification', 'orange']
)
def extract_mentions(txt):
"""Find all instances of @mentions in the html."""
soup = BeautifulSoup(txt, 'html.parser')
emails = []
for mention in soup.find_all(class_='mention'):
email = mention['data-id']
emails.append(email)
return emails
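# Worked example of the extraction above (markup shape follows the class/data-id convention used here):
#   extract_mentions('<span class="mention" data-id="jane@example.com">@Jane</span>')
#   -> ['jane@example.com']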
def handle_password_test_fail(result):
suggestions = result['feedback']['suggestions'][0] if result['feedback']['suggestions'] else ''
warning = result['feedback']['warning'] if 'warning' in result['feedback'] else ''
suggestions += "<br>" + _("Hint: Include symbols, numbers and capital letters in the password") + '<br>'
frappe.throw(' '.join([_('Invalid Password:'), warning, suggestions]))
def update_gravatar(name):
gravatar = has_gravatar(name)
if gravatar:
frappe.db.set_value('User', name, 'user_image', gravatar)
@frappe.whitelist(allow_guest=True)
def send_token_via_sms(tmp_id,phone_no=None,user=None):
try:
from frappe.core.doctype.sms_settings.sms_settings import send_request
except:
return False
if not frappe.cache().ttl(tmp_id + '_token'):
return False
ss = frappe.get_doc('SMS Settings', 'SMS Settings')
if not ss.sms_gateway_url:
return False
token = frappe.cache().get(tmp_id + '_token')
args = {ss.message_parameter: 'verification code is {}'.format(token)}
for d in ss.get("parameters"):
args[d.parameter] = d.value
if user:
user_phone = frappe.db.get_value('User', user, ['phone','mobile_no'], as_dict=1)
usr_phone = user_phone.mobile_no or user_phone.phone
if not usr_phone:
return False
else:
if phone_no:
usr_phone = phone_no
else:
return False
args[ss.receiver_parameter] = usr_phone
status = send_request(ss.sms_gateway_url, args, use_post=ss.use_post)
if 200 <= status < 300:
frappe.cache().delete(tmp_id + '_token')
return True
else:
return False
@frappe.whitelist(allow_guest=True)
def send_token_via_email(tmp_id,token=None):
import pyotp
user = frappe.cache().get(tmp_id + '_user')
count = token or frappe.cache().get(tmp_id + '_token')
if ((not user) or (user == 'None') or (not count)):
return False
user_email = frappe.db.get_value('User',user, 'email')
if not user_email:
return False
otpsecret = frappe.cache().get(tmp_id + '_otp_secret')
hotp = pyotp.HOTP(otpsecret)
frappe.sendmail(
recipients=user_email, sender=None, subject='Verification Code',
message='<p>Your verification code is {0}</p>'.format(hotp.at(int(count))),
delayed=False, retry=3)
return True
@frappe.whitelist(allow_guest=True)
def reset_otp_secret(user):
otp_issuer = frappe.db.get_value('System Settings', 'System Settings', 'otp_issuer_name')
user_email = frappe.db.get_value('User',user, 'email')
if frappe.session.user in ["Administrator", user] :
frappe.defaults.clear_default(user + '_otplogin')
frappe.defaults.clear_default(user + '_otpsecret')
email_args = {
'recipients':user_email, 'sender':None, 'subject':'OTP Secret Reset - {}'.format(otp_issuer or "Frappe Framework"),
'message':'<p>Your OTP secret on {} has been reset. If you did not perform this reset and did not request it, please contact your System Administrator immediately.</p>'.format(otp_issuer or "Frappe Framework"),
'delayed':False,
'retry':3
}
enqueue(method=frappe.sendmail, queue='short', timeout=300, event=None, is_async=True, job_name=None, now=False, **email_args)
return frappe.msgprint(_("OTP Secret has been reset. Re-registration will be required on next login."))
else:
return frappe.throw(_("OTP secret can only be reset by the Administrator."))
def throttle_user_creation():
if frappe.flags.in_import:
return
if frappe.db.get_creation_count('User', 60) > frappe.local.conf.get("throttle_user_limit", 60):
frappe.throw(_('Throttled'))
@frappe.whitelist()
def get_role_profile(role_profile):
roles = frappe.get_doc('Role Profile', {'role_profile': role_profile})
return roles.roles
def update_roles(role_profile):
users = frappe.get_all('User', filters={'role_profile_name': role_profile})
role_profile = frappe.get_doc('Role Profile', role_profile)
roles = [role.role for role in role_profile.roles]
for d in users:
user = frappe.get_doc('User', d)
user.set('roles', [])
user.add_roles(*roles)
def create_contact(user, ignore_links=False, ignore_mandatory=False):
from frappe.contacts.doctype.contact.contact import get_contact_name
if user.name in ["Administrator", "Guest"]: return
contact_exists = get_contact_name(user.email)
if not contact_exists:
contact = frappe.get_doc({
"doctype": "Contact",
"first_name": user.first_name,
"last_name": user.last_name,
"user": user.name,
"gender": user.gender,
})
if user.email:
contact.add_email(user.email, is_primary=True)
if user.phone:
contact.add_phone(user.phone, is_primary_phone=True)
if user.mobile_no:
contact.add_phone(user.mobile_no, is_primary_mobile_no=True)
contact.insert(ignore_permissions=True, ignore_links=ignore_links, ignore_mandatory=ignore_mandatory)
else:
contact = frappe.get_doc("Contact", contact_exists)
contact.first_name = user.first_name
contact.last_name = user.last_name
contact.gender = user.gender
# Add phone number if it does not exist in the contact
if user.phone and not any(new_contact.phone == user.phone for new_contact in contact.phone_nos):
# Set primary phone if there is no primary phone number
contact.add_phone(
user.phone,
is_primary_phone=not any(
new_contact.is_primary_phone == 1 for new_contact in contact.phone_nos
)
)
# Add mobile number if it does not exist in the contact
if user.mobile_no and not any(new_contact.phone == user.mobile_no for new_contact in contact.phone_nos):
# Set primary mobile if there is no primary mobile number
contact.add_phone(
user.mobile_no,
is_primary_mobile_no=not any(
new_contact.is_primary_mobile_no == 1 for new_contact in contact.phone_nos
)
)
contact.save(ignore_permissions=True)
@frappe.whitelist()
def generate_keys(user):
"""
generate api key and api secret
:param user: str
"""
if "System Manager" in frappe.get_roles():
user_details = frappe.get_doc("User", user)
api_secret = frappe.generate_hash(length=15)
# if api key is not set generate api key
if not user_details.api_key:
api_key = frappe.generate_hash(length=15)
user_details.api_key = api_key
user_details.api_secret = api_secret
user_details.save()
return {"api_secret": api_secret}
frappe.throw(frappe._("Not Permitted"), frappe.PermissionError)
|
py | b411f81f223f7ab9c5b5af4b1aeef6267056500c | readMe = '''
This is a script to set attributes of a switch port, if a client with a matching MAC OUI is found connected to it.
*** TO PREVENT UNWANTED CHANGES, PLEASE RUN IN SIMULATION MODE FIRST ***
Usage:
python setSwitchPortOnMacOui.py -k <api key> -f <MAC file> -c <Conf file> [-o <org name>] [-m <mode>]
Parameters:
-k <api key> : Mandatory. Your Meraki Dashboard API key
-f <MAC file> : Mandatory. Path to file containing MAC/OUI definitions to match
-c <Conf file> : Mandatory. Path to file containing configuration changes to execute
-o <org name> : Optional. Name of the organization you want to process. Use keyword "/all" to explicitly
specify all orgs. Default is "/all"
-m <mode> : Optional. Defines whether changes will be committed to the Meraki cloud. Valid values:
simulation Do not commit changes, just print logs (default)
commit Commit changes to Meraki cloud
Example:
python setSwitchPortOnMacOui.py -k 1234 -o "Big Industries Inc" -f macs.txt -c cfg.txt -m commit
Example MAC OUI file:
https://github.com/meraki/automation-scripts/blob/master/setSwitchPortOnMacOui/ouilist.txt
Example configuration file:
https://github.com/meraki/automation-scripts/blob/master/setSwitchPortOnMacOui/cmdlist.txt
Notes:
* This script uses two endpoints that were in Beta at time of writing. If the script fails to fetch client lists
with a status code of 404, you will need to have these enabled by Meraki for your organization:
"List the clients that have used this network in the timespan"
"Action batches"
* On Windows, use double quotes ("") to enter command line parameters containing spaces.
* This script was built for Python 3.7.1.
* Depending on your operating system, the command to start python can be either "python" or "python3".
Required Python modules:
Requests : http://docs.python-requests.org
After installing Python, you can install these additional modules using pip with the following commands:
pip install requests
Depending on your operating system, the command can be "pip3" instead of "pip".'''
import sys, getopt, requests, json, time, datetime
#SECTION: GLOBAL VARIABLES: MODIFY TO CHANGE SCRIPT BEHAVIOUR
API_EXEC_DELAY = 0.21 #Used in merakiRequestThrottler() to avoid hitting dashboard API max request rate
#connect and read timeouts for the Requests module in seconds
REQUESTS_CONNECT_TIMEOUT = 90
REQUESTS_READ_TIMEOUT = 90
#page length for network clients' call. Range: 3-1000 clients/page
NET_CLIENTS_PAGE_LENGTH = 1000
#fetch info on clients present during the past X days. Range: 1-30 days
NET_CLIENT_LOOKUP_TIME_DAYS = 7
#how many configuration changes the script will attempt to commit with a single call. Range: 1-1000 changes.
ACTION_BATCH_SIZE = 100
#SECTION: GLOBAL VARIABLES AND CLASSES: DO NOT MODIFY
LAST_MERAKI_REQUEST = datetime.datetime.now() #used by merakiRequestThrottler()
ARG_APIKEY = '' #DO NOT STATICALLY SET YOUR API KEY HERE
ARG_ORGNAME = '' #DO NOT STATICALLY SET YOUR ORGANIZATION NAME HERE
SECONDS_IN_DAY = 86400
class c_Net:
def __init__(self):
self.id = ''
self.name = ''
self.shard = 'api.meraki.com'
self.devices = []
class c_Organization:
def __init__(self):
self.id = ''
self.name = ''
self.shard = 'api.meraki.com'
self.nets = []
#SECTION: General use functions
def merakiRequestThrottler():
#makes sure there is enough time between API requests to Dashboard not to hit shaper
global LAST_MERAKI_REQUEST
if (datetime.datetime.now()-LAST_MERAKI_REQUEST).total_seconds() < (API_EXEC_DELAY):
time.sleep(API_EXEC_DELAY)
LAST_MERAKI_REQUEST = datetime.datetime.now()
return
def printhelp():
print(readMe)
def matchOui(p_mac, p_ouiList):
for oui in p_ouiList:
if p_mac.lower().startswith(oui.lower()):
return True
return False
def loadFile(p_fileName):
returnValue = []
try:
f = open(p_fileName, 'r')
for line in f:
if len(line) > 0:
returnValue.append(line.strip())
f.close()
except:
print('ERROR 06: Error loading file "%s"' % p_fileName)
return None
return returnValue
def parseConfig(p_rawConfig):
ret = []
for line in p_rawConfig:
splitLine = line.split(':')
if len(splitLine) == 2:
ret.append([splitLine[0].strip(), splitLine[1].strip()])
else:
return None
return ret
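# Illustrative input formats, inferred from matchOui() and parseConfig(); see the example
# files linked in the readMe for authoritative samples:
#   MAC OUI file - one MAC prefix per line, e.g.:
#       00:18:0a
#       e0:55:3d
#   config file  - one "attribute: value" pair per line, applied to every matched port, e.g.:
#       vlan: 100
#       tags: voip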
def buildAccessSwitchList(p_org):
returnValue = []
orgInventory = getInventory(p_org)
if orgInventory == None:
return None
for device in orgInventory:
if device['model'].startswith('MS'):
if not device['model'].startswith('MS4'):
returnValue.append(device)
return returnValue
def checkIfOnValidAccessSwitch(p_serial, p_switchList):
for switch in p_switchList:
if p_serial == switch['serial']:
return True
return False
#SECTION: Meraki Dashboard API communication functions
def getInventory(p_org):
#returns the full device inventory of an organization
merakiRequestThrottler()
try:
r = requests.get('https://%s/api/v0/organizations/%s/inventory' % (p_org.shard, p_org.id), headers={'X-Cisco-Meraki-API-Key': ARG_APIKEY, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT) )
except:
print('ERROR 00: Unable to contact Meraki cloud')
return(None)
if r.status_code != requests.codes.ok:
return(None)
return(r.json())
def getNetworks(p_org):
#returns a list of all networks in an organization
merakiRequestThrottler()
try:
r = requests.get('https://%s/api/v0/organizations/%s/networks' % (p_org.shard, p_org.id), headers={'X-Cisco-Meraki-API-Key': ARG_APIKEY, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT) )
except:
print('ERROR 01: Unable to contact Meraki cloud')
return(None)
if r.status_code != requests.codes.ok:
return(None)
return(r.json())
def getOrgs():
#returns the organizations' list for a specified admin, with filters applied
merakiRequestThrottler()
try:
r = requests.get('https://api.meraki.com/api/v0/organizations', headers={'X-Cisco-Meraki-API-Key': ARG_APIKEY, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT) )
except:
print('ERROR 02: Unable to contact Meraki cloud')
return(None)
if r.status_code != requests.codes.ok:
return(None)
rjson = r.json()
orglist = []
listlen = -1
if ARG_ORGNAME.lower() == '/all':
for org in rjson:
orglist.append(c_Organization())
listlen += 1
orglist[listlen].id = org['id']
orglist[listlen].name = org['name']
else:
for org in rjson:
if org['name'] == ARG_ORGNAME:
orglist.append(c_Organization())
listlen += 1
orglist[listlen].id = org['id']
orglist[listlen].name = org['name']
return(orglist)
def getShardHost(p_org):
#patch: shard host lookup is stubbed out; always return the default shard
return("api.meraki.com")
def getNetworkClients(p_org, p_net):
# Returns all clients in a network, or None on failure
returnValue = []
networkHasMoreClientPages = True
requestUrl = 'https://%s/api/v0/networks/%s/clients?perPage=%s×pan=%s' % (p_org.shard, p_net, NET_CLIENTS_PAGE_LENGTH, NET_CLIENT_LOOKUP_TIME_DAYS*SECONDS_IN_DAY)
while networkHasMoreClientPages:
merakiRequestThrottler()
try:
r = requests.get( requestUrl, headers={'X-Cisco-Meraki-API-Key': ARG_APIKEY, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT) )
except:
print('ERROR 04: Error fetching client list for network %s' % p_net)
return None
if r.status_code != requests.codes.ok:
print('ERROR 05: Error fetching client list for network %s (Status: %s)' % (p_net, r.status_code))
return None
returnValue += r.json()
responseHeaders = r.headers
if 'Link' in responseHeaders:
link = responseHeaders['Link']
nextPageEnd = link.find('>; rel=next')
if nextPageEnd == -1:
networkHasMoreClientPages = False
else:
croppedLink = link[:nextPageEnd]
nextPageStart = croppedLink.rfind('https://')
requestUrl = croppedLink[nextPageStart:]
return returnValue
def executeActionBatch (p_org, p_portList, p_config):
print('Executing action batch...')
requestUrl = 'https://%s/api/v0/organizations/%s/actionBatches' % (p_org.shard, p_org.id)
payload = {'confirmed':True, 'synchronous':False}
actions = []
for item in p_portList:
a = {'resource':'/devices/%s/switchPorts/%s' % (item[0],item[1]), 'operation': 'update'}
b = {}
for cmd in p_config:
b[cmd[0]] = cmd[1]
a['body'] = b
actions.append(a)
payload['actions'] = actions
merakiRequestThrottler()
try:
r = requests.post(requestUrl, data=json.dumps(payload), headers={'X-Cisco-Meraki-API-Key': ARG_APIKEY, 'Content-Type': 'application/json'}, timeout=(REQUESTS_CONNECT_TIMEOUT, REQUESTS_READ_TIMEOUT) )
except:
print('ERROR 07: Unable to contact Meraki cloud')
return False
if r.status_code >= 400:
print (r.status_code)
return False
return True
#SECTION: main
def main(argv):
global ARG_APIKEY
global ARG_ORGNAME
#initialize command line arguments
ARG_APIKEY = ''
ARG_ORGNAME = ''
arg_macFile = ''
arg_cfgFile = ''
arg_mode = ''
#get command line arguments
try:
opts, args = getopt.getopt(argv, 'hk:f:c:o:m:')
except getopt.GetoptError:
printhelp()
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
printhelp()
sys.exit()
elif opt == '-k':
ARG_APIKEY = arg
elif opt == '-f':
arg_macFile = arg
elif opt == '-c':
arg_cfgFile = arg
elif opt == '-o':
ARG_ORGNAME = arg
elif opt == '-m':
arg_mode = arg
#check that all mandatory arguments have been given
if ARG_APIKEY == '' or arg_macFile == '' or arg_cfgFile == '':
printhelp()
sys.exit(2)
#set defaults for empty command line arguments
if ARG_ORGNAME == '':
ARG_ORGNAME = '/all'
if arg_mode == '':
arg_mode = 'simulation'
#script main body
ouiList = loadFile(arg_macFile)
if ouiList is None:
print ('ERROR 08: Unable to load OUI file')
sys.exit(2)
rawCfg = loadFile(arg_cfgFile)
if rawCfg is None:
print ('ERROR 09: Unable to load config file')
sys.exit(2)
cfgList = parseConfig(rawCfg)
if cfgList is None:
print ('ERROR 10: Unable to parse configuration')
sys.exit(2)
orglist = getOrgs()
if not orglist is None:
for org in orglist:
print('Processing org "%s"' % org.name)
actionBatchQueue = []
orgshard = getShardHost(org)
if not orgshard is None:
org.shard = orgshard
netlist = getNetworks(org)
devlist = buildAccessSwitchList(org)
if not devlist is None and not netlist is None:
for net in netlist:
clientList = getNetworkClients(org, net['id'])
if not clientList is None:
for client in clientList:
if matchOui(client['mac'], ouiList):
if checkIfOnValidAccessSwitch(client['recentDeviceSerial'], devlist):
print ('QUEUED: Will edit port for MAC %s: "%s" (%s) Port %s' % (client['mac'], client['recentDeviceName'], client['recentDeviceSerial'], client['switchport']) )
if arg_mode == 'commit':
actionBatchQueue.append([client['recentDeviceSerial'], client['switchport']])
if len(actionBatchQueue) >= ACTION_BATCH_SIZE:
batchSuccess = executeActionBatch (org, actionBatchQueue, cfgList)
if batchSuccess:
print ('SUCCESS: Batch operation successful')
else:
print ('ERROR 11: Batch operation failed')
actionBatchQueue = []
else:
print ('Skipping client "%s". Owner device "%s" (%s) not an access switch' % (client['mac'], client['recentDeviceName'], client['recentDeviceSerial']))
if arg_mode == 'commit':
if len(actionBatchQueue) > 0:
batchSuccess = executeActionBatch (org, actionBatchQueue, cfgList)
if batchSuccess:
print ('SUCCESS: Batch operation successful')
else:
print ('ERROR 12: Batch operation failed')
if __name__ == '__main__':
main(sys.argv[1:]) |
py | b411f8a50594694fb2e3e1ec4125301282e60186 | import os
import itertools
import nbformat
from nbformat.v4.nbbase import new_markdown_cell
from generate_contents import (NOTEBOOK_DIR, REG,
iter_notebooks, get_notebook_title,
is_title)
def prev_this_next(it):
a, b, c = itertools.tee(it, 3)
next(c)
return zip(itertools.chain([None], a), b, itertools.chain(c, [None]))
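# Worked example of the sliding window above:
#   prev_this_next(['a', 'b', 'c']) -> (None,'a','b'), ('a','b','c'), ('b','c',None)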
PREV_TEMPLATE = "< [{title}]({url}) "
CONTENTS = "| [Contents](00.00-index.ipynb) |"
NEXT_TEMPLATE = " [{title}]({url}) >"
NAV_COMMENT = "<!--NAVIGATION-->\n"
def iter_navbars():
for prev_nb, nb, next_nb in prev_this_next(iter_notebooks(NOTEBOOK_DIR)):
navbar = NAV_COMMENT
if prev_nb:
navbar += PREV_TEMPLATE.format(title=get_notebook_title(prev_nb),
url=prev_nb)
navbar += CONTENTS
if next_nb:
navbar += NEXT_TEMPLATE.format(title=get_notebook_title(next_nb),
url=next_nb)
yield os.path.join(NOTEBOOK_DIR, nb), navbar
def write_navbars():
for nb_name, navbar in iter_navbars():
nb = nbformat.read(nb_name, as_version=4)
nb_file = os.path.basename(nb_name)
is_comment = lambda cell: cell.source.startswith(NAV_COMMENT)
for idx, cell in enumerate(nb.cells):
if is_comment(cell):
print("- amending navbar for {0}".format(nb_file))
cell.source = navbar
break
elif is_title(cell):
print("- inserting navbar for {0}".format(nb_file))
nb.cells.insert(idx, new_markdown_cell(source=navbar))
break
if is_comment(nb.cells[-1]):
nb.cells[-1].source = navbar
else:
nb.cells.append(new_markdown_cell(source=navbar))
nbformat.write(nb, nb_name)
if __name__ == '__main__':
write_navbars()
|
py | b411f8c65b5570ba122070140604221722255c9f | # pylint: disable=g-import-not-at-top
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""contrib module containing volatile or experimental code."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.tools import component_api_helper
component_api_helper.package_hook(
parent_package_str=(
"tensorflow.contrib"),
child_package_str=(
"tensorflow_estimator.contrib.estimator"))
del component_api_helper
# Add projects here, they will show up under tf.contrib.
from tensorflow.contrib import autograph
from tensorflow.contrib import batching
from tensorflow.contrib import bayesflow
from tensorflow.contrib import checkpoint
if os.name != "nt":
from tensorflow.contrib import cloud
from tensorflow.contrib import cluster_resolver
from tensorflow.contrib import coder
from tensorflow.contrib import compiler
from tensorflow.contrib import constrained_optimization
from tensorflow.contrib import copy_graph
from tensorflow.contrib import crf
from tensorflow.contrib import cudnn_rnn
from tensorflow.contrib import data
from tensorflow.contrib import deprecated
from tensorflow.contrib import distribute
from tensorflow.contrib import distributions
from tensorflow.contrib import estimator
from tensorflow.contrib import factorization
from tensorflow.contrib import feature_column
from tensorflow.contrib import framework
from tensorflow.contrib import gan
from tensorflow.contrib import graph_editor
from tensorflow.contrib import grid_rnn
from tensorflow.contrib import image
from tensorflow.contrib import input_pipeline
from tensorflow.contrib import integrate
from tensorflow.contrib import keras
from tensorflow.contrib import kernel_methods
from tensorflow.contrib import labeled_tensor
from tensorflow.contrib import layers
from tensorflow.contrib import learn
from tensorflow.contrib import legacy_seq2seq
from tensorflow.contrib import linear_optimizer
from tensorflow.contrib import lookup
from tensorflow.contrib import losses
from tensorflow.contrib import memory_stats
from tensorflow.contrib import metrics
from tensorflow.contrib import mixed_precision
from tensorflow.contrib import model_pruning
from tensorflow.contrib import nccl
from tensorflow.contrib import nn
from tensorflow.contrib import opt
from tensorflow.contrib import periodic_resample
from tensorflow.contrib import predictor
from tensorflow.contrib import proto
from tensorflow.contrib import quantization
from tensorflow.contrib import quantize
from tensorflow.contrib import reduce_slice_ops
from tensorflow.contrib import resampler
from tensorflow.contrib import rnn
from tensorflow.contrib import rpc
from tensorflow.contrib import saved_model
from tensorflow.contrib import seq2seq
from tensorflow.contrib import signal
from tensorflow.contrib import slim
from tensorflow.contrib import solvers
from tensorflow.contrib import sparsemax
from tensorflow.contrib import staging
from tensorflow.contrib import stat_summarizer
from tensorflow.contrib import stateless
from tensorflow.contrib import tensor_forest
from tensorflow.contrib import tensorboard
from tensorflow.contrib import testing
from tensorflow.contrib import tfprof
from tensorflow.contrib import timeseries
from tensorflow.contrib import tpu
from tensorflow.contrib import training
from tensorflow.contrib import util
from tensorflow.contrib.eager.python import tfe as eager
from tensorflow.contrib.lite.python import lite
from tensorflow.contrib.optimizer_v2 import optimizer_v2_symbols as optimizer_v2
from tensorflow.contrib.receptive_field import receptive_field_api as receptive_field
from tensorflow.contrib.recurrent.python import recurrent_api as recurrent
from tensorflow.contrib.remote_fused_graph import pylib as remote_fused_graph
from tensorflow.contrib.specs import python as specs
from tensorflow.contrib.summary import summary
from tensorflow.python.util.lazy_loader import LazyLoader
ffmpeg = LazyLoader("ffmpeg", globals(),
"tensorflow.contrib.ffmpeg")
del os
del LazyLoader
del absolute_import
del division
del print_function
|
py | b411f8e2b318fb61dd6f207a2002bc548c065944 | import ckan.controllers.group as group
from ckan.lib.plugins import lookup_group_plugin
class OrganizationController(group.GroupController):
''' The organization controller is pretty much just the group
controller. It has a few templates defined that are different and sets
the group_type to organization so that the group controller knows that
it is in fact the organization controller. All the main logical
differences are therefore in the group controller.
The main differences the group controller provides for organizations are
a few wrapper functions that swap organization for group when rendering
templates, redirecting or calling logic actions '''
# this makes us use organization actions
group_type = 'organization'
def _group_form(self, group_type=None):
return 'organization/new_organization_form.html'
def _form_to_db_schema(self, group_type=None):
return lookup_group_plugin(group_type).form_to_db_schema()
def _db_to_form_schema(self, group_type=None):
'''This is an interface to manipulate data from the database
into a format suitable for the form (optional)'''
pass
def _setup_template_variables(self, context, data_dict, group_type=None):
pass
def _new_template(self, group_type):
return 'organization/new.html'
def _about_template(self, group_type):
return 'organization/about.html'
def _index_template(self, group_type):
return 'organization/index.html'
def _admins_template(self, group_type):
return 'organization/admins.html'
def _bulk_process_template(self, group_type):
return 'organization/bulk_process.html'
def _read_template(self, group_type):
return 'organization/read.html'
def _history_template(self, group_type):
return lookup_group_plugin(group_type).history_template()
def _edit_template(self, group_type):
return 'organization/edit.html'
def _activity_template(self, group_type):
return 'organization/activity_stream.html'
def _guess_group_type(self, expecting_name=False):
return 'organization'
|
py | b411fa132b5d47aeaaf96bf3f7db7d06c70719ef | from app import app
from importlib import import_module
from app.framework.routes.route_action import getFunction, getModule
class Route:
def get(self, url, view_function, name=None, middleware=None):
if isinstance(view_function, str):
try:
function = import_module(str(getModule(view_function)))
res = getattr(function, str(getFunction(view_function)))()
except:
raise ImportError("Module '{}' not found".format(view_function))
else:
res = view_function
# only call the middleware when one was actually supplied
if middleware:
middleware()
app.add_url_rule(url, name, view_func=res, methods=['GET'])
def post(self, url, view_function, name=None, middleware=None):
if isinstance(view_function, str):
try:
function = import_module(str(getModule(view_function)))
res = getattr(function, str(getFunction(view_function)))()
except:
raise ImportError("Module '{}' not found".format(view_function))
else:
res = view_function
# mirror get(): only call the middleware when one was supplied
if middleware:
middleware()
app.add_url_rule(url, name, view_func=res, methods=['POST'])
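# Hypothetical usage sketch (the dotted-string form assumes route_action's getModule()/
# getFunction() split a "package.module.view" style path; adjust to the real convention):
#   route.get('/health', lambda: 'ok', name='health')
#   route.post('/users', 'app.controllers.user.store', name='users_store')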
route = Route() |
py | b411facafcd4f482910041583e67e5c9bbea9c0b | # coding: utf-8
from __future__ import absolute_import, unicode_literals
from itertools import chain
import sys
from enum import EnumMeta
from six import reraise
from six.moves import map # pylint:disable=redefined-builtin
from .ordered_dict import OrderedDict
__all__ = list(map(str, ['ExtendableEnumMeta']))
class ExtendableEnumMeta(EnumMeta):
"""A metaclass for enum hierarchies.
This allows you to define hierarchies such as this:
from box.util.compat import with_metaclass
class EnumBase(with_metaclass(ExtendableEnumMeta, Enum)): pass
class Enum1(EnumBase):
A = 'A'
class Enum2(EnumBase): pass
class Enum2_1(Enum2):
B = 'B'
class Enum2_2(Enum2):
C = 'C'
and have all members be accessible on EnumBase (as well as have all members
of Enum2_1 and Enum2_2 be available on Enum2) as if they had been defined
there.
Non-leaf classes still may not have members directly defined on them, as
with standard enums.
Most of the usual enum magic methods are extended: __contains__, __dir__,
__getitem__, __getattr__, __iter__, __len__, and __reversed__. Only __new__
is not extended; instead, a new method `lookup` is provided. The
__members__ property is also extended.
"""
def lookup(cls, value):
"""Custom value lookup, which does recursive lookups on subclasses.
If this is a leaf enum class with defined members, this acts the same
as __new__().
But if this is a base class with no defined members of its own, it
tries doing a value lookup on all its subclasses until it finds the
value.
NOTE: Because of the implementation details of Enum, this must be a new
classmethod, and can't be implemented as __new__() [1].
[1] <https://docs.python.org/3.5/library/enum.html#finer-points>
:param value:
The value to look up. Can be a value, or an enum instance.
:type value:
`varies`
:raises:
:class:`ValueError` if the value isn't found anywhere.
"""
try:
return cls(value)
except ValueError:
exc_info = sys.exc_info()
for subclass in cls.__subclasses__():
try:
return subclass.lookup(value)
except ValueError:
pass
# This needs to be `reraise()`, and not just `raise`. Otherwise,
# the inner exception from the previous line is re-raised, which
# isn't desired.
reraise(*exc_info)
@property
def __members__(cls):
members = OrderedDict(super(ExtendableEnumMeta, cls).__members__)
for subclass in cls.__subclasses__():
members.update(subclass.__members__)
return members
def __contains__(cls, member):
if super(ExtendableEnumMeta, cls).__contains__(member):
return True
def in_(subclass):
return member in subclass
return any(map(in_, cls.__subclasses__()))
def __dir__(cls):
return list(set(super(ExtendableEnumMeta, cls).__dir__()).union(*map(dir, cls.__subclasses__())))
def __getitem__(cls, name):
try:
return super(ExtendableEnumMeta, cls).__getitem__(name)
except KeyError:
exc_info = sys.exc_info()
for subclass in cls.__subclasses__():
try:
return subclass[name]
except KeyError:
pass
# This needs to be `reraise()`, and not just `raise`. Otherwise,
# the inner exception from the previous line is re-raised, which
# isn't desired.
reraise(*exc_info)
def __getattr__(cls, name):
try:
return super(ExtendableEnumMeta, cls).__getattr__(name)
except AttributeError:
exc_info = sys.exc_info()
try:
# If the super() call fails, don't call getattr() on all of the
# subclasses. Instead, use __getitem__ to do this. This is
# because we don't want to grab arbitrary attributes from
# subclasses, only enum members. For enum members, __getattr__
# and __getitem__ have the same behavior. And __getitem__ has
# the advantage of never grabbing anything other than enum
# members.
return cls[name]
except KeyError:
pass
# This needs to be `reraise()`, and not just `raise`. Otherwise,
# the inner exception from the previous line is re-raised, which
# isn't desired.
reraise(*exc_info)
def __iter__(cls):
return chain(super(ExtendableEnumMeta, cls).__iter__(), chain.from_iterable(map(iter, cls.__subclasses__())))
def __len__(cls):
return super(ExtendableEnumMeta, cls).__len__() + sum(map(len, cls.__subclasses__()))
def __reversed__(cls):
return reversed(list(cls))
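# Hypothetical usage sketch, following the EnumBase/Enum1/Enum2_* hierarchy from the class
# docstring above:
#   EnumBase.lookup('B') is Enum2_1.B   # recursive value lookup through subclasses
#   Enum2_1.B in EnumBase               # True: __contains__ also recurses
#   EnumBase['C'] is Enum2_2.C          # __getitem__ falls through to subclasses
#   list(EnumBase)                      # iterates the members of every subclass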
|
py | b411fb01d156ed758bc0a91604a68b3a9943fa32 | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from facebook_business.adobjects.abstractobject import AbstractObject
"""
This class is auto-generated.
For any issues or feature requests related to this class, please let us know on
github and we'll fix in our codegen framework. We'll not be able to accept
pull request for this class.
"""
class KeyValue(
AbstractObject,
):
def __init__(self, api=None):
super(KeyValue, self).__init__()
self._isKeyValue = True
self._api = api
class Field(AbstractObject.Field):
key = 'key'
value = 'value'
_field_types = {
'key': 'string',
'value': 'string',
}
@classmethod
def _get_field_enum_info(cls):
field_enum_info = {}
return field_enum_info
|
py | b411fb34ec753e5bbd59aa126fd9133fc185cb6e | import logging
import random
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
from transformers import AdamW
from torch.optim import Adam
from tensorize import CorefDataProcessor
import util
import time
from os.path import join
from metrics import CorefEvaluator
from datetime import datetime
from torch.optim.lr_scheduler import LambdaLR
from model import CorefModel
import conll
import sys
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger()
class Runner:
def __init__(self, config_name, gpu_id=0, seed=None, config_dir="./"):
self.name = config_name
self.name_suffix = datetime.now().strftime('%b%d_%H-%M-%S')
self.gpu_id = gpu_id
self.seed = seed
# Set up config
self.config = util.initialize_config(config_name, config_dir)
# Set up logger
log_path = join(self.config['log_dir'], 'log_' + self.name_suffix + '.txt')
logger.addHandler(logging.FileHandler(log_path, 'a'))
logger.info('Log file path: %s' % log_path)
# Set up seed
if seed:
util.set_seed(seed)
# Set up device
self.device = torch.device('cpu' if gpu_id is None else f'cuda:{gpu_id}')
# Set up data
self.data = CorefDataProcessor(self.config)
def initialize_model(self, saved_suffix=None):
model = CorefModel(self.config, self.device)
if saved_suffix:
self.load_model_checkpoint(model, saved_suffix)
return model
def train(self, model):
conf = self.config
logger.info(conf)
epochs, grad_accum = conf['num_epochs'], conf['gradient_accumulation_steps']
model.to(self.device)
logger.info('Model parameters:')
for name, param in model.named_parameters():
logger.info('%s: %s' % (name, tuple(param.shape)))
# Set up tensorboard
tb_path = join(conf['tb_dir'], self.name + '_' + self.name_suffix)
tb_writer = SummaryWriter(tb_path, flush_secs=30)
logger.info('Tensorboard summary path: %s' % tb_path)
# Set up data
examples_train, examples_dev, examples_test = self.data.get_tensor_examples()
stored_info = self.data.get_stored_info()
# Set up optimizer and scheduler
total_update_steps = len(examples_train) * epochs // grad_accum
optimizers = self.get_optimizer(model)
schedulers = self.get_scheduler(optimizers, total_update_steps)
# Get model parameters for grad clipping
bert_param, task_param = model.get_params()
# Start training
logger.info('*******************Training*******************')
logger.info('Num samples: %d' % len(examples_train))
logger.info('Num epochs: %d' % epochs)
logger.info('Gradient accumulation steps: %d' % grad_accum)
logger.info('Total update steps: %d' % total_update_steps)
loss_during_accum = [] # To compute effective loss at each update
loss_during_report = 0.0 # Effective loss during logging step
loss_history = [] # Full history of effective loss; length equals total update steps
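        # e.g. with gradient_accumulation_steps=4 (illustrative number), each micro-batch
        # loss below is divided by 4 before backward(), so summing the 4 scaled values
        # recovers the average raw loss for that update (the "effective" loss logged here).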
max_f1 = 0
start_time = time.time()
model.zero_grad()
early_stopping = conf["early_stopping"]
for epo in range(epochs):
random.shuffle(examples_train) # Shuffle training set
for doc_key, example in examples_train:
# Forward pass
model.train()
example_gpu = [d.to(self.device) for d in example]
_, loss = model(*example_gpu)
# Backward; accumulate gradients and clip by grad norm
if grad_accum > 1:
loss /= grad_accum
loss.backward()
if conf['max_grad_norm']:
torch.nn.utils.clip_grad_norm_(bert_param, conf['max_grad_norm'])
torch.nn.utils.clip_grad_norm_(task_param, conf['max_grad_norm'])
loss_during_accum.append(loss.item())
# Update
if len(loss_during_accum) % grad_accum == 0:
for optimizer in optimizers:
optimizer.step()
model.zero_grad()
for scheduler in schedulers:
scheduler.step()
# Compute effective loss
effective_loss = np.sum(loss_during_accum).item()
loss_during_accum = []
loss_during_report += effective_loss
loss_history.append(effective_loss)
# Report
if len(loss_history) % conf['report_frequency'] == 0:
# Show avg loss during last report interval
avg_loss = loss_during_report / conf['report_frequency']
loss_during_report = 0.0
end_time = time.time()
logger.info('Step %d: avg loss %.2f; steps/sec %.2f' %
(len(loss_history), avg_loss, conf['report_frequency'] / (end_time - start_time)))
start_time = end_time
tb_writer.add_scalar('Training_Loss', avg_loss, len(loss_history))
tb_writer.add_scalar('Learning_Rate_Bert', schedulers[0].get_last_lr()[0], len(loss_history))
tb_writer.add_scalar('Learning_Rate_Task', schedulers[1].get_last_lr()[-1], len(loss_history))
# Evaluate
if len(loss_history) > 0 and len(loss_history) % conf['eval_frequency'] == 0:
f1, _ = self.evaluate(model, examples_dev, stored_info, len(loss_history), official=False, conll_path=self.config['conll_eval_path'], tb_writer=tb_writer)
if f1 > max_f1:
max_f1 = f1
self.save_model_checkpoint(model, len(loss_history))
early_stopping = conf["early_stopping"]
else:
early_stopping -= 1
logger.info('Eval max f1: %.2f' % max_f1)
start_time = time.time()
if early_stopping == 0:
break
logger.info('**********Finished training**********')
logger.info('Actual update steps: %d' % len(loss_history))
# Wrap up
tb_writer.close()
return loss_history
def evaluate(self, model, tensor_examples, stored_info, step, official=False, conll_path=None, tb_writer=None):
logger.info('Step %d: evaluating on %d samples...' % (step, len(tensor_examples)))
model.to(self.device)
evaluator = CorefEvaluator()
doc_to_prediction = {}
model.eval()
for i, (doc_key, tensor_example) in enumerate(tensor_examples):
gold_clusters = stored_info['gold'][doc_key]
tensor_example = tensor_example[:7] # Strip out gold
example_gpu = [d.to(self.device) for d in tensor_example]
with torch.no_grad():
_, _, _, span_starts, span_ends, antecedent_idx, antecedent_scores = model(*example_gpu)
span_starts, span_ends = span_starts.tolist(), span_ends.tolist()
antecedent_idx, antecedent_scores = antecedent_idx.tolist(), antecedent_scores.tolist()
predicted_clusters = model.update_evaluator(span_starts, span_ends, antecedent_idx, antecedent_scores, gold_clusters, evaluator)
doc_to_prediction[doc_key] = predicted_clusters
p, r, f = evaluator.get_prf()
metrics = {'Eval_Avg_Precision': p * 100, 'Eval_Avg_Recall': r * 100, 'Eval_Avg_F1': f * 100}
for name, score in metrics.items():
logger.info('%s: %.2f' % (name, score))
if tb_writer:
tb_writer.add_scalar(name, score, step)
if official:
conll_results = conll.evaluate_conll(conll_path, doc_to_prediction, stored_info['subtoken_maps'])
official_f1 = sum(results["f"] for results in conll_results.values()) / len(conll_results)
logger.info('Official avg F1: %.4f' % official_f1)
return f * 100, metrics
def predict(self, model, tensor_examples):
logger.info('Predicting %d samples...' % len(tensor_examples))
model.to(self.device)
predicted_spans, predicted_antecedents, predicted_clusters = [], [], []
model.eval()
for i, (doc_key, tensor_example) in enumerate(tensor_examples):
tensor_example = tensor_example[:7]
example_gpu = [d.to(self.device) for d in tensor_example]
with torch.no_grad():
_, _, _, span_starts, span_ends, antecedent_idx, antecedent_scores = model(*example_gpu)
span_starts, span_ends = span_starts.tolist(), span_ends.tolist()
antecedent_idx, antecedent_scores = antecedent_idx.tolist(), antecedent_scores.tolist()
clusters, mention_to_cluster_id, antecedents = model.get_predicted_clusters(span_starts, span_ends, antecedent_idx, antecedent_scores)
spans = [(span_start, span_end) for span_start, span_end in zip(span_starts, span_ends)]
predicted_spans.append(spans)
predicted_antecedents.append(antecedents)
predicted_clusters.append(clusters)
return predicted_clusters, predicted_spans, predicted_antecedents
def get_optimizer(self, model):
no_decay = ['bias', 'LayerNorm.weight']
bert_param, task_param = model.get_params(named=True)
grouped_bert_param = [
{
'params': [p for n, p in bert_param if not any(nd in n for nd in no_decay)],
'lr': self.config['bert_learning_rate'],
'weight_decay': self.config['adam_weight_decay']
}, {
'params': [p for n, p in bert_param if any(nd in n for nd in no_decay)],
'lr': self.config['bert_learning_rate'],
'weight_decay': 0.0
}
]
optimizers = [
AdamW(grouped_bert_param, lr=self.config['bert_learning_rate'], eps=self.config['adam_eps']),
Adam(model.get_params()[1], lr=self.config['task_learning_rate'], eps=self.config['adam_eps'], weight_decay=0)
]
return optimizers
# grouped_parameters = [
# {
# 'params': [p for n, p in bert_param if not any(nd in n for nd in no_decay)],
# 'lr': self.config['bert_learning_rate'],
# 'weight_decay': self.config['adam_weight_decay']
# }, {
# 'params': [p for n, p in bert_param if any(nd in n for nd in no_decay)],
# 'lr': self.config['bert_learning_rate'],
# 'weight_decay': 0.0
# }, {
# 'params': [p for n, p in task_param if not any(nd in n for nd in no_decay)],
# 'lr': self.config['task_learning_rate'],
# 'weight_decay': self.config['adam_weight_decay']
# }, {
# 'params': [p for n, p in task_param if any(nd in n for nd in no_decay)],
# 'lr': self.config['task_learning_rate'],
# 'weight_decay': 0.0
# }
# ]
# optimizer = AdamW(grouped_parameters, lr=self.config['task_learning_rate'], eps=self.config['adam_eps'])
# return optimizer
def get_scheduler(self, optimizers, total_update_steps):
# Only warm up bert lr
warmup_steps = int(total_update_steps * self.config['warmup_ratio'])
def lr_lambda_bert(current_step):
if current_step < warmup_steps:
return float(current_step) / float(max(1, warmup_steps))
return max(
0.0, float(total_update_steps - current_step) / float(max(1, total_update_steps - warmup_steps))
)
def lr_lambda_task(current_step):
return max(0.0, float(total_update_steps - current_step) / float(max(1, total_update_steps)))
schedulers = [
LambdaLR(optimizers[0], lr_lambda_bert),
LambdaLR(optimizers[1], lr_lambda_task)
]
return schedulers
# return LambdaLR(optimizer, [lr_lambda_bert, lr_lambda_bert, lr_lambda_task, lr_lambda_task])
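        # Worked example (illustrative numbers, not from any shipped config): with
        # total_update_steps=1000 and warmup_ratio=0.1, warmup_steps=100, so
        # lr_lambda_bert(50)=0.5 while warming up, lr_lambda_bert(100)=1.0, and
        # lr_lambda_bert(550)=0.5 on the linear decay back to 0; lr_lambda_task(500)=0.5
        # since the task head decays linearly from step 0 with no warmup.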
def save_model_checkpoint(self, model, step):
path_ckpt = join(self.config['log_dir'], f'model_{self.name_suffix}_{step}.bin')
torch.save(model.state_dict(), path_ckpt)
logger.info('Saved model to %s' % path_ckpt)
def load_model_checkpoint(self, model, suffix):
path_ckpt = join(self.config['log_dir'], f'model_{suffix}.bin')
model.load_state_dict(torch.load(path_ckpt, map_location=torch.device('cpu')), strict=False)
logger.info('Loaded model from %s' % path_ckpt)
if __name__ == '__main__':
config_name, gpu_id, config_dir = sys.argv[1], int(sys.argv[2]), sys.argv[3]
    saved_suffix = None
if len(sys.argv) == 5:
saved_suffix = sys.argv[4]
runner = Runner(config_name, gpu_id, config_dir=config_dir)
model = runner.initialize_model(saved_suffix=saved_suffix)
runner.train(model)
|
py | b411fba75e65008bc9b40cf7cddc036c33e3a990 | #!"C:\Users\jacoe\Documents\Progetti\Progetti Web\EVoting-TW\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.7')()
)
|
py | b411fbfcc9f469b78be2f773c1fa878ad06bf799 | from .BaseNetModels import *
from .SharedModels import * |
py | b411fca07bbaab787d8b345a05ca4d0e9018c829 | import os
import numpy as np
def parse_model_cfg(path: str):
    # Check that the cfg file exists
    if not path.endswith(".cfg") or not os.path.exists(path):
        raise FileNotFoundError("the cfg file does not exist...")
    # Read the file
with open(path, "r") as f:
lines = f.read().split("\n")
    # Drop empty lines and comment lines
lines = [x for x in lines if x and not x.startswith("#")]
    # Strip leading and trailing whitespace from each line
lines = [x.strip() for x in lines]
mdefs = [] # module definitions
for line in lines:
if line.startswith("["): # this marks the start of a new block
mdefs.append({})
mdefs[-1]["type"] = line[1:-1].strip() # 记录module类型
# 如果是卷积模块,设置默认不使用BN(普通卷积层后面会重写成1,最后的预测层conv保持为0)
if mdefs[-1]["type"] == "convolutional":
mdefs[-1]["batch_normalize"] = 0
else:
key, val = line.split("=")
key = key.strip()
val = val.strip()
if key == "anchors":
# anchors = 10,13, 16,30, 33,23, 30,61, 62,45, 59,119, 116,90, 156,198, 373,326
val = val.replace(" ", "") # 将空格去除
mdefs[-1][key] = np.array([float(x) for x in val.split(",")]).reshape((-1, 2)) # np anchors
elif (key in ["from", "layers", "mask"]) or (key == "size" and "," in val):
mdefs[-1][key] = [int(x) for x in val.split(",")]
else:
                # TODO: .isnumeric() misses floats and negative values
                if val.isnumeric():  # numeric value
                    mdefs[-1][key] = int(val) if (int(val) - float(val)) == 0 else float(val)
                else:
                    mdefs[-1][key] = val  # non-numeric value: keep as string
# check all fields are supported
supported = ['type', 'batch_normalize', 'filters', 'size', 'stride', 'pad', 'activation', 'layers', 'groups',
'from', 'mask', 'anchors', 'classes', 'num', 'jitter', 'ignore_thresh', 'truth_thresh', 'random',
'stride_x', 'stride_y', 'weights_type', 'weights_normalization', 'scale_x_y', 'beta_nms', 'nms_kind',
'iou_loss', 'iou_normalizer', 'cls_normalizer', 'iou_thresh', 'probability']
    # Check every module's configuration
    for x in mdefs[1:]:  # index 0 is the [net] config
        # check every key in the module's config dict
for k in x:
if k not in supported:
raise ValueError("Unsupported fields:{} in cfg".format(k))
return mdefs
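# Hedged illustration (hypothetical cfg content, not a file shipped here):
#
#   [net]
#   batch=64
#
#   [convolutional]
#   batch_normalize=1
#   filters=32
#   size=3
#   activation=leaky
#
# parse_model_cfg on such a file would return:
#   [{'type': 'net', 'batch': 64},
#    {'type': 'convolutional', 'batch_normalize': 1, 'filters': 32,
#     'size': 3, 'activation': 'leaky'}]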
def parse_data_cfg(path):
# Parses the data configuration file
if not os.path.exists(path) and os.path.exists('data' + os.sep + path): # add data/ prefix if omitted
path = 'data' + os.sep + path
with open(path, 'r') as f:
lines = f.readlines()
options = dict()
for line in lines:
line = line.strip()
if line == '' or line.startswith('#'):
continue
key, val = line.split('=')
options[key.strip()] = val.strip()
return options
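# Hedged illustration (hypothetical .data file):
#
#   classes=80
#   train=data/train.txt
#   valid=data/val.txt
#
# parse_data_cfg would return
#   {'classes': '80', 'train': 'data/train.txt', 'valid': 'data/val.txt'}
# (all values stay strings; callers convert as needed).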
|
py | b411fd1adafd2790c8817a1d6f2424c5367d6033 |
import string
import operator as op
import numpy as np
from . import nodal_corrections as nc
from functools import reduce
class BaseConstituent(object):
xdo_int = {
'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7, 'H': 8, 'I': 9,
'J': 10, 'K': 11, 'L': 12, 'M': 13, 'N': 14, 'O': 15, 'P': 16, 'Q': 17,
'R': -8, 'S': -7, 'T': -6, 'U': -5, 'V': -4, 'W': -3, 'X': -2, 'Y': -1,
'Z': 0
}
int_xdo = {v:k for k, v in xdo_int.items()}
def __init__(self, name, xdo='', coefficients=[], u=nc.u_zero, f=nc.f_unity):
if xdo == '':
self.coefficients = np.array(coefficients)
else:
self.coefficients = np.array(self.xdo_to_coefficients(xdo))
self.name = name
self.u = u
self.f = f
def xdo_to_coefficients(self, xdo):
return [self.xdo_int[l.upper()] for l in xdo if l in string.ascii_letters]
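    # Example: xdo_to_coefficients('B ZZZ ZZZ') gives [2, 0, 0, 0, 0, 0, 0] (the M2
    # constituent defined below): each letter maps through xdo_int, and the spaces
    # are skipped because they are not ASCII letters.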
def coefficients_to_xdo(self, coefficients):
        return ''.join([self.int_xdo[c] for c in coefficients])
def V(self, astro):
return np.dot(self.coefficients, self.astro_values(astro))
def xdo(self):
return self.coefficients_to_xdo(self.coefficients)
def speed(self, a):
return np.dot(self.coefficients, self.astro_speeds(a))
def astro_xdo(self, a):
return [a['T+h-s'], a['s'], a['h'], a['p'], a['N'], a['pp'], a['90']]
def astro_speeds(self, a):
return np.array([each.speed for each in self.astro_xdo(a)])
def astro_values(self, a):
return np.array([each.value for each in self.astro_xdo(a)])
#Consider two out of phase constituents which travel at the same speed to
#be identical
def __eq__(self, c):
return np.all(self.coefficients[:-1] == c.coefficients[:-1])
def __hash__(self):
return hash(tuple(self.coefficients[:-1]))
class CompoundConstituent(BaseConstituent):
def __init__(self, members = [], **kwargs):
self.members = members
if 'u' not in kwargs:
kwargs['u'] = self.u
if 'f' not in kwargs:
kwargs['f'] = self.f
super(CompoundConstituent,self).__init__(**kwargs)
self.coefficients = reduce(op.add,[c.coefficients * n for (c,n) in members])
def speed(self, a):
return reduce(op.add, [n * c.speed(a) for (c,n) in self.members])
def V(self, a):
return reduce(op.add, [n * c.V(a) for (c,n) in self.members])
def u(self, a):
return reduce(op.add, [n * c.u(a) for (c,n) in self.members])
def f(self, a):
return reduce(op.mul, [c.f(a) ** abs(n) for (c,n) in self.members])
###### Base Constituents
#Long Term
_Z0 = BaseConstituent(name = 'Z0', xdo = 'Z ZZZ ZZZ', u = nc.u_zero, f = nc.f_unity)
_Sa = BaseConstituent(name = 'Sa', xdo = 'Z ZAZ ZZZ', u = nc.u_zero, f = nc.f_unity)
_Ssa = BaseConstituent(name = 'Ssa', xdo = 'Z ZBZ ZZZ', u = nc.u_zero, f = nc.f_unity)
_Mm = BaseConstituent(name = 'Mm', xdo = 'Z AZY ZZZ', u = nc.u_zero, f = nc.f_Mm)
_Mf = BaseConstituent(name = 'Mf', xdo = 'Z BZZ ZZZ', u = nc.u_Mf, f = nc.f_Mf)
#Diurnals
_Q1 = BaseConstituent(name = 'Q1', xdo = 'A XZA ZZA', u = nc.u_O1, f = nc.f_O1)
_O1 = BaseConstituent(name = 'O1', xdo = 'A YZZ ZZA', u = nc.u_O1, f = nc.f_O1)
_K1 = BaseConstituent(name = 'K1', xdo = 'A AZZ ZZY', u = nc.u_K1, f = nc.f_K1)
_J1 = BaseConstituent(name = 'J1', xdo = 'A BZY ZZY', u = nc.u_J1, f = nc.f_J1)
#M1 is a tricky business for reasons of convention, rather than theory. The
#reasons for this are best summarised by Schureman paragraphs 126, 127 and in
#the comments found in congen_input.txt of xtides, so I won't go over all this
#again here.
_M1 = BaseConstituent(name = 'M1', xdo = 'A ZZZ ZZA', u = nc.u_M1, f = nc.f_M1)
_P1 = BaseConstituent(name = 'P1', xdo = 'A AXZ ZZA', u = nc.u_zero, f = nc.f_unity)
_S1 = BaseConstituent(name = 'S1', xdo = 'A AYZ ZZZ', u = nc.u_zero, f = nc.f_unity)
_OO1 = BaseConstituent(name = 'OO1', xdo = 'A CZZ ZZY', u = nc.u_OO1, f = nc.f_OO1)
#Semi-Diurnals
_2N2 = BaseConstituent(name = '2N2', xdo = 'B XZB ZZZ', u = nc.u_M2, f = nc.f_M2)
_N2 = BaseConstituent(name = 'N2', xdo = 'B YZA ZZZ', u = nc.u_M2, f = nc.f_M2)
_nu2 = BaseConstituent(name = 'nu2', xdo = 'B YBY ZZZ', u = nc.u_M2, f = nc.f_M2)
_M2 = BaseConstituent(name = 'M2', xdo = 'B ZZZ ZZZ', u = nc.u_M2, f = nc.f_M2)
_lambda2 = BaseConstituent(name = 'lambda2', xdo = 'B AXA ZZB', u = nc.u_M2, f = nc.f_M2)
_L2 = BaseConstituent(name = 'L2', xdo = 'B AZY ZZB', u = nc.u_L2, f = nc.f_L2)
_T2 = BaseConstituent(name = 'T2', xdo = 'B BWZ ZAZ', u = nc.u_zero, f = nc.f_unity)
_S2 = BaseConstituent(name = 'S2', xdo = 'B BXZ ZZZ', u = nc.u_zero, f = nc.f_unity)
_R2 = BaseConstituent(name = 'R2', xdo = 'B BYZ ZYB', u = nc.u_zero, f = nc.f_unity)
_K2 = BaseConstituent(name = 'K2', xdo = 'B BZZ ZZZ', u = nc.u_K2, f = nc.f_K2)
#Third-Diurnals
_M3 = BaseConstituent(name = 'M3', xdo = 'C ZZZ ZZZ', u = lambda a: nc.u_Modd(a,3), f = lambda a: nc.f_Modd(a,3))
###### Compound Constituents
#Long Term
_MSF = CompoundConstituent(name = 'MSF', members = [(_S2, 1), (_M2, -1)])
#Diurnal
_2Q1 = CompoundConstituent(name = '2Q1', members = [(_N2, 1), (_J1, -1)])
_rho1 = CompoundConstituent(name = 'rho1', members = [(_nu2, 1), (_K1, -1)])
#Semi-Diurnal
_mu2 = CompoundConstituent(name = 'mu2', members = [(_M2, 2), (_S2, -1)]) #2MS2
_2SM2 = CompoundConstituent(name = '2SM2', members = [(_S2, 2), (_M2, -1)])
#Third-Diurnal
_2MK3 = CompoundConstituent(name = '2MK3', members = [(_M2, 1), (_O1, 1)])
_MK3 = CompoundConstituent(name = 'MK3', members = [(_M2, 1), (_K1, 1)])
#Quarter-Diurnal
_MN4 = CompoundConstituent(name = 'MN4', members = [(_M2, 1), (_N2, 1)])
_M4 = CompoundConstituent(name = 'M4', members = [(_M2, 2)])
_MS4 = CompoundConstituent(name = 'MS4', members = [(_M2, 1), (_S2, 1)])
_S4 = CompoundConstituent(name = 'S4', members = [(_S2, 2)])
#Sixth-Diurnal
_M6 = CompoundConstituent(name = 'M6', members = [(_M2, 3)])
_S6 = CompoundConstituent(name = 'S6', members = [(_S2, 3)])
#Eighth-Diurnals
_M8 = CompoundConstituent(name = 'M8', members = [(_M2, 4)])
noaa = [
_M2, _S2, _N2, _K1, _M4, _O1, _M6, _MK3, _S4, _MN4, _nu2, _S6, _mu2,
_2N2, _OO1, _lambda2, _S1, _M1, _J1, _Mm, _Ssa, _Sa, _MSF, _Mf,
_rho1, _Q1, _T2, _R2, _2Q1, _P1, _2SM2, _M3, _L2, _2MK3, _K2,
_M8, _MS4
]
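# For example, _M4 above is defined as [(_M2, 2)], so CompoundConstituent gives it
# coefficients 2 * _M2.coefficients == [4, 0, 0, 0, 0, 0, 0] and a speed of exactly
# twice the M2 speed.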
|
py | b411fdb17fc7787ce5ce8338366d8789ed2717bc | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworksOperations:
"""VirtualNetworksOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
virtual_network_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
virtual_network_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
async def get(
self,
resource_group_name: str,
virtual_network_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.VirtualNetwork":
"""Gets the specified virtual network by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetwork, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_06_01.models.VirtualNetwork
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
virtual_network_name: str,
parameters: "_models.VirtualNetwork",
**kwargs
) -> "_models.VirtualNetwork":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualNetwork')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
virtual_network_name: str,
parameters: "_models.VirtualNetwork",
**kwargs
) -> AsyncLROPoller["_models.VirtualNetwork"]:
"""Creates or updates a virtual network in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param parameters: Parameters supplied to the create or update virtual network operation.
:type parameters: ~azure.mgmt.network.v2018_06_01.models.VirtualNetwork
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualNetwork or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_06_01.models.VirtualNetwork]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
virtual_network_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> "_models.VirtualNetwork":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
virtual_network_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> AsyncLROPoller["_models.VirtualNetwork"]:
"""Updates a virtual network tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param parameters: Parameters supplied to update virtual network tags.
:type parameters: ~azure.mgmt.network.v2018_06_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualNetwork or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2018_06_01.models.VirtualNetwork]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["_models.VirtualNetworkListResult"]:
"""Gets all virtual networks in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_06_01.models.VirtualNetworkListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworks'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.VirtualNetworkListResult"]:
"""Gets all virtual networks in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_06_01.models.VirtualNetworkListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks'} # type: ignore
async def check_ip_address_availability(
self,
resource_group_name: str,
virtual_network_name: str,
ip_address: Optional[str] = None,
**kwargs
) -> "_models.IPAddressAvailabilityResult":
"""Checks whether a private IP address is available for use.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param ip_address: The private IP address to be verified.
:type ip_address: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IPAddressAvailabilityResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_06_01.models.IPAddressAvailabilityResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IPAddressAvailabilityResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
# Construct URL
url = self.check_ip_address_availability.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if ip_address is not None:
query_parameters['ipAddress'] = self._serialize.query("ip_address", ip_address, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IPAddressAvailabilityResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_ip_address_availability.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/CheckIPAddressAvailability'} # type: ignore
def list_usage(
self,
resource_group_name: str,
virtual_network_name: str,
**kwargs
) -> AsyncIterable["_models.VirtualNetworkListUsageResult"]:
"""Lists usage stats.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkListUsageResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2018_06_01.models.VirtualNetworkListUsageResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkListUsageResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_usage.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkListUsageResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_usage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/usages'} # type: ignore
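# Hedged usage sketch (not part of the generated code; the client and credential
# classes are assumptions based on the usual azure-identity / azure-mgmt-network
# packaging). These operations are reached through an async management client
# rather than instantiated directly, as the class docstring notes:
#
#     from azure.identity.aio import DefaultAzureCredential
#     from azure.mgmt.network.aio import NetworkManagementClient
#
#     async with NetworkManagementClient(DefaultAzureCredential(), subscription_id) as client:
#         poller = await client.virtual_networks.begin_delete("my-rg", "my-vnet")
#         await poller.result()  # wait for the long-running delete to finish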
|
py | b411fdf5f2f82dc76029062f71617ae99d2a59e3 | # coding: utf-8
"""
server
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DisassociatePublicIpFromServerInstanceRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'public_ip_instance_no': 'str'
}
attribute_map = {
'public_ip_instance_no': 'publicIpInstanceNo'
}
def __init__(self, public_ip_instance_no=None): # noqa: E501
"""DisassociatePublicIpFromServerInstanceRequest - a model defined in Swagger""" # noqa: E501
self._public_ip_instance_no = None
self.discriminator = None
self.public_ip_instance_no = public_ip_instance_no
@property
def public_ip_instance_no(self):
"""Gets the public_ip_instance_no of this DisassociatePublicIpFromServerInstanceRequest. # noqa: E501
        Public IP instance number  # noqa: E501
:return: The public_ip_instance_no of this DisassociatePublicIpFromServerInstanceRequest. # noqa: E501
:rtype: str
"""
return self._public_ip_instance_no
@public_ip_instance_no.setter
def public_ip_instance_no(self, public_ip_instance_no):
"""Sets the public_ip_instance_no of this DisassociatePublicIpFromServerInstanceRequest.
        Public IP instance number  # noqa: E501
:param public_ip_instance_no: The public_ip_instance_no of this DisassociatePublicIpFromServerInstanceRequest. # noqa: E501
:type: str
"""
if public_ip_instance_no is None:
raise ValueError("Invalid value for `public_ip_instance_no`, must not be `None`") # noqa: E501
self._public_ip_instance_no = public_ip_instance_no
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DisassociatePublicIpFromServerInstanceRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
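# Example (illustrative values):
#   req = DisassociatePublicIpFromServerInstanceRequest(public_ip_instance_no="1234")
#   req.to_dict()  # -> {'public_ip_instance_no': '1234'}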
|
py | b411ff1c809ab03a6a2afeff91c78aea4e6d0b40 | import bz2
import argparse
import os
import json
import re
import sys
FILE_SUFFIX = ".bz2"
OUTPUT_FILE = "output.bz2"
REPORT_FILE = "RC_report.txt"
def main():
assert sys.version_info >= (3, 3), \
"Must be run in Python 3.3 or later. You are running {}".format(sys.version)
parser = argparse.ArgumentParser()
parser.add_argument('--input_file', type=str, default='reddit_data',
help='data file or directory containing bz2 archive of json reddit data')
parser.add_argument('--logdir', type=str, default='output/',
help='directory to save the output and report')
parser.add_argument('--config_file', type=str, default='parser_config_standard.json',
help='json parameters for parsing')
parser.add_argument('--comment_cache_size', type=int, default=1e7,
help='max number of comments to cache in memory before flushing')
parser.add_argument('--output_file_size', type=int, default=2e8,
help='max size of each output file (give or take one conversation)')
parser.add_argument('--print_every', type=int, default=1000,
help='print an update to the screen this often')
parser.add_argument('--min_conversation_length', type=int, default=5,
help='conversations must have at least this many comments for inclusion')
parser.add_argument('--print_subreddit', type=str2bool, nargs='?',
const=False, default=False,
help='set to true to print the name of the subreddit before each conversation'
+ ' to facilitate more convenient blacklisting in the config json file.'
+ ' (Remember to disable before constructing training data.)')
args = parser.parse_args()
parse_main(args)
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'): return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'): return False
else: raise argparse.ArgumentTypeError('Boolean value expected.')
class RedditComment(object):
def __init__(self, json_object, record_subreddit=False):
self.body = json_object['body']
if 'score' in json_object:
self.score = json_object['score']
        elif 'ups' in json_object and 'downs' in json_object:
            self.score = json_object['ups'] - json_object['downs']
        else: raise ValueError("Reddit comment did not include a score attribute. "
            + "Comment was as follows: " + str(json_object))
self.author = json_object['author']
parent_id = json_object['parent_id']
# t1_ prefixes indicate comments. t3_ prefix would indicate a link submission.
if parent_id.startswith('t1_'): self.parent_id = parent_id
else: self.parent_id = None
self.child_id = None
if record_subreddit: self.subreddit = json_object['subreddit']
def parse_main(args):
if not os.path.isfile(args.config_file):
print("File not found: {}".format(args.input_file))
return
with open(args.config_file, 'r') as f:
config = json.load(f)
subreddit_blacklist = set(config['subreddit_blacklist'])
subreddit_whitelist = set(config['subreddit_whitelist'])
substring_blacklist = set(config['substring_blacklist'])
if not os.path.exists(args.input_file):
print("File not found: {}".format(args.input_file))
return
if os.path.isfile(args.logdir):
print("File already exists at output directory location: {}".format(args.logdir))
return
if not os.path.exists(args.logdir):
os.makedirs(args.logdir)
subreddit_dict = {}
comment_dict = {}
raw_data = raw_data_generator(args.input_file)
output_handler = OutputHandler(os.path.join(args.logdir, OUTPUT_FILE), args.output_file_size)
done = False
total_read = 0
while not done:
done, i = read_comments_into_cache(raw_data, comment_dict, args.print_every, args.print_subreddit,
            args.comment_cache_size, subreddit_dict, subreddit_blacklist, subreddit_whitelist, substring_blacklist)
total_read += i
process_comment_cache(comment_dict, args.print_every)
write_comment_cache(comment_dict, output_handler, args.print_every,
args.print_subreddit, args.min_conversation_length)
write_report(os.path.join(args.logdir, REPORT_FILE), subreddit_dict)
comment_dict.clear()
print("\nRead all {:,d} lines from {}.".format(total_read, args.input_file))
def read_comments_into_cache(raw_data, comment_dict, print_every, print_subreddit, comment_cache_size,
subreddit_dict, subreddit_blacklist, subreddit_whitelist, substring_blacklist):
done = False
cache_count = 0
for i, line in enumerate(raw_data):
# Ignore certain kinds of malformed JSON
if len(line) > 1 and (line[-1] == '}' or line[-2] == '}'):
comment = json.loads(line)
if post_qualifies(comment, subreddit_blacklist, # Also preprocesses the post.
subreddit_whitelist, substring_blacklist):
sub = comment['subreddit']
if sub in subreddit_dict:
subreddit_dict[sub] += 1
else: subreddit_dict[sub] = 1
comment_dict[comment['id']] = RedditComment(comment, print_subreddit)
cache_count += 1
if cache_count % print_every == 0:
print("\rCached {:,d} comments".format(cache_count), end='')
sys.stdout.flush()
if cache_count > comment_cache_size: break
else: # raw_data has been exhausted.
done = True
print()
return done, i
def raw_data_generator(path):
if os.path.isdir(path):
for walk_root, walk_dir, walk_files in os.walk(path):
for file_name in walk_files:
file_path = os.path.join(walk_root, file_name)
if file_path.endswith(FILE_SUFFIX):
print("\nReading from {}".format(file_path))
with bz2.open(file_path, "rt") as raw_data:
try:
for line in raw_data: yield line
except IOError:
print("IOError from file {}".format(file_path))
continue
else: print("Skipping file {} (doesn't end with {})".format(file_path, FILE_SUFFIX))
elif os.path.isfile(path):
print("Reading from {}".format(path))
with bz2.open(path, "rt") as raw_data:
for line in raw_data: yield line
class OutputHandler():
def __init__(self, path, output_file_size):
if path.endswith(FILE_SUFFIX):
path = path[:-len(FILE_SUFFIX)]
self.base_path = path
self.output_file_size = output_file_size
self.file_reference = None
def write(self, data):
if self.file_reference is None:
self._get_current_path()
self.file_reference.write(data)
self.current_file_size += len(data)
if self.current_file_size >= self.output_file_size:
self.file_reference.close()
self.file_reference = None
def _get_current_path(self):
i = 1
while True:
path = "{} {}{}".format(self.base_path, i, FILE_SUFFIX)
if not os.path.exists(path): break
i += 1
self.current_path = path
self.current_file_size = 0
self.file_reference = bz2.open(self.current_path, mode="wt")
def post_qualifies(json_object, subreddit_blacklist,
subreddit_whitelist, substring_blacklist):
body = json_object['body']
post_length = len(body)
if post_length < 4 or post_length > 200: return False
subreddit = json_object['subreddit']
if len(subreddit_whitelist) > 0 and subreddit not in subreddit_whitelist: return False
if len(subreddit_blacklist) > 0 and subreddit in subreddit_blacklist: return False
if len(substring_blacklist) > 0:
for substring in substring_blacklist:
if body.find(substring) >= 0: return False
# Preprocess the comment text.
body = re.sub('[ \t\n\r]+', ' ', body) # Replace runs of whitespace with a single space.
body = re.sub('\^', '', body) # Strip out carets.
body = re.sub('\\\\', '', body) # Strip out backslashes.
    body = re.sub('&lt;', '<', body) # Replace HTML-escaped '&lt;' with '<'
    body = re.sub('&gt;', '>', body) # Replace HTML-escaped '&gt;' with '>'
    body = re.sub('&amp;', '&', body) # Replace HTML-escaped '&amp;' with '&'
post_length = len(body)
# Check the length again, now that we've preprocessed it.
if post_length < 4 or post_length > 200: return False
json_object['body'] = body # Save our changes
# Make sure the ID has the 't1_' prefix because that is how child comments refer to their parents.
if not json_object['id'].startswith('t1_'): json_object['id'] = 't1_' + json_object['id']
return True
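# Illustrative example (input invented for this comment, not taken from the data set):
# a raw body like "I  think\n&lt;this&gt; &amp; that ^stuff" comes out of post_qualifies()
# as "I think <this> & that stuff" after whitespace collapsing, caret/backslash stripping,
# and HTML-entity replacement.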
def process_comment_cache(comment_dict, print_every):
i = 0
for my_id, my_comment in comment_dict.items():
i += 1
if i % print_every == 0:
print("\rProcessed {:,d} comments".format(i), end='')
sys.stdout.flush()
if my_comment.parent_id is not None: # If we're not a top-level post...
if my_comment.parent_id in comment_dict: # ...and the parent is in our data set...
parent = comment_dict[my_comment.parent_id]
if parent.child_id is None: # If my parent doesn't already have a child, adopt me!
parent.child_id = my_id
else: # My parent already has a child.
parent_previous_child = comment_dict[parent.child_id]
if parent.parent_id in comment_dict: # If my grandparent is in our data set...
grandparent = comment_dict[parent.parent_id]
if my_comment.author == grandparent.author:
# If I share an author with grandparent, adopt me!
parent.child_id = my_id
elif (parent_previous_child.author != grandparent.author
and my_comment.score > parent_previous_child.score):
# If the existing child doesn't share an author with grandparent,
# higher score prevails.
parent.child_id = my_id
elif my_comment.score > parent_previous_child.score:
# If there's no grandparent, the higher-score child prevails.
parent.child_id = my_id
else:
# Parent IDs that aren't in the data set get de-referenced.
my_comment.parent_id = None
print()
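# After this pass every comment chain is a singly linked list threaded through child_id:
# a root comment (parent_id is None) points to its chosen reply, that reply to its own
# chosen reply, and so on. write_comment_cache() below walks these chains and emits one
# conversation per root that is at least min_conversation_length comments deep.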
def write_comment_cache(comment_dict, output_file, print_every,
record_subreddit=False, min_conversation_length=5):
i = 0
prev_print_count = 0
for k, v in comment_dict.items():
if v.parent_id is None and v.child_id is not None:
comment = v
depth = 0
if record_subreddit: output_string = "/r/" + comment.subreddit + '\n'
else: output_string = ""
while comment is not None:
depth += 1
output_string += '> ' + comment.body + '\n'
if comment.child_id in comment_dict:
comment = comment_dict[comment.child_id]
else:
comment = None
if depth >= min_conversation_length:
output_file.write(output_string + '\n')
i += depth
if i > prev_print_count + print_every:
prev_print_count = i
print("\rWrote {:,d} comments".format(i), end='')
sys.stdout.flush()
print()
def write_report(report_file_path, subreddit_dict):
print("Updating subreddit report file")
subreddit_list = sorted(subreddit_dict.items(), key=lambda x: -x[1])
with open(report_file_path, "w") as f:
for item in subreddit_list:
f.write("{}: {}\n".format(*item))
if __name__ == '__main__':
main()
|
py | b411ff7f18f8ca3c2ab74517204e542a609002f1 | #! author : Inzamamul Alam
import os
from typing import Iterator, Dict, Any
from shutil import copyfile
import elasticsearch
import time
import re
from datetime import datetime, timedelta
from pathlib import Path
from datetime import datetime as dt
import json
import logging
import sched
import fileinput
import sys
#ES cluster configuration--multiple/single cluster setup with authentication
es = elasticsearch.Elasticsearch(
['https://52.77.45.172:9200'],
http_auth = ('elastic','m83CzwCMR6Fyig9afYeL'),
ca_certs= False,
verify_certs= False,
ssl_show_warn= False,
retry_on_timeout = True,
timeout = 30
)
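#Note: ca_certs=False / verify_certs=False disables TLS certificate validation for the
#cluster above; tolerable for a one-off internal migration, but not for untrusted networks.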
#Create the DNS track file and collect all indices starting with "dns"
my_file = Path("log_temp") #not needed / unused
#starting dns record for the resume session
#track_dns = Path("track_dns.txt")
track_dns = Path("/opt/elk-migration/track_dns.txt")
if track_dns.is_file() == False:
#dns_track = open('track_dns.txt', 'a')
dns_track = open('/opt/elk-migration/track_dns.txt', 'a')
for i in reversed(es.indices.get('*')):
if i.startswith("dns") == True:
dns_track.write("%s\n" % i)
dns_track.close()
#track the latest file to collect its last timestamp for the resume session
def find_latest_file(object):
if object.is_file():
#file_track = open('track_dns.txt', 'r')
file_track = open('/opt/elk-migration/track_dns.txt', 'r')
for line in file_track:
latest_file = line
break
file_track.close()
else:
latest_file = None
return latest_file
#generate an individual file per index name for the elk migration
def write_batch(docs):
#if Path(docs[0]['index']+'.log').is_file() == False:
if Path('/opt/elk-migration/data/'+docs[0]['index']+'.log').is_file() == False:
with open('/opt/elk-migration/data/'+docs[0]['index']+'.log', 'a+') as f:
for item in docs:
f.write("%s resolved: %s %s\n" % (item["message"], item["domain_ip"],item["timestamp"])) #collecting and writing only message, dnslookup
#and ingest timestamp
    #if the file already exists, append to it on subsequent iterations
else :
#with open(docs[0]['index']+'.log', 'a+') as f:
with open('/opt/elk-migration/data/'+docs[0]['index']+'.log', 'a+') as f:
for item in docs:
f.write("%s resolved: %s %s\n" % (item["message"], item["domain_ip"],item["timestamp"]))
#ES query call: query, sort, and apply a range condition depending on the resume session
def query_call(index: str = ' ') -> Iterator[Dict[str, Any]]:
if gte_timestamp is not None:
body = {
'size' : 10000,
"sort" : [
{ "@timestamp" : {"order": "asc"}}
],
"query": {
"bool": {
"must": [
{ "match_all": {} },
{
"range": {
"@timestamp": {
"format": "strict_date_optional_time",
"gte": gte_timestamp,
"lte": dt.now()
}
}
}
],
}
}
}
else:
body = {
'size' : 10000,
"sort" : [
{ "@timestamp" : {"order": "asc"}}
],
"query": { "match_all": {} }
}
iter = 1
scroll = None
while True:
if iter == 1:
try:
res = es.search(index=index, body=body, scroll='1d')
scroll = res['_scroll_id']
except elasticsearch.NotFoundError as err:
print(err)
res = None
except elasticsearch.ElasticsearchException as err:
print(err)
res = None
        #pagination: fetch the next page with the scroll API inside the while loop
else:
try:
res = es.scroll(scroll_id = scroll, scroll = '1d')
scroll = res['_scroll_id']
#if index is not found
except elasticsearch.NotFoundError as err:
print(err)
res = None
#if es cluster connection has problem
except elasticsearch.ElasticsearchException as err:
print(err)
res = None
        #if nothing was returned, stop
if not res:
break
yield res['hits']['hits']
iter += 1
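#Typical use (illustrative): iterate the generator and hand each page to get_data();
#every yielded res['hits']['hits'] is a list of up to 10,000 documents, oldest first.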
#fetch data; core controller for saving dns records
def get_data(indices: Iterator[Dict[str, Any]])->None:
while True:
try:
start = time.perf_counter()
hits = next(indices)
#print("Reading slot:{0}".format(len(hits)))
elapsed = time.perf_counter() - start
#print(f'Time: {elapsed:0.4f} seconds')
#controller for dns record save
if not hits:
with open("/opt/elk-migration/track_dns.txt", "r") as t_file:
for line in t_file:
line = line.rstrip()
if line == track:
mod_line=line.replace(line,line+" Done\n")
else:
mod_line= None
for temp in fileinput.input("/opt/elk-migration/track_dns.txt", inplace=True):
if temp.strip().startswith(track):
if mod_line is not None:
fin_line = mod_line
sys.stdout.write(fin_line)
else:
sys.stdout.write(temp)
return True
#data save into a list
doc = []
for hit in hits:
data = {}
source = hit['_source']
index = hit['_index'] #variable for using cursor which dns index we are now
data['timestamp'] = source['@timestamp']
if 'domainIp' in source:
data['domain_ip'] = source['domainIp']
else:
data['domain_ip'] = None
if 'tags' in source:
data['tags'] = json.dumps(source['tags'])
data['message'] = str(source['message'])
data['index'] = index
doc.append(data)
start = time.perf_counter()
write_batch(doc)
#print("Writing slot:{0}".format(len(doc)))
elapsed = time.perf_counter() - start
#print(f'Time: {elapsed:0.4f} seconds')
#Stop the loop when iteration is closed
except StopIteration:
break
return True
def Reverse(lst):
return [ele for ele in reversed(lst)]
#Main function
def main():
todayIndex = "dns-{today}"
todayTime = dt.now()
todayIndexStr = todayIndex.format(today = todayTime.strftime('%Y.%m.%d'))
    #read the track file into a list so it can be iterated like a list
with open(track_dns) as file:
lines = file.readlines()
lines = [line.rstrip() for line in lines]
#lines = Reverse(lines)
#print (lines)
#time.sleep(10)
#indexLists = ['dns-2020.11.20']
#Main controller
for index in lines:
global track # variable for dns index
global gte_timestamp
track = index
current_file = find_latest_file (track_dns)
#print(current_file)
        if Path('/opt/elk-migration/data/' + current_file.rstrip() + '.log').is_file():
            # file exists
            with open('/opt/elk-migration/data/' + current_file.rstrip() + '.log', 'rb') as f:
try: # catch OSError in case of a one line file
f.seek(-2, os.SEEK_END)
while f.read(1) != b'\n':
f.seek(-2, os.SEEK_CUR) #bigdata last record fetch
except OSError:
f.seek(0)
last_line = f.readline().decode()
match = re.search(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{3}Z', last_line) #regex pattern match for extract timestamp for stating timestamp record
gte_timestamp = datetime.strptime(match.group(0), '%Y-%m-%dT%H:%M:%S.%fZ')
print(gte_timestamp)
else:
gte_timestamp = None
try:
es.indices.put_settings(index=index,body= {"index" : {"max_result_window" : 100000 }})
dataFrme = get_data(query_call(str(index)))
except elasticsearch.NotFoundError as err:
continue
if __name__ == "__main__":
format = "%(asctime)s: %(message)s"
logging.basicConfig(format=format, level=logging.INFO,
datefmt="%H:%M:%S")
logging.info("Main : before creating Event")
start = time.perf_counter()
main()
elapsed = time.perf_counter() - start
#print(f'Total Time spend: {elapsed:0.4f} seconds')
logging.info("Main : before running Event") |
py | b411ffd26977e35433e4aa6f32347f9bfc10c77c | """
Example backend that marks requests as done without doing anything.
Used for testing.
"""
from proxytest import backend
from proxytest.context import ProxyTestContext
class FakeException(Exception):
pass
class DummyBackend(backend.AbstractBackend):
name = 'dummy-exception'
def process(self, context: ProxyTestContext):
raise FakeException('Fake exception')
|
py | b41200b60c92b78d53582a961cfc8b645893bf28 | from base import BaseDataSet, BaseDataLoader
from utils import pallete
import numpy as np
import os
import scipy
import torch
from PIL import Image
import cv2
from torch.utils.data import Dataset
from torchvision import transforms
import json
import random
class RandomGaussianBlur(object):
def __init__(self, radius=5):
self.radius = radius
def __call__(self, image):
image = np.asarray(image)
if random.random() < 0.5:
image = cv2.GaussianBlur(image, (self.radius, self.radius), 0)
image = transforms.functional.to_pil_image(image)
return image
class PairThermalDataset(BaseDataSet):
def __init__(self, **kwargs):
self.num_classes = 17
self.void_classes = [0]
self.valid_classes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
self.ignore_index = 255
self.class_map = dict(zip(self.valid_classes, range(17)))
self.datalist = kwargs.pop("datalist")
self.stride = kwargs.pop('stride')
self.iou_bound = kwargs.pop('iou_bound')
self.palette = pallete.get_voc_pallete(self.num_classes)
super(PairThermalDataset, self).__init__(**kwargs)
self.train_transform = transforms.Compose([
transforms.ToPILImage(),
RandomGaussianBlur(),
transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.ToTensor(),
self.normalize,
])
def _set_files(self):
prefix = "dataloaders/thermal_splits{}".format(self.datalist)
if self.split == "val":
file_list = os.path.join(prefix, f"{self.split}" + ".txt")
elif self.split in ["train_supervised", "train_unsupervised"]:
file_list = os.path.join(prefix, f"{self.n_labeled_examples}_{self.split}" + ".txt")
else:
raise ValueError(f"Invalid split name {self.split}")
file_list = [line.rstrip().split(' ') for line in tuple(open(file_list, "r"))]
self.files, self.labels = list(zip(*file_list))
def _load_data(self, index):
image_path = os.path.join(self.root, self.files[index][:][1:])
image = np.asarray(Image.open(image_path), dtype=np.float32)
image_id = self.files[index].split("/")[-1].split(".")[0]
if self.use_weak_lables:
label_path = os.path.join(self.weak_labels_output, image_id+".png")
else:
label_path = os.path.join(self.root, self.labels[index][:][1:])
label = np.asarray(Image.open(label_path), dtype=np.int32)
label = self.encode_segmap(np.array(label, dtype=np.uint8))
return image, label, image_id
def __getitem__(self, index):
image_path = os.path.join(self.root, self.files[index][:][1:])
image = np.asarray(Image.open(image_path))
        if self.use_weak_lables:
            image_id = self.files[index].split("/")[-1].split(".")[0]
            label_path = os.path.join(self.weak_labels_output, image_id+".png")
else:
label_path = os.path.join(self.root, self.labels[index][:][1:])
label = np.asarray(Image.open(label_path), dtype=np.int32)
h, w, _ = image.shape
longside = random.randint(int(self.base_size*0.8), int(self.base_size*2.0))
h, w = (longside, int(1.0 * longside * w / h + 0.5)) if h > w else (int(1.0 * longside * h / w + 0.5), longside)
image = np.asarray(Image.fromarray(np.uint8(image)).resize((w, h), Image.BICUBIC))
label = cv2.resize(label, (w, h), interpolation=cv2.INTER_NEAREST)
crop_h, crop_w = self.crop_size, self.crop_size
pad_h = max(0, crop_h - h)
pad_w = max(0, crop_w - w)
pad_kwargs = {
"top": 0,
"bottom": pad_h,
"left": 0,
"right": pad_w,
"borderType": cv2.BORDER_CONSTANT,}
if pad_h > 0 or pad_w > 0:
image = cv2.copyMakeBorder(image, value=self.image_padding, **pad_kwargs)
label = cv2.copyMakeBorder(label, value=self.ignore_index, **pad_kwargs)
x1 = random.randint(0, w+pad_w-crop_w)
y1 = random.randint(0, h+pad_h-crop_h)
max_iters = 50
k = 0
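        # Rejection-sample a second crop whose IoU with the first lies inside self.iou_bound;
        # after max_iters failed attempts fall back to an identical second crop (x2 = x1, y2 = y1).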
while k < max_iters:
x2 = random.randint(0, w+pad_w-crop_w)
y2 = random.randint(0, h+pad_h-crop_h)
            # crop-relative coordinates should be a multiple of self.stride
x2 = (x2-x1) // self.stride * self.stride + x1
y2 = (y2-y1) // self.stride * self.stride + y1
if x2 < 0: x2 += self.stride
if y2 < 0: y2 += self.stride
if (crop_w - abs(x2-x1)) > 0 and (crop_h - abs(y2-y1)) > 0:
inter = (crop_w - abs(x2-x1)) * (crop_h - abs(y2-y1))
union = 2*crop_w*crop_h - inter
iou = inter / union
if iou >= self.iou_bound[0] and iou <= self.iou_bound[1]:
break
k += 1
if k == max_iters:
x2 = x1
y2 = y1
overlap1_ul = [max(0, y2-y1), max(0, x2-x1)]
overlap1_br = [min(self.crop_size, self.crop_size+y2-y1, h//self.stride * self.stride), min(self.crop_size, self.crop_size+x2-x1, w//self.stride * self.stride)]
overlap2_ul = [max(0, y1-y2), max(0, x1-x2)]
overlap2_br = [min(self.crop_size, self.crop_size+y1-y2, h//self.stride * self.stride), min(self.crop_size, self.crop_size+x1-x2, w//self.stride * self.stride)]
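        # overlap1_* / overlap2_* are the upper-left and bottom-right corners of the region
        # shared by the two crops, expressed in each crop's own coordinate frame and clipped
        # to stride multiples, so the two views can later be aligned pixel-for-pixel.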
try:
assert (overlap1_br[0]-overlap1_ul[0]) * (overlap1_br[1]-overlap1_ul[1]) == (overlap2_br[0]-overlap2_ul[0]) * (overlap2_br[1]-overlap2_ul[1])
assert overlap1_br[0] >= 0 and overlap1_ul[0] >= 0 and overlap1_br[1] >= 0 and overlap1_ul[1] >= 0
assert overlap2_br[0] >= 0 and overlap2_ul[0] >= 0 and overlap2_br[1] >= 0 and overlap2_ul[1] >= 0
except:
print("k: {}".format(k))
print("h: {}, w: {}".format(h, w))
print("image.shape: ", image.shape)
print("x1: {}, x2: {}, y1: {}, y2: {}".format(x1, x2, y1, y2))
print("image_path:", image_path)
print("ul1: ", overlap1_ul)
print("br1: ", overlap1_br)
print("ul2: ", overlap2_ul)
print("br2: ", overlap2_br)
print("index: ", index)
exit()
image1 = image[y1:y1+self.crop_size, x1:x1+self.crop_size].copy()
image2 = image[y2:y2+self.crop_size, x2:x2+self.crop_size].copy()
label1 = label[y1:y1+self.crop_size, x1:x1+self.crop_size].copy()
label2 = label[y2:y2+self.crop_size, x2:x2+self.crop_size].copy()
try:
assert image1[overlap1_ul[0]:overlap1_br[0], overlap1_ul[1]:overlap1_br[1]].shape == image2[overlap2_ul[0]:overlap2_br[0], overlap2_ul[1]:overlap2_br[1]].shape
except:
print("k: {}".format(k))
print("h: {}, w: {}".format(h, w))
print("image.shape: ", image.shape)
print("x1: {}, x2: {}, y1: {}, y2: {}".format(x1, x2, y1, y2))
print("image_path:", image_path)
print("ul1: ", overlap1_ul)
print("br1: ", overlap1_br)
print("ul2: ", overlap2_ul)
print("br2: ", overlap2_br)
print("index: ", index)
exit()
flip1 = False
if random.random() < 0.5:
image1 = np.fliplr(image1)
label1 = np.fliplr(label1)
flip1 = True
flip2 = False
if random.random() < 0.5:
image2 = np.fliplr(image2)
label2 = np.fliplr(label2)
flip2 = True
flip = [flip1, flip2]
image1 = self.train_transform(image1)
image2 = self.train_transform(image2)
images = torch.stack([image1, image2])
labels = torch.from_numpy(np.stack([label1, label2]))
return images, labels, overlap1_ul, overlap1_br, overlap2_ul, overlap2_br, flip
def decode_segmap(self,temp):
r = temp.copy()
g = temp.copy()
b = temp.copy()
colors = self.get_class_colors()
for l in range(0, self.num_classes):
r[temp == l] = colors[l][0]
g[temp == l] = colors[l][1]
b[temp == l] = colors[l][2]
rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
rgb[:, :, 0] = r / 255.0
rgb[:, :, 1] = g / 255.0
rgb[:, :, 2] = b / 255.0
return rgb
def encode_segmap(self,mask):
# Put all void classes to zero
for _voidc in self.void_classes:
mask[mask == _voidc] = self.ignore_index
for _validc in self.valid_classes:
mask[mask == _validc] = self.class_map[_validc]
return mask
class PairThermal(BaseDataLoader):
def __init__(self, kwargs):
self.MEAN = [0.485, 0.456, 0.406]
self.STD = [0.229, 0.224, 0.225]
self.batch_size = kwargs.pop('batch_size')
kwargs['mean'] = self.MEAN
kwargs['std'] = self.STD
kwargs['ignore_index'] = 255
sampler_shuffle = kwargs.pop('shuffle')
num_workers = kwargs.pop('num_workers')
self.dataset = PairThermalDataset(**kwargs)
shuffle = False
dist_sampler = torch.utils.data.distributed.DistributedSampler(self.dataset, shuffle=sampler_shuffle)
super(PairThermal, self).__init__(self.dataset, self.batch_size, shuffle, num_workers, val_split=None, dist_sampler=dist_sampler)
class ThermalDataset(BaseDataSet):
def __init__(self, **kwargs):
self.num_classes = 17
self.datalist = kwargs.pop("datalist")
self.palette = pallete.get_voc_pallete(self.num_classes)
self.void_classes = [0]
self.valid_classes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]
self.ignore_index = 255
self.class_map = dict(zip(self.valid_classes, range(17)))
super(ThermalDataset, self).__init__(**kwargs)
def _set_files(self):
prefix = "dataloaders/thermal_splits{}".format(self.datalist)
if self.split == "val":
file_list = os.path.join(prefix, f"{self.split}" + ".txt")
elif self.split in ["train_supervised", "train_unsupervised"]:
file_list = os.path.join(prefix, f"{self.n_labeled_examples}_{self.split}" + ".txt")
else:
raise ValueError(f"Invalid split name {self.split}")
file_list = [line.rstrip().split(' ') for line in tuple(open(file_list, "r"))]
self.files, self.labels = list(zip(*file_list))
def _load_data(self, index):
image_path = os.path.join(self.root, self.files[index][:][1:])
image = np.asarray(Image.open(image_path), dtype=np.float32)
image_id = self.files[index].split("/")[-1].split(".")[0]
if self.use_weak_lables:
label_path = os.path.join(self.weak_labels_output, image_id+".png")
else:
label_path = os.path.join(self.root, self.labels[index][:][1:])
label = np.asarray(Image.open(label_path), dtype=np.int32)
label = self.encode_segmap(np.array(label, dtype=np.uint8))
return image, label, image_id
def decode_segmap(self,temp):
r = temp.copy()
g = temp.copy()
b = temp.copy()
colors = self.get_class_colors()
for l in range(0, self.num_classes):
r[temp == l] = colors[l][0]
g[temp == l] = colors[l][1]
b[temp == l] = colors[l][2]
rgb = np.zeros((temp.shape[0], temp.shape[1], 3))
rgb[:, :, 0] = r / 255.0
rgb[:, :, 1] = g / 255.0
rgb[:, :, 2] = b / 255.0
return rgb
def encode_segmap(self,mask):
# Put all void classes to zero
for _voidc in self.void_classes:
mask[mask == _voidc] = self.ignore_index
for _validc in self.valid_classes:
mask[mask == _validc] = self.class_map[_validc]
return mask
def get_class_colors(*args):
return [[128, 64, 128],
[244, 35, 232],
[220, 20, 60],
[255, 0, 0],
[0, 0, 142],
[0, 0, 70],
[0, 60, 100],
[119, 11, 32],
[70, 70, 70],
[190, 153, 153],
[150, 100, 100],
[153, 153, 153],
[220, 220, 0],
[250, 170, 30],
[107, 142, 35],
[152, 251, 152],
[70, 130, 180]]
class Thermal(BaseDataLoader):
def __init__(self, kwargs):
self.MEAN = [0.485, 0.456, 0.406]
self.STD = [0.229, 0.224, 0.225]
self.batch_size = kwargs.pop('batch_size')
kwargs['mean'] = self.MEAN
kwargs['std'] = self.STD
kwargs['ignore_index'] = 255
sampler_shuffle = kwargs.pop('shuffle')
num_workers = kwargs.pop('num_workers')
self.dataset = ThermalDataset(**kwargs)
shuffle = False
dist_sampler = torch.utils.data.distributed.DistributedSampler(self.dataset, shuffle=sampler_shuffle)
super(Thermal, self).__init__(self.dataset, self.batch_size, shuffle, num_workers, val_split=None, dist_sampler=dist_sampler)
|
py | b412016d1ee4f6f68e12e531270af31a9b6fcaa2 | class PacMan:
def __init__(self, longueurCase):
"""
Argument:
longueurCase = La longueur d'une case
"""
self.longueurCase = longueurCase
        self.r = 20 # Radius of the pacman character
        self.x = 10.5 * longueurCase # Spawn coordinates of pacman
self.y = 15.5 * longueurCase
self.directionAttente = ""
self.direction = "" # Stocke la direction en cours afin de faire bouger pacman
def reset(self):
"""
Cette fonction reset pacman lors du demarrage d'une partie
Fonction appelee lors d'un gameover
"""
self.x = 10.5 * self.longueurCase
self.y = 15.5 * self.longueurCase
self.buffed = False
self.directionAttente = ""
self.direction = ""
def dessine(self):
"""
Cette fonction permet de dessiner pac man sur la grille
"""
rectMode(CENTER)
fill(color(255, 213, 0))
ellipse(self.x, self.y, self.r, self.r)
def est_au_centre(self):
"""
Cette fonction permet de verifier si pacman est centre de la case dans la quelle il se situe
"""
coo = [int(self.x // self.longueurCase), int(self.y // self.longueurCase)] # Les coordonnees de pacman pour regarder les cases aux alentours
X, Y = coo[0], coo[1]
milieuX = round(X * self.longueurCase + self.longueurCase / 2, 2)
milieuY = round(Y * self.longueurCase + self.longueurCase / 2, 2)
return milieuX == round(self.x, 2) and milieuY == round(self.y, 2)
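    # Example (illustrative, assuming longueurCase = 30): the centre of cell (10, 15) is
    # (10 * 30 + 15, 15 * 30 + 15) = (315, 465), so est_au_centre() only returns True when
    # (self.x, self.y) rounds to exactly that point.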
def deplacement_valide(self, grille):
"""
Cette fonction va verifier si le deplacement de pacman est valide (Si il y a un mur ou non)
Argument:
grille = La grille de jeu (Les cases) pour analyser l'environnement (Detecter murs etc..)
"""
coo = [int(self.x // self.longueurCase), int(self.y // self.longueurCase)] # Les coordonnees de pacman pour regarder les cases aux alentours
X, Y = coo[0], coo[1]
        # Check for walls in each direction (the `not` returns True if the next cell is not a wall, False otherwise)
if self.directionAttente == "up":
return not grille[X][Y - 1] == 10
if self.directionAttente == "down":
return not grille[X][Y + 1] == 10
if self.directionAttente == "left":
return not grille[X - 1][Y] == 10
if self.directionAttente == "right":
return not grille[X + 1][Y] == 10
def verifiePacGums(self, grille):
"""
Cette fonction va verifier si pacman se trouve sur une pacgum, si oui retourne un tuple (Booleen si il est sur une pacgum, la coordonnee X et Y)
Argument:
grille = La grille de jeu (Les cases) pour analyser l'environnement (Detecter murs etc..)
"""
coo = [int(self.x // self.longueurCase), int(self.y // self.longueurCase)] # Les coordonnees de pacman pour regarder les cases aux alentours
X, Y = coo[0], coo[1]
if grille[X][Y] in [1, 5]: # Est une pacgum / super pacgum
return (True, X, Y)
return (False, X, Y)
def avancer(self, vitesse):
"""
Cette fonction permet de faire avancer pacman dans une direction avec une vitesse definie
Argument:
vitesse = La vitesse de pacman
"""
vitesse = vitesse * self.longueurCase
if self.direction == "up":
self.y -= vitesse
elif self.direction == "down":
self.y += vitesse
elif self.direction == "left":
self.x -= vitesse
elif self.direction == "right":
self.x += vitesse
def deplacement(self, grille):
"""
Cette fonction permet de controler pac man
Argument:
grille = La grille de jeu (Les cases) pour analyser l'environnement (Detecter murs etc..)
"""
# Pour chaques conditions, on va changer le deplacement en cours (Quand les touches du clavier sont utilisees
if keyPressed and key == CODED:
if keyCode == UP:
self.directionAttente = "up"
elif keyCode == DOWN:
self.directionAttente = "down"
elif keyCode == LEFT:
self.directionAttente = "left"
elif keyCode == RIGHT:
self.directionAttente = "right"
        # Check whether pacman is centered in its cell; if so, move it
if self.est_au_centre():
if self.deplacement_valide(grille):
self.direction = self.directionAttente
self.avancer(.1)
else:
self.directionAttente = self.direction
else:
self.avancer(.1)
|
py | b412019b14dfe73ef8625876c39b2eea1855f4db | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
from test.unit.rules import BaseRuleTestCase
from cfnlint.rules.functions.SubNeeded import SubNeeded # pylint: disable=E0401
class TestSubNeeded(BaseRuleTestCase):
"""Test Rules Get Att """
def setUp(self):
"""Setup"""
super(TestSubNeeded, self).setUp()
self.collection.register(SubNeeded())
self.success_templates = [
'test/fixtures/templates/good/functions/sub.yaml',
'test/fixtures/templates/good/functions/sub_needed.yaml',
'test/fixtures/templates/good/functions/sub_needed_transform.yaml',
]
def test_file_positive(self):
"""Test Positive"""
self.helper_file_positive()
def test_template_config(self):
"""Test custom excludes configuration"""
self.helper_file_rule_config(
'test/fixtures/templates/good/functions/sub_needed_custom_excludes.yaml',
{'custom_excludes': '^\\$\\{Stage\\}$'}, 0
)
self.helper_file_rule_config(
'test/fixtures/templates/good/functions/sub_needed_custom_excludes.yaml',
{}, 1
)
def test_file_negative(self):
"""Test failure"""
self.helper_file_negative('test/fixtures/templates/bad/functions/sub_needed.yaml', 7)
|
py | b41201d8d7d6bc9e037004579834d4278964f5cf | import chainer
import chainerx
from chainerx_tests import array_utils
from chainerx_tests import dtype_utils
from chainerx_tests import op_utils
n_step_lstm_dtypes_valid = dtype_utils._permutate_dtype_mapping([
# Floats.
(('float16', ), ()),
(('float32', ), ()),
(('float64', ), ()),
])
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'n_layers,hidden_size,input_size,batches', [
(2, 2, 1, (1, 1, 1)),
(2, 2, 3, (3, 2, 1)),
(3, 8, 4, (4, 2, 1)),
(4, 12, 4, (4, 3, 2)),
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes, out_dtype', n_step_lstm_dtypes_valid)
])
))
class TestNStepLstm(op_utils.ChainerOpTest):
def setup(self):
self.check_forward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update({
'rtol': 5e-3, 'atol': 5e-2})
if self.in_dtypes[0] == 'float16':
self.check_forward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_double_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
device = chainerx.get_default_device()
if device.backend.name == 'cuda':
if self.in_dtypes[0] != 'float32':
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
h_shape = (self.n_layers, self.batches[0], self.hidden_size)
dtype = self.in_dtypes[0]
h = array_utils.uniform(h_shape, dtype)
c = array_utils.uniform(h_shape, dtype)
in_size = self.input_size
out_size = self.hidden_size
xs = [array_utils.uniform((self.batches[b], in_size), dtype)
for b in range(len(self.batches))]
def w_in(i, j):
return in_size if i == 0 and j < 4 else out_size
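        # Each LSTM layer takes 8 weight matrices and 8 bias vectors (4 gates, each with an
        # input-side and a hidden-side parameter); w_in(i, j) gives the second dimension of
        # the j-th matrix in layer i: the network input size for layer 0's input-side weights
        # (j < 4), the hidden size everywhere else.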
inputs = []
inputs.append(h)
inputs.append(c)
for i in range(len(self.batches)):
inputs.append(xs[i])
for n in range(self.n_layers):
for i in range(8):
inputs.append(array_utils.uniform(
(out_size, w_in(n, i)), dtype))
for i in range(8):
inputs.append(array_utils.uniform((out_size,), dtype))
return tuple(inputs)
def process_input(self, inputs):
h = inputs[0]
c = inputs[1]
xs = inputs[2:2 + len(self.batches)]
ws = []
bs = []
index = 2 + len(self.batches)
for n in range(self.n_layers):
ws.append(inputs[index: index + 8])
bs.append(inputs[index + 8: index + 16])
index += 16
return h, c, ws, bs, xs
def forward_chainerx(self, inputs):
h, c, ws, bs, xs = self.process_input(inputs)
out = chainerx.n_step_lstm(self.n_layers, h, c, ws, bs, xs)
rets = []
rets.append(out[0])
rets.append(out[1])
for i in range(len(out[2])):
rets.append(out[2][i])
return tuple(rets)
def forward_chainer(self, inputs):
h, c, ws, bs, xs = self.process_input(inputs)
out = chainer.functions.n_step_lstm(
self.n_layers, 0.0, h, c, ws, bs, xs)
rets = []
rets.append(out[0])
rets.append(out[1])
for i in range(len(out[2])):
rets.append(out[2][i])
return tuple(rets)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'n_layers,hidden_size,input_size,batches', [
(1, 2, 1, (1, 1, 1)),
(2, 6, 8, (4, 2, 2)),
(3, 8, 4, (4, 2, 1)),
(4, 12, 4, (4, 3, 2)),
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes,out_dtype', n_step_lstm_dtypes_valid)
])
))
class TestNStepBiLstm(op_utils.ChainerOpTest):
def setup(self):
self.check_forward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update({
'rtol': 5e-3, 'atol': 5e-2})
if self.in_dtypes[0] == 'float16':
self.check_forward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_double_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
device = chainerx.get_default_device()
if device.backend.name == 'cuda':
if self.in_dtypes[0] != 'float32':
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
h_shape = (self.n_layers * 2, self.batches[0], self.hidden_size)
dtype = self.in_dtypes[0]
h = array_utils.uniform(h_shape, dtype)
c = array_utils.uniform(h_shape, dtype)
in_size = self.input_size
out_size = self.hidden_size
xs = [array_utils.uniform((self.batches[b], in_size), dtype)
for b in range(len(self.batches))]
def w_in(i, j):
if i == 0 and j < 4:
return in_size
elif i > 0 and j < 4:
return out_size * 2
else:
return out_size
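        # In the bidirectional stack, layers above the first consume the concatenated forward
        # and backward hidden states of the layer below, hence out_size * 2 for their
        # input-side (j < 4) weights.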
inputs = []
inputs.append(h)
inputs.append(c)
for i in range(len(self.batches)):
inputs.append(xs[i])
for n in range(self.n_layers):
for direction in (0, 1):
for i in range(8):
inputs.append(array_utils.uniform(
(out_size, w_in(n, i)), dtype))
for i in range(8):
inputs.append(array_utils.uniform((out_size,), dtype))
return tuple(inputs)
def process_input(self, inputs):
h = inputs[0]
c = inputs[1]
xs = inputs[2:2 + len(self.batches)]
ws = []
bs = []
index = 2 + len(self.batches)
for n in range(self.n_layers):
ws.append(inputs[index: index + 8])
bs.append(inputs[index + 8: index + 16])
ws.append(inputs[index + 16: index + 24])
bs.append(inputs[index + 24: index + 32])
index += 32
return h, c, ws, bs, xs
def forward_chainerx(self, inputs):
h, c, ws, bs, xs = self.process_input(inputs)
out = chainerx.n_step_bilstm(self.n_layers, h, c, ws, bs, xs)
rets = []
rets.append(out[0])
rets.append(out[1])
for i in range(len(out[2])):
rets.append(out[2][i])
return tuple(rets)
def forward_chainer(self, inputs):
h, c, ws, bs, xs = self.process_input(inputs)
out = chainer.functions.n_step_bilstm(
self.n_layers, 0.0, h, c, ws, bs, xs)
rets = []
rets.append(out[0])
rets.append(out[1])
for i in range(len(out[2])):
rets.append(out[2][i])
return tuple(rets)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'n_layers,hidden_size,input_size,batches', [
(2, 2, 1, (1, 1, 1)),
(2, 2, 3, (3, 2, 1)),
(3, 8, 4, (4, 2, 1)),
(4, 6, 4, (4, 3, 2)),
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes, out_dtype', n_step_lstm_dtypes_valid)
])
))
@op_utils.fix_random() # This test is unstable.
class TestNStepGru(op_utils.ChainerOpTest):
def setup(self):
self.check_forward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update({
'rtol': 5e-3, 'atol': 5e-2})
if self.in_dtypes[0] == 'float16':
self.check_forward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_double_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
device = chainerx.get_default_device()
if device.backend.name == 'cuda':
if self.in_dtypes[0] != 'float32':
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
h_shape = (self.n_layers, self.batches[0], self.hidden_size)
dtype = self.in_dtypes[0]
h = array_utils.uniform(h_shape, dtype)
in_size = self.input_size
out_size = self.hidden_size
xs = [array_utils.uniform((self.batches[b], in_size), dtype)
for b in range(len(self.batches))]
def w_in(i, j):
return in_size if i == 0 and j < 3 else out_size
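        # A GRU layer carries 6 weight matrices and 6 bias vectors (3 gates, each with an
        # input-side and a hidden-side parameter), so j < 3 marks the input-side weights here.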
inputs = []
inputs.append(h)
for i in range(len(self.batches)):
inputs.append(xs[i])
for n in range(self.n_layers):
for i in range(6):
inputs.append(array_utils.uniform(
(out_size, w_in(n, i)), dtype))
for i in range(6):
inputs.append(array_utils.uniform((out_size,), dtype))
return tuple(inputs)
def process_input(self, inputs):
h = inputs[0]
xs = inputs[1:1 + len(self.batches)]
ws = []
bs = []
index = 1 + len(self.batches)
for n in range(self.n_layers):
ws.append(inputs[index: index + 6])
bs.append(inputs[index + 6: index + 12])
index += 12
return h, ws, bs, xs
def forward_chainerx(self, inputs):
h, ws, bs, xs = self.process_input(inputs)
out = chainerx.n_step_gru(self.n_layers, h, ws, bs, xs)
rets = []
rets.append(out[0])
for i in range(len(out[1])):
rets.append(out[1][i])
return tuple(rets)
def forward_chainer(self, inputs):
h, ws, bs, xs = self.process_input(inputs)
out = chainer.functions.n_step_gru(
self.n_layers, 0.0, h, ws, bs, xs)
rets = []
rets.append(out[0])
for i in range(len(out[1])):
rets.append(out[1][i])
return tuple(rets)
@op_utils.op_test(['native:0', 'cuda:0'])
@chainer.testing.parameterize(*(
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'n_layers,hidden_size,input_size,batches', [
(2, 2, 1, (1, 1, 1)),
(2, 2, 3, (3, 2, 1)),
(3, 4, 4, (4, 2, 1)),
(4, 5, 4, (4, 3, 2)),
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes,out_dtype', n_step_lstm_dtypes_valid)
])
))
class TestNStepBiGRU(op_utils.ChainerOpTest):
def setup(self):
self.check_forward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update({
'rtol': 5e-2, 'atol': 5e-2})
if self.in_dtypes[0] == 'float16':
self.check_forward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_double_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
device = chainerx.get_default_device()
if device.backend.name == 'cuda':
if self.in_dtypes[0] != 'float32':
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
h_shape = (self.n_layers * 2, self.batches[0], self.hidden_size)
dtype = self.in_dtypes[0]
h = array_utils.uniform(h_shape, dtype)
in_size = self.input_size
out_size = self.hidden_size
xs = [array_utils.uniform((self.batches[b], in_size), dtype)
for b in range(len(self.batches))]
def w_in(i, j):
if i == 0 and j < 3:
return in_size
elif i > 0 and j < 3:
return out_size * 2
else:
return out_size
inputs = []
inputs.append(h)
for i in range(len(self.batches)):
inputs.append(xs[i])
for n in range(self.n_layers):
for direction in (0, 1):
for i in range(6):
inputs.append(array_utils.uniform(
(out_size, w_in(n, i)), dtype))
for i in range(6):
inputs.append(array_utils.uniform((out_size,), dtype))
return tuple(inputs)
def process_input(self, inputs):
h = inputs[0]
xs = inputs[1:1 + len(self.batches)]
ws = []
bs = []
index = 1 + len(self.batches)
for n in range(self.n_layers):
ws.append(inputs[index: index + 6])
bs.append(inputs[index + 6: index + 12])
ws.append(inputs[index + 12: index + 18])
bs.append(inputs[index + 18: index + 24])
index += 24
return h, ws, bs, xs
def forward_chainerx(self, inputs):
h, ws, bs, xs = self.process_input(inputs)
out = chainerx.n_step_bigru(self.n_layers, h, ws, bs, xs)
rets = []
rets.append(out[0])
for i in range(len(out[1])):
rets.append(out[1][i])
return tuple(rets)
def forward_chainer(self, inputs):
h, ws, bs, xs = self.process_input(inputs)
out = chainer.functions.n_step_bigru(
self.n_layers, 0.0, h, ws, bs, xs)
rets = []
rets.append(out[0])
for i in range(len(out[1])):
rets.append(out[1][i])
return tuple(rets)
@op_utils.op_test(['native:0', 'cuda:0'])
# ReLU activation is unstable around 0 but can seemingly not be dodged
# automatically.
@op_utils.fix_random()
@chainer.testing.parameterize(*(
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'n_layers,hidden_size,input_size,batches,activation', [
(2, 2, 1, (1, 1, 1), "tanh"),
(2, 2, 1, (1, 1, 1), "relu"),
(2, 2, 3, (3, 2, 1), "tanh"),
(2, 2, 3, (3, 2, 1), "relu"),
(3, 4, 4, (4, 2, 1), "tanh"),
(3, 4, 4, (4, 2, 1), "relu"),
(4, 5, 4, (4, 3, 2), "tanh"),
(4, 5, 4, (4, 3, 2), "relu"),
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes, out_dtype', n_step_lstm_dtypes_valid)
])
))
class TestNStepRNN(op_utils.ChainerOpTest):
check_numpy_strides_compliance = False
dodge_nondifferentiable = True
def setup(self):
self.check_forward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update({
'rtol': 5e-2, 'atol': 5e-2})
if self.in_dtypes[0] == 'float16':
self.check_forward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_double_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
device = chainerx.get_default_device()
if device.backend.name == 'cuda':
if self.in_dtypes[0] != 'float32':
self.skip_forward_test = True
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
h_shape = (self.n_layers, self.batches[0], self.hidden_size)
dtype = self.in_dtypes[0]
h = array_utils.uniform(h_shape, dtype)
in_size = self.input_size
out_size = self.hidden_size
xs = [array_utils.uniform((self.batches[b], in_size), dtype)
for b in range(len(self.batches))]
def w_in(i, j):
return in_size if i == 0 and j < 1 else out_size
inputs = []
inputs.append(h)
for i in range(len(self.batches)):
inputs.append(xs[i])
for n in range(self.n_layers):
for i in range(2):
inputs.append(array_utils.uniform(
(out_size, w_in(n, i)), dtype))
for i in range(2):
inputs.append(array_utils.uniform((out_size,), dtype))
return tuple(inputs)
def process_input(self, inputs):
h = inputs[0]
xs = inputs[1:1 + len(self.batches)]
ws = []
bs = []
index = 1 + len(self.batches)
for n in range(self.n_layers):
ws.append(inputs[index: index + 2])
bs.append(inputs[index + 2: index + 4])
index += 4
return h, ws, bs, xs
def forward_chainerx(self, inputs):
h, ws, bs, xs = self.process_input(inputs)
out = chainerx.n_step_rnn(
self.n_layers, h, ws, bs, xs, self.activation)
rets = []
rets.append(out[0])
for i in range(len(out[1])):
rets.append(out[1][i])
return tuple(rets)
def forward_chainer(self, inputs):
h, ws, bs, xs = self.process_input(inputs)
out = chainer.functions.n_step_rnn(
self.n_layers, 0.0, h, ws, bs, xs, self.activation)
rets = []
rets.append(out[0])
for i in range(len(out[1])):
rets.append(out[1][i])
return tuple(rets)
@op_utils.op_test(['native:0', 'cuda:0'])
# ReLU activation is unstable around 0 but can seemingly not be dodged
# automatically.
@op_utils.fix_random()
@chainer.testing.parameterize(*(
chainer.testing.product([
chainer.testing.from_pytest_parameterize(
'n_layers,hidden_size,input_size,batches,activation', [
(2, 2, 1, (1, 1, 1), "tanh"),
(2, 2, 1, (1, 1, 1), "relu"),
(2, 2, 3, (3, 2, 1), "tanh"),
(2, 2, 3, (3, 2, 1), "relu"),
(3, 4, 4, (4, 2, 1), "tanh"),
(3, 4, 4, (4, 2, 1), "relu"),
]),
chainer.testing.from_pytest_parameterize(
'in_dtypes,out_dtype', n_step_lstm_dtypes_valid)
])
))
class TestNStepBiRNN(op_utils.ChainerOpTest):
check_numpy_strides_compliance = False
dodge_nondifferentiable = True
def setup(self):
self.check_forward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_backward_options.update({
'rtol': 1e-2, 'atol': 1e-2})
self.check_double_backward_options.update({
'rtol': 5e-2, 'atol': 5e-2})
if self.in_dtypes[0] == 'float16':
self.check_forward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
self.check_double_backward_options.update({
'rtol': 1e-1, 'atol': 1e-1})
device = chainerx.get_default_device()
if device.backend.name == 'cuda':
if self.in_dtypes[0] != 'float32':
self.skip_forward_test = True
self.skip_backward_test = True
self.skip_double_backward_test = True
def generate_inputs(self):
h_shape = (self.n_layers * 2, self.batches[0], self.hidden_size)
dtype = self.in_dtypes[0]
low = -1.0
high = 1.0
if dtype == 'float16':
low = -0.5
high = 0.5
h = array_utils.uniform(h_shape, dtype)
in_size = self.input_size
out_size = self.hidden_size
xs = [array_utils.uniform((self.batches[b], in_size),
dtype, low=low, high=high)
for b in range(len(self.batches))]
def w_in(i, j):
if i == 0 and j < 1:
return in_size
elif i > 0 and j < 1:
return out_size * 2
else:
return out_size
inputs = []
inputs.append(h)
for i in range(len(self.batches)):
inputs.append(xs[i])
for n in range(self.n_layers):
for direction in (0, 1):
for i in range(2):
inputs.append(array_utils.uniform(
(out_size, w_in(n, i)), dtype, low=low, high=high))
for i in range(2):
inputs.append(array_utils.uniform(
(out_size,), dtype, low=low, high=high))
return tuple(inputs)
def process_input(self, inputs):
h = inputs[0]
xs = inputs[1:1 + len(self.batches)]
ws = []
bs = []
index = 1 + len(self.batches)
for n in range(self.n_layers):
ws.append(inputs[index: index + 2])
bs.append(inputs[index + 2: index + 4])
ws.append(inputs[index + 4: index + 6])
bs.append(inputs[index + 6: index + 8])
index += 8
return h, ws, bs, xs
def forward_chainerx(self, inputs):
h, ws, bs, xs = self.process_input(inputs)
out = chainerx.n_step_birnn(
self.n_layers, h, ws, bs, xs, self.activation)
rets = []
rets.append(out[0])
for i in range(len(out[1])):
rets.append(out[1][i])
return tuple(rets)
def forward_chainer(self, inputs):
h, ws, bs, xs = self.process_input(inputs)
out = chainer.functions.n_step_birnn(
self.n_layers, 0.0, h, ws, bs, xs, self.activation)
rets = []
rets.append(out[0])
for i in range(len(out[1])):
rets.append(out[1][i])
return tuple(rets)
|
py | b41201f6ff942737f6494166e276e51b963932f9 | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 12, transform = "Anscombe", sigma = 0.0, exog_count = 100, ar_order = 12); |
py | b412031b0f001f6f4354699235cd0937476b2884 | # vim: expandtab tabstop=4 shiftwidth=4
class DSPFTWException(Exception):
pass
class DataTypeException(DSPFTWException):
pass
class EndiannessException(DSPFTWException):
pass
class FileNameException(DSPFTWException):
pass
class NumberSpaceException(DSPFTWException):
pass
class SignalTypeException(DSPFTWException):
pass
class WriteModeException(DSPFTWException):
pass
|
py | b412035e03ffe85d43c243f1d5d70537dfd8eba6 | from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('duplicates', '0008_auto_20201130_0847'),
]
operations = [
migrations.RemoveField(
model_name='duplicatebackendentry',
name='datetime_added',
),
migrations.AlterField(
model_name='storedduplicatebackend',
name='backend_data',
field=models.TextField(
blank=True, help_text='JSON encoded data for the backend '
'class.', verbose_name='Backend data'
),
),
]
|
py | b4120476afb2574c1c683eb5a0f958e7f0e43c1a | # Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""DarkNet model."""
import mindspore.nn as nn
import mindspore.ops as ops
def conv_block(in_channels,
out_channels,
kernel_size,
stride,
dilation=1):
"""Get a conv2d batchnorm and relu layer"""
pad_mode = 'same'
padding = 0
return nn.SequentialCell(
[nn.Conv2d(in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
pad_mode=pad_mode),
nn.BatchNorm2d(out_channels, momentum=0.1),
nn.ReLU()]
)
class ResidualBlock(nn.Cell):
"""
DarkNet V1 residual block definition.
Args:
in_channels: Integer. Input channel.
out_channels: Integer. Output channel.
Returns:
Tensor, output tensor.
Examples:
ResidualBlock(3, 208)
"""
expansion = 4
def __init__(self,
in_channels,
out_channels):
super(ResidualBlock, self).__init__()
out_chls = out_channels//2
self.conv1 = conv_block(in_channels, out_chls, kernel_size=1, stride=1)
self.conv2 = conv_block(out_chls, out_channels, kernel_size=3, stride=1)
self.add = ops.Add()
def construct(self, x):
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.add(out, identity)
return out
class DarkNet(nn.Cell):
"""
DarkNet V1 network.
Args:
block: Cell. Block for network.
layer_nums: List. Numbers of different layers.
in_channels: Integer. Input channel.
out_channels: Integer. Output channel.
        detect: Bool. Whether to output the intermediate feature maps for detection. Default: False.
Returns:
Tuple, tuple of output tensor,(f1,f2,f3,f4,f5).
Examples:
DarkNet(ResidualBlock,
[1, 2, 8, 8, 4],
[32, 64, 128, 256, 512],
[64, 128, 256, 512, 1024],
100)
"""
def __init__(self,
block,
layer_nums,
in_channels,
out_channels,
detect=False):
super(DarkNet, self).__init__()
self.outchannel = out_channels[-1]
self.detect = detect
if not len(layer_nums) == len(in_channels) == len(out_channels) == 5:
raise ValueError("the length of layer_num, inchannel, outchannel list must be 5!")
self.conv0 = conv_block(3,
in_channels[0],
kernel_size=3,
stride=1)
self.conv1 = conv_block(in_channels[0],
out_channels[0],
kernel_size=3,
stride=2)
self.layer1 = self._make_layer(block,
layer_nums[0],
in_channel=out_channels[0],
out_channel=out_channels[0])
self.conv2 = conv_block(in_channels[1],
out_channels[1],
kernel_size=3,
stride=2)
self.layer2 = self._make_layer(block,
layer_nums[1],
in_channel=out_channels[1],
out_channel=out_channels[1])
self.conv3 = conv_block(in_channels[2],
out_channels[2],
kernel_size=3,
stride=2)
self.layer3 = self._make_layer(block,
layer_nums[2],
in_channel=out_channels[2],
out_channel=out_channels[2])
self.conv4 = conv_block(in_channels[3],
out_channels[3],
kernel_size=3,
stride=2)
self.layer4 = self._make_layer(block,
layer_nums[3],
in_channel=out_channels[3],
out_channel=out_channels[3])
self.conv5 = conv_block(in_channels[4],
out_channels[4],
kernel_size=3,
stride=2)
self.layer5 = self._make_layer(block,
layer_nums[4],
in_channel=out_channels[4],
out_channel=out_channels[4])
def _make_layer(self, block, layer_num, in_channel, out_channel):
"""
Make Layer for DarkNet.
:param block: Cell. DarkNet block.
:param layer_num: Integer. Layer number.
:param in_channel: Integer. Input channel.
:param out_channel: Integer. Output channel.
Examples:
_make_layer(ConvBlock, 1, 128, 256)
"""
layers = []
darkblk = block(in_channel, out_channel)
layers.append(darkblk)
for _ in range(1, layer_num):
darkblk = block(out_channel, out_channel)
layers.append(darkblk)
return nn.SequentialCell(layers)
def construct(self, x):
c1 = self.conv0(x)
c2 = self.conv1(c1)
c3 = self.layer1(c2)
c4 = self.conv2(c3)
c5 = self.layer2(c4)
c6 = self.conv3(c5)
c7 = self.layer3(c6)
c8 = self.conv4(c7)
c9 = self.layer4(c8)
c10 = self.conv5(c9)
c11 = self.layer5(c10)
if self.detect:
return c7, c9, c11
return c11
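    # Note: with detect=True, construct() returns the intermediate feature maps c7, c9 and c11
    # at strides 8, 16 and 32 relative to the input, i.e. the multi-scale outputs typically
    # consumed by a YOLOv3-style detection head; otherwise only the final map c11 is returned.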
def get_out_channels(self):
return self.outchannel
def darknet53():
"""
Get DarkNet53 neural network.
Returns:
Cell, cell instance of DarkNet53 neural network.
Examples:
darknet53()
"""
return DarkNet(ResidualBlock, [1, 2, 8, 8, 4],
[32, 64, 128, 256, 512],
[64, 128, 256, 512, 1024])
|
py | b41204c9daa6e70c2f59742c08d52230ae03d3ce | '''Create a teacher's entity'''
__author__ = '[email protected]'
from models.entities import BaseEntity
from models.models import MemcacheManager
from models.models import PersonalProfile
from models.models import PersonalProfileDTO
from models.models import Student
from models.models import BaseJsonDao
from models import transforms
from google.appengine.ext import db
from google.appengine.api import namespace_manager
from google.appengine.api import users
import appengine_config
import logging
import datetime
import json
from json import JSONEncoder
from common import utils as common_utils
# We want to use memcache for both objects that exist and do not exist in the
# datastore. If object exists we cache its instance, if object does not exist
# we cache this object below.
NO_OBJECT = {}
class Teacher(BaseEntity):
"""Teacher data specific to a course instance, modeled after the student Entity"""
enrolled_on = db.DateTimeProperty(auto_now_add=True, indexed=True)
user_id = db.StringProperty(indexed=True)
name = db.StringProperty(indexed=False)
additional_fields = db.TextProperty(indexed=False)
is_enrolled = db.BooleanProperty(indexed=False)
is_active = db.BooleanProperty(indexed=False)
# Additional field for teachers
sections = db.TextProperty(indexed=False)
school = db.StringProperty(indexed=False)
email = db.StringProperty(indexed=False)
_PROPERTY_EXPORT_BLACKLIST = [
additional_fields, # Suppress all additional_fields items.
# Convenience items if not all additional_fields should be suppressed:
#'additional_fields.xsrf_token', # Not PII, but also not useful.
#'additional_fields.form01', # User's name on registration form.
name]
@classmethod
def safe_key(cls, db_key, transform_fn):
return db.Key.from_path(cls.kind(), transform_fn(db_key.id_or_name()))
def for_export(self, transform_fn):
"""Creates an ExportEntity populated from this entity instance."""
assert not hasattr(self, 'key_by_user_id')
model = super(Teacher, self).for_export(transform_fn)
model.user_id = transform_fn(self.user_id)
# Add a version of the key that always uses the user_id for the name
# component. This can be used to establish relationships between objects
# where the student key used was created via get_key(). In general,
# this means clients will join exports on this field, not the field made
# from safe_key().
model.key_by_user_id = self.get_key(transform_fn=transform_fn)
return model
@classmethod
def _memcache_key(cls, key):
"""Makes a memcache key from primary key."""
return 'entity:teacher:%s' % key
def put(self):
"""Do the normal put() and also add the object to memcache."""
result = super(Teacher, self).put()
MemcacheManager.set(self._memcache_key(self.key().name()), self)
return result
def delete(self):
"""Do the normal delete() and also remove the object from memcache."""
super(Teacher, self).delete()
MemcacheManager.delete(self._memcache_key(self.key().name()))
@classmethod
def add_new_teacher_for_user(
cls, email, school, additional_fields, alerts):
TeacherProfileDAO.add_new_teacher_for_user(email, school, additional_fields, alerts)
@classmethod
def update_teacher_for_user(cls, email, school, active, additional_fields, alerts):
TeacherProfileDAO.update_teacher_for_user(email, school, active, additional_fields, alerts)
@classmethod
def get_by_email(cls, email):
return Teacher.get_by_key_name(email.encode('utf8'))
@classmethod
def get_teacher_by_user_id(cls):
"""Loads user and student and asserts both are present."""
user = users.get_current_user()
if not user:
raise Exception('No current user.')
teacher = cls.get_by_email(user.email())
if not teacher:
raise Exception('Teacher instance corresponding to user %s not '
'found.' % user.email())
return teacher
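    # Note: the definition below re-binds the name get_teacher_by_user_id, so the
    # zero-argument variant above is effectively unreachable; callers get the version
    # that takes an explicit user_id.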
@classmethod
def get_teacher_by_user_id(cls, user_id):
teachers = cls.all().filter(cls.user_id.name, user_id).fetch(limit=2)
if len(teachers) == 2:
raise Exception(
'There is more than one teacher with user_id %s' % user_id)
return teachers[0] if teachers else None
@classmethod
def get_teacher_by_email(cls, email):
"""Returns enrolled teacher or None."""
# ehiller - not sure if memcache check is in the right place, feel like we might want to do that after
# checking datastore. this depends on what memcachemanager returns if a teacher hasn't been set there yet but
# still actually exists.
teacher = MemcacheManager.get(cls._memcache_key(email))
if NO_OBJECT == teacher:
return None
if not teacher:
teacher = Teacher.get_by_email(email)
if teacher:
MemcacheManager.set(cls._memcache_key(email), teacher)
else:
MemcacheManager.set(cls._memcache_key(email), NO_OBJECT)
if teacher: #ehiller - removed isEnrolled check, don't think we still need a teacher to be
# enrolled to get their data back
return teacher
else:
return None
@classmethod
def get_all_teachers_for_course(cls):
"""Returns all enrolled teachers or None."""
teachers = []
for teacher in TeacherProfileDAO.get_all_iter():
teachers.append(teacher)
if not teachers:
return None
return teachers
def get_key(self, transform_fn=None):
"""Gets a version of the key that uses user_id for the key name."""
if not self.user_id:
raise Exception('Teacher instance has no user_id set.')
user_id = transform_fn(self.user_id) if transform_fn else self.user_id
return db.Key.from_path(Teacher.kind(), user_id)
def has_same_key_as(self, key):
"""Checks if the key of the teacher and the given key are equal."""
return key == self.get_key()
class TeacherProfileDAO(object):
"""All access and mutation methods for PersonalProfile and Teacher."""
TARGET_NAMESPACE = appengine_config.DEFAULT_NAMESPACE_NAME
ENTITY = Teacher
# Each hook is called back after update() has completed without raising
# an exception. Arguments are:
# profile: The PersonalProfile object for the user
# student: The Student object for the user
# Subsequent arguments are identical to the arguments list to the update()
# call. Not documented here so as to not get out-of-date.
# The return value from hooks is discarded. Since these hooks run
# after update() has succeeded, they should run as best-effort, rather
# than raising exceptions.
UPDATE_POST_HOOKS = []
# Each hook is called back after _add_new_student_for_current_user has
# completed without raising an exception. Arguments are:
# student: The Student object for the user.
# The return value from hooks is discarded. Since these hooks run
# after update() has succeeded, they should run as best-effort, rather
# than raising exceptions.
ADD_STUDENT_POST_HOOKS = []
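# Illustrative sketch of registering a post-update hook. The hook function
# and its logging call are hypothetical; only the signature follows the
# description above.
#
#   def _log_teacher_update(profile, student, *args, **kwargs):
#       logging.info('teacher profile updated for %s', student.email)
#
#   TeacherProfileDAO.UPDATE_POST_HOOKS.append(_log_teacher_update)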
@classmethod
def _memcache_key(cls, key):
"""Makes a memcache key from primary key."""
return 'entity:personal-profile:%s' % key
# This method is going to largely depend on how we plan to register
# users as teachers
@classmethod
def add_new_teacher_for_user(
cls, email, school, additional_fields, alerts):
student_by_email = Student.get_by_email(email)
if not student_by_email:
alerts.append('This email is not registered as a student for this course')
return None
# assume a new teacher is active by default
teacher = cls._add_new_teacher_for_user(
student_by_email.user_id, email, student_by_email.name, school, True, additional_fields)
if teacher:
alerts.append('Teacher was successfully registered')
return teacher
@classmethod
def update_teacher_for_user(cls, email, school, active, additional_fields, errors):
teacher = Teacher.get_by_email(email)
if not teacher:
errors.append('No teacher exists associated with that email.')
return None
teacher = cls._update_teacher_for_user_in_txn(teacher.user_id, email, teacher.name, school, active,
additional_fields, errors)
return teacher
@classmethod
def _add_new_teacher_for_user(
cls, user_id, email, nick_name, school, active, additional_fields):
teacher = cls._add_new_teacher_for_user_in_txn(
user_id, email, nick_name, school, active, additional_fields)
#ehiller - may need to add hooks for adding a teacher
#common_utils.run_hooks(cls.ADD_STUDENT_POST_HOOKS, student)
return teacher
@classmethod
@db.transactional(xg=True)
def _add_new_teacher_for_user_in_txn(
cls, user_id, email, nick_name, school, active, additional_fields):
"""Create new teacher."""
# create profile if does not exist
# profile = cls._get_profile_by_user_id(user_id)
# if not profile:
# profile = cls._add_new_profile(user_id, email)
# create new teacher
teacher = Teacher.get_by_email(email)
if not teacher:
teacher = Teacher(key_name=email)
# update profile
#cls._update_attributes(
# profile, teacher, nick_name=nick_name, is_enrolled=True,
# labels=labels)
# update student
teacher.user_id = user_id
teacher.additional_fields = additional_fields
teacher.school = school
teacher.name = nick_name
teacher.is_active = active
teacher.email = email
# put both
#cls._put_profile(profile)
teacher.put()
return teacher
@classmethod
def _update_teacher_for_user_in_txn(cls, user_id, email, nick_name, school, active, additional_fields, errors):
#probably a better idea to get by user_id since the email may have been changed
teacher = Teacher.get_teacher_by_user_id(user_id)
if not teacher:
    errors.append('No teacher exists associated with that email')
    return None
#not actually letting them update their email, used as key
teacher.name = nick_name
teacher.school = school
teacher.additional_fields = additional_fields
teacher.is_active = active
teacher.put()
return teacher
@classmethod
def get_all_iter(cls):
"""Return a generator that will produce all DTOs of a given type.
Yields:
A DTO for each row in the Entity type's table.
"""
prev_cursor = None
any_records = True
while any_records:
any_records = False
query = cls.ENTITY.all().with_cursor(prev_cursor)
for entity in query.run():
any_records = True
teacher = Teacher()
teacher.email = entity.email
teacher.user_id = entity.user_id
teacher.name = entity.name
teacher.is_active = entity.is_active
teacher.school = entity.school
if entity.sections:
teacher.sections = entity.sections
yield teacher
prev_cursor = query.cursor()
class CourseSectionEntity(object):
"""Course section information"""
created_datetime = str(datetime.MINYEAR)
section_id = ""
section_name = ""
section_description = ""
students = ""
is_active = False
section_year = ""
def __init__(self, course_section_decoded = None):
if course_section_decoded:
#self.created_datetime = course_section_decoded['created_datetime']
self.section_id = course_section_decoded['id']
self.section_name = course_section_decoded['name']
self.section_description = course_section_decoded['description']
self.students = course_section_decoded['students']
self.is_active = course_section_decoded['active']
if 'year' in course_section_decoded:
self.section_year = course_section_decoded['year']
def get_key(self):
user = users.get_current_user()
if not user:
return None
temp_key = user.email() + '_' + self.section_name.replace(' ', '').lower() + self.section_year
return temp_key
@classmethod
def json_encoder(cls, obj):
if isinstance(obj, cls):
return {
'id': obj.section_id,
'name': obj.section_name,
'description': obj.section_description,
'active': obj.is_active,
'students': obj.students,
'year': obj.section_year
}
return None
@classmethod
def add_new_course_section(cls, section_id, new_course_section, errors):
#initialize new course section
course_section = CourseSectionEntity()
user = users.get_current_user()
if not user:
errors.append('Unable to add course section. User not found.')
return False
#if section_id == None or len(section_id) == 0:
# section_id = user.email() + '_' + new_course_section.name.replace(' ', '')
#course_section.section_id = section_id
course_section.section_name = new_course_section.name
course_section.section_description = new_course_section.description
course_section.is_active = new_course_section.active
course_section.section_year = new_course_section.year
course_section.section_id = course_section.get_key()
teacher = Teacher.get_teacher_by_user_id(user.user_id())
if not teacher:
errors.append('Unable to add course section. Teacher Entity not found.')
return None
course_sections = CourseSectionEntity.get_course_sections_for_user()
#add new section to list of sections passed in. this should add it by reference and set the collection
course_sections[course_section.get_key()] = course_section
teacher.sections = transforms.dumps(course_sections, {})
teacher.put()
return course_section.section_id
@classmethod
def update_course_section(cls, section_id, new_course_section, errors):
course_sections = CourseSectionEntity.get_course_sections_for_user()
course_section = CourseSectionEntity()
course_section.section_id = section_id
course_section.section_name = new_course_section.name
course_section.section_description = new_course_section.description
course_section.is_active = new_course_section.active
course_section.students = new_course_section.students
course_section.section_year = new_course_section.year
course_sections[section_id] = course_section
user = users.get_current_user()
if not user:
errors.append('Unable to update course section. User not found.')
return False
teacher = Teacher.get_teacher_by_user_id(user.user_id())
if not teacher:
errors.append('Unable to update course section. Teacher Entity not found.')
return False
teacher.sections = transforms.dumps(course_sections, {})
teacher.put()
return True
@classmethod
def get_course_sections_for_user(cls):
user = users.get_current_user()
if not user:
return None
teacher = Teacher.get_by_email(user.email())
if not teacher:
return None
course_sections = dict()
if teacher.sections:
course_sections_decoded = transforms.loads(teacher.sections)
for course_section_key in course_sections_decoded:
course_section = CourseSectionEntity(course_sections_decoded[course_section_key])
course_sections[course_section.section_id] = course_section
return course_sections
@classmethod
def get_course_for_user(cls, key):
user = users.get_current_user()
if not user:
return None
teacher = Teacher.get_by_email(user.email())
if not teacher:
return None
if teacher.sections:
course_sections_decoded = transforms.loads(teacher.sections)
for course_section_key in course_sections_decoded:
if course_section_key == key:
return CourseSectionEntity(course_sections_decoded[course_section_key])
class CourseSectionDTO(object):
def __init__(self, section_id, data_dict):
self._id = section_id
self.dict = data_dict
@classmethod
def build(cls, name, description, active, students=None, year=None):
return CourseSectionDTO(None, {
'name': name,
'description': description,
'active': active,
'students': students,
'year': year
})
@property
def id(self):
return self._id
@property
def name(self):
return self.dict.get('name')
@property
def description(self):
return self.dict.get('description')
@property
def active(self):
return self.dict.get('active')
@property
def students(self):
return self.dict.get('students')
@property
def year(self):
return self.dict.get('year')
class CourseSectionDAO(BaseJsonDao):
DTO = CourseSectionDTO
ENTITY = CourseSectionEntity
ENTITY_KEY_TYPE = BaseJsonDao.EntityKeyTypeId
|
py | b41206659e46adc46d83134e4d4e58ad08bd3865 | # ===============================================================
# Author: Rodolfo Ferro
# Email: [email protected]
# Twitter: @FerroRodolfo
#
# ABOUT COPYING OR USING PARTIAL INFORMATION:
# This script was originally created by Rodolfo Ferro,
# for his workshop at RIIAA 2.0. Any explicit usage of
# this script or its contents is granted according to
# the license provided and its conditions.
# ===============================================================
# -*- coding: utf-8 -*-
from flask import Flask
from flask import jsonify
app = Flask(__name__)
@app.route('/')
def url_principal():
return "<h1>Hello world!</h1>"
@app.route('/<int:lucky_number>')
def luck_url(lucky_number):
response = {
"Status code": 200,
"Lucky number": lucky_number
}
return jsonify(response)
@app.route('/<string:name>')
def url_name(name):
html_code = f"""
<h1>Hello, {name}!</h1>
<p>Welcome to the matrix.</p>
"""
return html_code
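# Example requests against the local dev server (responses follow from the
# routes above; exact JSON key order may vary):
#
#   curl http://127.0.0.1:5000/        -> "<h1>Hello world!</h1>"
#   curl http://127.0.0.1:5000/7       -> {"Lucky number": 7, "Status code": 200}
#   curl http://127.0.0.1:5000/Ada     -> HTML greeting for "Ada"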
if __name__ == '__main__':
app.run(debug=True, port=5000) |
py | b41206faf792a13b887737afa9e8d1ca4ddf9e5e | from binance_f import RequestClient
from binance_f.constant.test import *
from binance_f.base.printobject import *
from binance_f.model.constant import *
request_client = RequestClient(api_key=g_api_key, secret_key=g_secret_key)
result = request_client.cancel_order(symbol="BTCUSDT", orderId=534333508)
PrintBasic.print_obj(result)
|
py | b41209f7db82083b01254746dfafa6a98f68e4e8 | # coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
import os
import random
import tempfile
import unittest
from importlib import import_module
from typing import List, Tuple
from transformers import is_tf_available
from transformers.testing_utils import _tf_gpu_memory_limit, is_pt_tf_cross_test, require_tf, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TFSharedEmbeddings,
tf_top_k_top_p_filtering,
)
if _tf_gpu_memory_limit is not None:
gpus = tf.config.list_physical_devices("GPU")
for gpu in gpus:
# Restrict TensorFlow to only allocate x GB of memory on the GPUs
try:
tf.config.set_logical_device_configuration(
gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)]
)
logical_gpus = tf.config.list_logical_devices("GPU")
print("Logical GPUs", logical_gpus)
except RuntimeError as e:
# Virtual devices must be set before GPUs have been initialized
print(e)
def _config_zero_init(config):
configs_no_init = copy.deepcopy(config)
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key:
setattr(configs_no_init, key, 0.0)
return configs_no_init
@require_tf
class TFModelTesterMixin:
model_tester = None
all_model_classes = ()
all_generative_model_classes = ()
test_resize_embeddings = True
is_encoder_decoder = False
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
inputs_dict = copy.deepcopy(inputs_dict)
if model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
inputs_dict = {
k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
if isinstance(v, tf.Tensor) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():
inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.values():
inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING.values():
inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in [
*TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.values(),
*TF_MODEL_FOR_CAUSAL_LM_MAPPING.values(),
*TF_MODEL_FOR_MASKED_LM_MAPPING.values(),
*TF_MODEL_FOR_PRETRAINING_MAPPING.values(),
*TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values(),
]:
inputs_dict["labels"] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32
)
return inputs_dict
def test_initialization(self):
pass
def test_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=False)
model = model_class.from_pretrained(tmpdirname)
after_outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assert_outputs_same(after_outputs, outputs)
def test_graph_mode(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
@tf.function
def run_in_graph_mode():
return model(inputs)
outputs = run_in_graph_mode()
self.assertIsNotNone(outputs)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
if model.config.is_encoder_decoder:
expected_arg_names = [
"input_ids",
"attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
]
expected_arg_names.extend(
["head_mask", "decoder_head_mask", "encoder_outputs"]
if "head_mask" and "decoder_head_mask" in arg_names
else ["encoder_outputs"]
)
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
else:
expected_arg_names = ["input_ids"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_saved_model_creation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = False
config.output_attentions = False
if hasattr(config, "use_cache"):
config.use_cache = False
model_class = self.all_model_classes[0]
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
model(class_inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=True)
saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
self.assertTrue(os.path.exists(saved_model_dir))
@slow
def test_saved_model_creation_extended(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
if hasattr(config, "use_cache"):
config.use_cache = True
for model_class in self.all_model_classes:
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
model(class_inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=True)
saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
self.assertTrue(os.path.exists(saved_model_dir))
@slow
def test_saved_model_with_hidden_states_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = False
if hasattr(config, "use_cache"):
config.use_cache = False
for model_class in self.all_model_classes:
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
num_out = len(model(class_inputs_dict))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=True)
saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
model = tf.keras.models.load_model(saved_model_dir)
outputs = model(class_inputs_dict)
if self.is_encoder_decoder:
output = outputs["encoder_hidden_states"]
else:
output = outputs["hidden_states"]
self.assertEqual(len(outputs), num_out)
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(output), expected_num_layers)
self.assertListEqual(
list(output[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
@slow
def test_saved_model_with_attentions_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_attentions = True
config.output_hidden_states = False
if hasattr(config, "use_cache"):
config.use_cache = False
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
for model_class in self.all_model_classes:
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
num_out = len(model(class_inputs_dict))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=True)
saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
model = tf.keras.models.load_model(saved_model_dir)
outputs = model(class_inputs_dict)
if self.is_encoder_decoder:
output = outputs["encoder_attentions"]
else:
output = outputs["attentions"]
self.assertEqual(len(outputs), num_out)
self.assertEqual(len(output), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(output[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_keras_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
tf_main_layer_classes = set(
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__),)
for module_member_name in dir(module)
if module_member_name.endswith("MainLayer")
for module_member in (getattr(module, module_member_name),)
if isinstance(module_member, type)
and tf.keras.layers.Layer in module_member.__bases__
and getattr(module_member, "_keras_serializable", False)
)
for main_layer_class in tf_main_layer_classes:
# T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
if "T5" in main_layer_class.__name__:
# Take the same values as in TFT5ModelTester for this shared layer
shared = TFSharedEmbeddings(99, 32, name="shared")
config.use_cache = inputs_dict.pop("use_cache", None)
main_layer = main_layer_class(config, embed_tokens=shared)
else:
main_layer = main_layer_class(config)
symbolic_inputs = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
}
model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
outputs = model(inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
filepath = os.path.join(tmpdirname, "keras_model.h5")
model.save(filepath)
if "T5" in main_layer_class.__name__:
model = tf.keras.models.load_model(
filepath,
custom_objects={
main_layer_class.__name__: main_layer_class,
"TFSharedEmbeddings": TFSharedEmbeddings,
},
)
else:
model = tf.keras.models.load_model(
filepath, custom_objects={main_layer_class.__name__: main_layer_class}
)
assert isinstance(model, tf.keras.Model)
after_outputs = model(inputs_dict)
self.assert_outputs_same(after_outputs, outputs)
def assert_outputs_same(self, after_outputs, outputs):
# Make sure we don't have nans
if isinstance(after_outputs, tf.Tensor):
out_1 = after_outputs.numpy()
elif isinstance(after_outputs, dict):
out_1 = after_outputs[list(after_outputs.keys())[0]].numpy()
else:
out_1 = after_outputs[0].numpy()
out_2 = outputs[0].numpy()
self.assertEqual(out_1.shape, out_2.shape)
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
@is_pt_tf_cross_test
def test_pt_tf_model_equivalence(self):
import torch
import transformers
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
pt_model_class_name = model_class.__name__[2:] # Skip the "TF" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
config.output_hidden_states = True
tf_model = model_class(config)
pt_model = pt_model_class(config)
# Check we can load pt model in tf and vice-versa with model => model functions
tf_model = transformers.load_pytorch_model_in_tf2_model(
tf_model, pt_model, tf_inputs=self._prepare_for_class(inputs_dict, model_class)
)
pt_model = transformers.load_tf2_model_in_pytorch_model(pt_model, tf_model)
# Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
pt_model.eval()
pt_inputs_dict = {}
for name, key in self._prepare_for_class(inputs_dict, model_class).items():
if type(key) == bool:
pt_inputs_dict[name] = key
else:
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)
# need to rename encoder-decoder "inputs" for PyTorch
if "inputs" in pt_inputs_dict and self.is_encoder_decoder:
pt_inputs_dict["input_ids"] = pt_inputs_dict.pop("inputs")
with torch.no_grad():
pto = pt_model(**pt_inputs_dict)
tfo = tf_model(self._prepare_for_class(inputs_dict, model_class), training=False)
tf_hidden_states = tfo[0].numpy()
pt_hidden_states = pto[0].numpy()
tf_nans = np.copy(np.isnan(tf_hidden_states))
pt_nans = np.copy(np.isnan(pt_hidden_states))
pt_hidden_states[tf_nans] = 0
tf_hidden_states[tf_nans] = 0
pt_hidden_states[pt_nans] = 0
tf_hidden_states[pt_nans] = 0
max_diff = np.amax(np.abs(tf_hidden_states - pt_hidden_states))
self.assertLessEqual(max_diff, 4e-2)
# Check we can load pt model in tf and vice-versa with checkpoint => model functions
with tempfile.TemporaryDirectory() as tmpdirname:
pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin")
torch.save(pt_model.state_dict(), pt_checkpoint_path)
tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(tf_model, pt_checkpoint_path)
tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5")
tf_model.save_weights(tf_checkpoint_path)
pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path)
# Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
pt_model.eval()
pt_inputs_dict = {}
for name, key in self._prepare_for_class(inputs_dict, model_class).items():
if type(key) == bool:
key = np.array(key, dtype=bool)
pt_inputs_dict[name] = torch.from_numpy(key).to(torch.long)
else:
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)
# need to rename encoder-decoder "inputs" for PyTorch
if "inputs" in pt_inputs_dict and self.is_encoder_decoder:
pt_inputs_dict["input_ids"] = pt_inputs_dict.pop("inputs")
with torch.no_grad():
pto = pt_model(**pt_inputs_dict)
tfo = tf_model(self._prepare_for_class(inputs_dict, model_class))
tfo = tfo[0].numpy()
pto = pto[0].numpy()
tf_nans = np.copy(np.isnan(tfo))
pt_nans = np.copy(np.isnan(pto))
pto[tf_nans] = 0
tfo[tf_nans] = 0
pto[pt_nans] = 0
tfo[pt_nans] = 0
max_diff = np.amax(np.abs(tfo - pto))
self.assertLessEqual(max_diff, 4e-2)
def test_train_pipeline_custom_model(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
tf_main_layer_classes = set(
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__),)
for module_member_name in dir(module)
if module_member_name.endswith("MainLayer")
for module_member in (getattr(module, module_member_name),)
if isinstance(module_member, type)
and tf.keras.layers.Layer in module_member.__bases__
and getattr(module_member, "_keras_serializable", False)
)
for main_layer_class in tf_main_layer_classes:
# T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
if "T5" in main_layer_class.__name__:
# Take the same values as in TFT5ModelTester for this shared layer
shared = TFSharedEmbeddings(self.model_tester.vocab_size, self.model_tester.hidden_size, name="shared")
config.use_cache = False
main_layer = main_layer_class(config, embed_tokens=shared)
del inputs_dict["use_cache"]
else:
main_layer = main_layer_class(config)
symbolic_inputs = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
}
if hasattr(self.model_tester, "num_labels"):
num_labels = self.model_tester.num_labels
else:
num_labels = 2
X = tf.data.Dataset.from_tensor_slices(
(inputs_dict, np.ones((self.model_tester.batch_size, self.model_tester.seq_length, num_labels, 1)))
).batch(1)
hidden_states = main_layer(symbolic_inputs)[0]
outputs = tf.keras.layers.Dense(num_labels, activation="softmax", name="outputs")(hidden_states)
model = tf.keras.models.Model(inputs=symbolic_inputs, outputs=[outputs])
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["binary_accuracy"])
model.fit(X, epochs=1)
with tempfile.TemporaryDirectory() as tmpdirname:
filepath = os.path.join(tmpdirname, "keras_model.h5")
model.save(filepath)
if "T5" in main_layer_class.__name__:
model = tf.keras.models.load_model(
filepath,
custom_objects={
main_layer_class.__name__: main_layer_class,
"TFSharedEmbeddings": TFSharedEmbeddings,
},
)
else:
model = tf.keras.models.load_model(
filepath, custom_objects={main_layer_class.__name__: main_layer_class}
)
assert isinstance(model, tf.keras.Model)
model(inputs_dict)
def test_compile_tf_model(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
max_input = getattr(self.model_tester, "max_position_embeddings", 512)
optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy")
for model_class in self.all_model_classes:
if self.is_encoder_decoder:
input_ids = {
"decoder_input_ids": tf.keras.Input(
batch_shape=(2, max_input),
name="decoder_input_ids",
dtype="int32",
),
"input_ids": tf.keras.Input(batch_shape=(2, max_input), name="input_ids", dtype="int32"),
}
elif model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
input_ids = tf.keras.Input(batch_shape=(4, 2, max_input), name="input_ids", dtype="int32")
else:
input_ids = tf.keras.Input(batch_shape=(2, max_input), name="input_ids", dtype="int32")
# Prepare our model
model = model_class(config)
model(self._prepare_for_class(inputs_dict, model_class)) # Model must be called before saving.
# Let's load it from the disk to be sure we can use pretrained weights
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=False)
model = model_class.from_pretrained(tmpdirname)
outputs_dict = model(input_ids)
hidden_states = outputs_dict[0]
# Add a dense layer on top to test integration with other keras modules
outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states)
# Compile extended model
extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs])
extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
def test_keyword_and_dict_args(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs_dict = model(inputs)
inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
input_ids = inputs_keywords.pop("input_ids", None)
outputs_keywords = model(input_ids, **inputs_keywords)
output_dict = outputs_dict[0].numpy()
output_keywords = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
def check_decoder_attentions_output(outputs):
out_len = len(outputs)
self.assertEqual(out_len % 2, 0)
decoder_attentions = outputs.decoder_attentions
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
def check_encoder_attentions_output(outputs):
attentions = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["use_cache"] = False
config.output_hidden_states = False
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
out_len = len(outputs)
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
if self.is_encoder_decoder:
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_decoder_attentions_output(outputs)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
config.output_hidden_states = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
self.assertEqual(model.config.output_hidden_states, True)
check_encoder_attentions_output(outputs)
def test_hidden_states_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_hidden_states_output(config, inputs_dict, model_class):
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
if model.config.is_encoder_decoder:
encoder_hidden_states = outputs.encoder_hidden_states
decoder_hidden_states = outputs.decoder_hidden_states
self.assertEqual(config.output_attentions, False)
self.assertEqual(len(encoder_hidden_states), expected_num_layers)
self.assertListEqual(
list(encoder_hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
self.assertEqual(len(decoder_hidden_states), expected_num_layers)
self.assertListEqual(
list(decoder_hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
else:
hidden_states = outputs.hidden_states
self.assertEqual(config.output_attentions, False)
self.assertEqual(len(hidden_states), expected_num_layers)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(config, inputs_dict, model_class)
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(config, inputs_dict, model_class)
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
list_lm_models = (
list(TF_MODEL_FOR_CAUSAL_LM_MAPPING.values())
+ list(TF_MODEL_FOR_MASKED_LM_MAPPING.values())
+ list(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values())
)
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
if model_class in list_lm_models:
x = model.get_output_embeddings()
assert isinstance(x, tf.keras.layers.Layer)
name = model.get_bias()
assert isinstance(name, dict)
for k, v in name.items():
assert isinstance(v, tf.Variable)
else:
x = model.get_output_embeddings()
assert x is None
name = model.get_bias()
assert name is None
def test_determinism(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
first, second = (
model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
)
out_1 = first.numpy()
out_2 = second.numpy()
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(tuple_object, dict_object)),
msg=f"Tuple and dict output are not equal. Difference: {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}",
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(
model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
)
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = model.get_input_embeddings()(input_ids)
else:
inputs["inputs_embeds"] = model.get_input_embeddings()(encoder_input_ids)
inputs["decoder_inputs_embeds"] = model.get_input_embeddings()(decoder_input_ids)
model(inputs)
def test_numpy_arrays_inputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def prepare_numpy_arrays(inputs_dict):
inputs_np_dict = {}
for k, v in inputs_dict.items():
if tf.is_tensor(v):
inputs_np_dict[k] = v.numpy()
else:
inputs_np_dict[k] = np.array(v)
return inputs_np_dict
for model_class in self.all_model_classes:
model = model_class(config)
inputs = self._prepare_for_class(inputs_dict, model_class)
inputs_np = prepare_numpy_arrays(inputs)
model(inputs_np)
def test_resize_token_embeddings(self):
if not self.test_resize_embeddings:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(model, embedding_layer):
embeds = getattr(embedding_layer, "weight", None)
if embeds is not None:
return embeds
embeds = getattr(embedding_layer, "decoder", None)
if embeds is not None:
return embeds
model(model.dummy_inputs)
embeds = getattr(embedding_layer, "weight", None)
if embeds is not None:
return embeds
embeds = getattr(embedding_layer, "decoder", None)
if embeds is not None:
return embeds
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10, None]:
# build the embeddings
model = model_class(config=config)
old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
old_bias = model.get_bias()
old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
# reshape the embeddings
model.resize_token_embeddings(size)
new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
new_bias = model.get_bias()
new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
# check that the resized embeddings size matches the desired size.
assert_size = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0], assert_size)
# check that weights remain the same after resizing
models_equal = True
for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_bias is not None and new_bias is not None:
for old_weight, new_weight in zip(old_bias.values(), new_bias.values()):
self.assertEqual(new_weight.shape[0], assert_size)
models_equal = True
for p1, p2 in zip(old_weight.value(), new_weight.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0], assert_size)
self.assertEqual(new_output_embeddings.shape[1], old_output_embeddings.shape[1])
models_equal = True
for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
def test_lm_head_model_random_no_beam_search_generate(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict["input_ids"]
# iterate over all generative models
for model_class in self.all_generative_model_classes:
model = model_class(config)
if config.bos_token_id is None:
# if bos token id is not defined, the model needs input_ids
with self.assertRaises(AssertionError):
model.generate(do_sample=True, max_length=5)
# num_return_sequences = 1
self._check_generated_ids(model.generate(input_ids, do_sample=True))
else:
# num_return_sequences = 1
self._check_generated_ids(model.generate(do_sample=True, max_length=5))
with self.assertRaises(AssertionError):
# generating multiple sequences when no beam search generation
# is not allowed as it would always generate the same sequences
model.generate(input_ids, do_sample=False, num_return_sequences=2)
# num_return_sequences > 1, sample
self._check_generated_ids(model.generate(input_ids, do_sample=True, num_return_sequences=2))
# check bad words tokens language generation
# create list of 1-seq bad token and list of 2-seq of bad tokens
bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
output_tokens = model.generate(
input_ids, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2
)
# only count generated tokens
generated_ids = output_tokens[:, input_ids.shape[-1] :]
self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))
def test_lm_head_model_random_beam_search_generate(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict["input_ids"]
for model_class in self.all_generative_model_classes:
model = model_class(config)
if config.bos_token_id is None:
# if bos token id is not defined, the model needs input_ids, num_return_sequences = 1
self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2))
else:
# num_return_sequences = 1
self._check_generated_ids(model.generate(do_sample=True, max_length=5, num_beams=2))
with self.assertRaises(AssertionError):
# generating more sequences than we have beams is not possible
model.generate(input_ids, do_sample=False, num_return_sequences=3, num_beams=2)
# num_return_sequences > 1, sample
self._check_generated_ids(
model.generate(
input_ids,
do_sample=True,
num_beams=2,
num_return_sequences=2,
)
)
# num_return_sequences > 1, greedy
self._check_generated_ids(model.generate(input_ids, do_sample=False, num_beams=2, num_return_sequences=2))
# check bad words tokens language generation
# create list of 1-seq bad token and list of 2-seq of bad tokens
bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
output_tokens = model.generate(
input_ids, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2
)
# only count generated tokens
generated_ids = output_tokens[:, input_ids.shape[-1] :]
self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))
def test_loss_computation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
if getattr(model, "compute_loss", None):
# The number of elements in the loss should be the same as the number of elements in the label
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
added_label = prepared_for_class[
sorted(list(prepared_for_class.keys() - inputs_dict.keys()), reverse=True)[0]
]
loss_size = tf.size(added_label)
if model.__class__ in TF_MODEL_FOR_CAUSAL_LM_MAPPING.values():
# if the loss is the causal LM loss, labels are shifted, so one label
# per batch is cut
loss_size = loss_size - self.model_tester.batch_size
# Test that model correctly compute the loss with kwargs
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
input_ids = prepared_for_class.pop("input_ids")
loss = model(input_ids, **prepared_for_class)[0]
self.assertEqual(loss.shape, [loss_size])
# Test that model correctly compute the loss with a dict
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
loss = model(prepared_for_class)[0]
self.assertEqual(loss.shape, [loss_size])
# Test that model correctly compute the loss with a tuple
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
# Get keys that were added with the _prepare_for_class function
label_keys = prepared_for_class.keys() - inputs_dict.keys()
signature = inspect.signature(model.call).parameters
signature_names = list(signature.keys())
# Create a dictionary holding the location of the tensors in the tuple
tuple_index_mapping = {0: "input_ids"}
for label_key in label_keys:
label_key_index = signature_names.index(label_key)
tuple_index_mapping[label_key_index] = label_key
sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
# Initialize a list with their default values, update the values and convert to a tuple
list_input = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default)
for index, value in sorted_tuple_index_mapping:
list_input[index] = prepared_for_class[value]
tuple_input = tuple(list_input)
# Send to model
loss = model(tuple_input[:-1])[0]
self.assertEqual(loss.shape, [loss_size])
def _generate_random_bad_tokens(self, num_bad_tokens, model):
# special tokens cannot be bad tokens
special_tokens = []
if model.config.bos_token_id is not None:
special_tokens.append(model.config.bos_token_id)
if model.config.pad_token_id is not None:
special_tokens.append(model.config.pad_token_id)
if model.config.eos_token_id is not None:
special_tokens.append(model.config.eos_token_id)
# create random bad tokens that are not special tokens
bad_tokens = []
while len(bad_tokens) < num_bad_tokens:
token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0]
if token not in special_tokens:
bad_tokens.append(token)
return bad_tokens
def _check_generated_ids(self, output_ids):
for token_id in output_ids[0].numpy().tolist():
self.assertGreaterEqual(token_id, 0)
self.assertLess(token_id, self.model_tester.vocab_size)
def _check_match_tokens(self, generated_ids, bad_words_ids):
# for all bad word tokens
for bad_word_ids in bad_words_ids:
# for all slices in batch
for generated_ids_slice in generated_ids:
# for all word idx
for i in range(len(bad_word_ids), len(generated_ids_slice)):
# if tokens match
if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids:
return True
return False
def ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None):
"""Creates a random int32 tensor of the shape within the vocab size."""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.randint(0, vocab_size - 1))
output = tf.constant(values, shape=shape, dtype=dtype if dtype is not None else tf.int32)
return output
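# e.g. ids_tensor((2, 5), vocab_size=10) returns an int32 tensor of shape
# (2, 5) with values drawn uniformly from [0, 9]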
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
# tests whether the top_k_top_p_filtering function behaves as expected
def test_top_k_top_p_filtering(self):
logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
], # cumulative prob of 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
], # cumulative prob of 5 highest values <= 0.6
],
dtype=tf.float32,
)
non_inf_expected_idx = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
dtype=tf.int32,
) # expected non filtered idx as noted above
non_inf_expected_output = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
dtype=tf.float32,
) # expected non filtered values as noted above
output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
non_inf_output = output[output != -float("inf")]
non_inf_idx = tf.cast(
tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
dtype=tf.int32,
)
tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
|
py | b4120a00a9b8ea4d9e6a01edcee6c3ff4381a368 | from typing import Optional
import phonenumbers
def dialing_code_for_region(phone_region: str) -> Optional[int]:
"""Gets the dialing code for the specified region.
The dialing code is not the same as the dialing prefix.
The dialing code is without the '+'. For example '971' for
the UAE.
Arguments:
phone_region:
Country code as defined by ISO 3166-1 alpha-2.
Returns:
The dialing code for the specified region.
"""
return phonenumbers.country_code_for_region(phone_region) or None
def dialing_prefix_for_region(phone_region: str) -> Optional[str]:
"""Gets the dialing prefix for the specified region.
The dialing prefix is the dialing code prefixed by '+'
Arguments:
phone_region:
Country code as defined by ISO 3166-1 alpha-2.
Returns:
The dialing prefix for the specified region, example
of a returned dialing code: '+971'.
"""
dialing_code = dialing_code_for_region(phone_region)
if not dialing_code:
return None
return "+%s" % dialing_code
|
py | b4120a2d8c5fa427b4454f376f77faca05f707d7 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : MeUtils.
# @File : log_utils
# @Time : 2020/11/12 11:40 AM
# @Author : yuanjie
# @Email : [email protected]
# @Software : PyCharm
# @Description :
import os
from random import uniform
from loguru import logger
from meutils.notice.wecom import Wecom
# LOG CONF: must be configured in environment variables ahead of time; for the rest see loguru._defaults.LOGURU_*
LOG_PATH = os.environ.get('LOG_PATH')  # only takes effect when run as "python xx.py"
# todo: https://blog.csdn.net/bailang_zhizun/article/details/107863671
# 1. Filtering
# 2. Default config, ZooKeeper config, file config, environment-variable config
if LOG_PATH:
logger.add(
LOG_PATH,
rotation="100 MB",
enqueue=True,  # asynchronous
encoding="utf-8",
backtrace=True,
diagnose=True,
# level=_defaults.LOGURU_LEVEL,
# filter=_defaults.LOGURU_FILTER,
)
# Sampled log output: by time or by count
def logger4sample(log, bins=10):
if uniform(0, bins) < 1:
logger.info(log)
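# Usage sketch: with the default bins=10, uniform(0, bins) < 1 holds with
# probability 1/bins, so roughly 1 in 10 calls is actually logged.
#
#   for i in range(100):
#       logger4sample(f'heartbeat {i}')  # expect ~10 log lines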
# todo: stand up a service for configuring a shared logger
def logger4wecom(title='This is a title', text='This is a log message', hook_url=None):
return Wecom(hook_url).send_markdown(title=str(title), content=str(text))
# todo:
# add zk/es/mongo/hdfs logger
# logger = logger.patch(lambda r: r.update(name=__file__))
logger_patch = lambda name: logger.patch(lambda r: r.update(name=name))  # main module: equivalent to __name__=__file__
if __name__ == '__main__':
logger.info("xx")
# logger4feishu('', 'a\nb')
logger4wecom()
|
py | b4120a59f7d7ad5752508bb5930a4214e28b91ba | from flask import Flask
from flask import render_template
import socket
import random
import os
import argparse
app = Flask(__name__)
color_codes = {
"red": "#e74c3c",
"green": "#16a085",
"blue": "#2980b9",
"blue2": "#30336b",
"pink": "#be2edd",
"darkblue": "#130f40"
}
SUPPORTED_COLORS = ",".join(color_codes.keys())
# Get color from Environment variable
COLOR_FROM_ENV = os.environ.get('APP_COLOR')
# Generate a random color
COLOR = random.choice(["red", "green", "blue", "blue2", "darkblue", "pink"])
@app.route("/")
def main():
# return 'Hello'
return render_template('hello.html', name=socket.gethostname(), color=color_codes[COLOR])
if __name__ == "__main__":
print(" This is a sample web application that displays a colored background. \n"
" A color can be specified in two ways. \n"
"\n"
" 1. As a command line argument with --color as the argument. Accepts one of " + SUPPORTED_COLORS + " \n"
" 2. As an Environment variable APP_COLOR. Accepts one of " + SUPPORTED_COLORS + " \n"
" 3. If none of the above then a random color is picked from the above list. \n"
" Note: Command line argument precedes over environment variable.\n"
"\n"
"")
# Check for Command Line Parameters for color
parser = argparse.ArgumentParser()
parser.add_argument('--color', required=False)
args = parser.parse_args()
if args.color:
print("Color from command line argument =" + args.color)
COLOR = args.color
if COLOR_FROM_ENV:
print("A color was set through environment variable -" + COLOR_FROM_ENV + ". However, color from command line argument takes precendence.")
elif COLOR_FROM_ENV:
print("No Command line argument. Color from environment variable =" + COLOR_FROM_ENV)
COLOR = COLOR_FROM_ENV
else:
print("No command line argument or environment variable. Picking a Random Color =" + COLOR)
# Check if input color is a supported one
if COLOR not in color_codes:
print("Color not supported. Received '" + COLOR + "' expected one of " + SUPPORTED_COLORS)
exit(1)
# Run Flask Application
app.run(host="0.0.0.0", port=8080)
|
py | b4120a9f4e72b8c38ae88419b64bb357551ce831 | import pytest
from python_api.exceptions import RepositoryException
from python_api.models import User
from python_api.repositories import user_repository
from tests.factories import UserFactory
class TestUserRepository:
def test_it_finds_a_user_by_id(self):
user = UserFactory.create()
found_user = user_repository.find_by_id(user.id)
assert found_user.username == user.username
def test_it_raises_exception_for_user_not_found(self):
user = UserFactory.create(is_deleted=True)
with pytest.raises(RepositoryException):
user_repository.find_by_id(user.id)
def test_it_creates_a_user(self, database):
user = UserFactory.build()
user_repository.create(user)
created_user = database.query(User).get(user.id)
assert created_user.username == user.username
def test_it_updates_a_user(self, faker, database):
user = UserFactory.create()
new_username = faker.user_name()
user.username = new_username
user_repository.update(user)
updated_user = database.query(User).get(user.id)
assert updated_user.username == user.username
def test_it_deletes_a_user(self, database):
user = UserFactory.create()
user_repository.delete(user)
deleted_user = database.query(User).get(user.id)
assert deleted_user.is_deleted
assert deleted_user.deleted_on is not None
|
py | b4120be061af50110b720a0435287873308fa379 | import pandas as pd
import numpy as np
import math
from sklearn.datasets import load_digits, load_iris, load_boston, load_breast_cancer
from sklearn.model_selection import train_test_split
class Perceptron():
def __init__(self, w = None, b = None, termination_steps = 0):
self.w = w
self.b = b
self.termination_steps = termination_steps
self.prev_weights = []
self.prev_bias = []
def fit(self, X, y):
self.w = np.zeros((1, X.shape[1]))
self.b = 0
while(True):
# Update w, b by iterating over each row of X
misclassified_count = 0
for index in range(len(y)):
# Update when a data point gets misclassified
pred = np.sign(np.dot(self.w, X[index]) + self.b)[0]
if pred != y[index]:
misclassified_count += 1
self.w += X[index]*y[index]
self.b += y[index]
# Termination condition
if (misclassified_count == 0) or (misclassified_count >= 0.3*len(y) \
and self.termination_steps >= 1e5):
break
self.prev_weights.append(self.w.copy())
self.prev_bias.append(self.b.copy())
self.termination_steps += 1
def predict(self, X):
return np.sign(self.w @ X.T + self.b)[0]
def get_accuracy(self, y, y_hat):
return np.mean(y == y_hat)*100
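# Note: the fit() loop above implements the classic perceptron rule for labels
# y in {-1, +1}: whenever sign(w.x + b) != y, update w <- w + y*x and b <- b + y.
# A minimal usage sketch (mirrors the script below):
#
#   clf = Perceptron()
#   clf.fit(X_train, y_train)                            # y_train in {-1, +1}
#   acc = clf.get_accuracy(y_test, clf.predict(X_test))  # percentage correct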
# Load data
data = load_breast_cancer()
X, y = data.data, data.target
y = np.where(y == 0, -1, 1) # map {0, 1} labels to {-1, +1}, as assumed by the perceptron update and np.sign
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1)
# Fit model
model = Perceptron()
model.fit(X_train, y_train)
# Predict
y_pred = model.predict(X_test)
# Get accuracy
score = model.get_accuracy(y_pred, y_test)
print("Model Score = ", str(score))
|
py | b4120d871381af6b1da9f9e2f993261a726f010d | import numpy as np
import matplotlib.pyplot as plt
from loadData import *
# SVM
def calculate_cost_gradient(Z, X, Y):
# Z: N x 1
# Y: N x 1
# X: N x d+1
# w: d+1 x 1
dW = np.zeros([X.shape[1],1])
for n in range(X.shape[0]):
yZ = Y[n] * Z[n] # 1 x 1
this_x = X[n] # 1 x d+1
dw = np.zeros([X.shape[1],1])
for i in range(X.shape[1]):
if yZ < 1:
dw[i][0] = - Y[n] * this_x[i]
else:
dw[i][0] = 0
dW += dw
dW = dW/X.shape[0] # average
# print(dW)
return dW
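# Note: the nested loops above compute the averaged hinge-loss subgradient
#   dW = -(1/N) * sum over {n : y_n * z_n < 1} of y_n * x_n.
# Assuming Z and Y are column vectors of shape (N, 1), an equivalent vectorized
# sketch is:
#
#   mask = (Y * Z < 1).astype(float)            # 1 where the margin is violated
#   dW_vec = -(X.T @ (Y * mask)) / X.shape[0]   # shape (d+1, 1), matches dW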
def predict(X, W, t = None):
# X_new: Nsample x (d+1)
# W: (d+1) x K
# t: N x 1
# Z: N x 1
Z = np.dot(X, W)
y = np.sign(Z)
loss = np.average(np.clip(1-Z*t, a_min=0, a_max=99999)) #hinge loss
correct = (y == t).sum()
acc = correct / X.shape[0]
return Z, loss, acc
def train(X_train, y_train, X_val, y_val):
N_train = X_train.shape[0]
N_val = X_val.shape[0]
# init weight
w = np.zeros([X_train.shape[1], 1])
# init return val
epoch_best = 0
acc_best = 0
W_best = None
track_loss_train = []
track_acc_val = []
for epoch in range(MaxIter):
loss_this_epoch = 0
for b in range(int(np.ceil(N_train/batch_size)) ):
X_batch = X_train[b*batch_size : (b+1)*batch_size]
y_batch = y_train[b*batch_size : (b+1)*batch_size]
Z, loss, _ = predict(X_batch, w, y_batch)
loss_this_epoch += loss
# calculate gradient
gradient = calculate_cost_gradient(Z, X_batch, y_batch) + decay * w
decay_alpha = np.power(0.96, epoch) * alpha # learning rate decay
w = w - decay_alpha * gradient
avg_loss_this_epoch = loss_this_epoch / (N_train/batch_size)
_, _, acc = predict(X_val, w, y_val)
print('epoch:', epoch)
print('avg loss this epoch:', avg_loss_this_epoch)
print('last batch train loss:', loss, '\nvalid accuracy:', acc)
track_loss_train.append(avg_loss_this_epoch)
track_acc_val.append(acc)
if acc > acc_best:
acc_best = acc
epoch_best = epoch
W_best = w
return epoch_best, acc_best, W_best, track_loss_train, track_acc_val
X_train, y_train, X_val, y_val, X_test, y_test = loadSpamData(True)
print(X_train.shape, y_train.shape, X_val.shape, y_val.shape, X_test.shape, y_test.shape)
N_class = 2
alpha = 0.0008 # learning rate
batch_size = 10 # batch size
MaxIter = 200 # Maximum iteration
decay = 0.01 # weight decay
epoch_best, acc_best, W_best, track_loss_train, track_acc_val = train(X_train, y_train, X_val, y_val)
_, _, acc_test = predict(X_test, W_best, y_test)
print('\nAt epoch {}.\nvalidation accuracy: {:.2f}%.\ntest accuracy: {:.2f}%'.format(epoch_best, acc_best*100, acc_test*100))
plt.figure(1)
plt.plot(track_loss_train)
plt.xlabel("epochs")
plt.ylabel("training loss")
plt.figure(2)
plt.plot(track_acc_val)
plt.xlabel("epochs")
plt.ylabel("validation acc")
plt.show() |
py | b4120e4b32849b5739641da07e32d1c8eceea3c9 | #!usr/bin/python
# -*- coding:utf8 -*-
"""
Reverse a character array in place. https://leetcode.com/problems/reverse-string/
s is a list of single-character strings.
1. Alternatively, s.reverse() does this in one call.
"""
class Solution:
def reverseString(self, s):
"""
Do not return anything, modify s in-place instead.
"""
beg = 0
end = len(s) - 1
while beg < end:
s[beg], s[end] = s[end], s[beg]
beg += 1
end -= 1
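# A minimal usage sketch:
#
#   s = list("hello")
#   Solution().reverseString(s)
#   # s is now ['o', 'l', 'l', 'e', 'h']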
|
py | b4120e59da0eb5520f5b8c470ecf4064f6d5297f | # =============================================================================
# MIT License
#
# Copyright (c) 2018 chuanqi305
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
import sys
import cv2
import numpy as np
import caffe
from caffe.proto.caffe_pb2 import NetParameter, LayerParameter
import google.protobuf.text_format as txtf
def transform_input(img, transpose=True, dtype=np.float32):
"""Return transformed input image 'img' for CNN input
transpose: if True, channels in dim 0, else channels in dim 2
dtype: type of returned array (sometimes important)
"""
inpt = cv2.resize(img, (300,300))
inpt = inpt - 127.5
inpt = inpt / 127.5
inpt = inpt.astype(dtype)
if transpose:
inpt = inpt.transpose((2, 0, 1))
return inpt
def get_masks(net, percentile=50):
"""Returns dict layer_name:channels_mask for convolutions.
The top (100 - percentile)% of channels are selected by their mean response (over blobs).
net: caffe.Net network
"""
bnames = [e for e in net.blobs.keys() if ('data' not in e) and ('split' not in e)
and ('mbox' not in e) and ('detection' not in e)]
blobmask = {}
prev = None
for b in bnames:
blob = net.blobs[b].data
mean = blob.mean(axis=(0,2,3))
perc = np.percentile(mean, percentile)
mask = mean>perc
blobmask[b] = mask
if ('dw' in b) and (prev is not None):
blobmask[prev] = mask
prev = b
return blobmask
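# Note: the mask keeps channels whose mean activation exceeds the given
# percentile, e.g. with percentile=50 roughly half the channels survive:
#
#   mean = blob.mean(axis=(0, 2, 3))       # one value per channel
#   mask = mean > np.percentile(mean, 50)  # keep the stronger half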
def resize_network(netdef, name2num, verbose=True):
"""Change number of channels in convolutions
netdef: network params
name2num: maps from channel name to new number of channels
verbose: if True, display changes
"""
new_layers = []
for l in netdef.layer:
newl = LayerParameter()
newl.CopyFrom(l)
if (l.name in name2num):
if (l.type == 'Convolution'):
if verbose:
print(l.name+': \t'+
'Changing num_output from '+str(l.convolution_param.num_output)+' to '+str(name2num[l.name]))
newl.convolution_param.num_output = name2num[l.name]
if newl.convolution_param.group > 1:
newl.convolution_param.group = name2num[l.name]
else:
if verbose:
print('Layer '+l.name+' is not convolution, skipping')
new_layers.append(newl)
new_pnet = NetParameter()
new_pnet.CopyFrom(netdef)
del(new_pnet.layer[:])
new_pnet.layer.extend(new_layers)
return new_pnet
def set_params(model, newmodel, newnetdef, blob2mask):
"""Copy parameters from bigger network to smaller (with pruned channel).
model: initial model (bigger)
newmodel: pruned model (smaller)
newnetdef: pruned model parameters
blob2mask: maps blob name to channel mask
"""
l2bot = {l.name:l.bottom for l in newnetdef.layer}
l2top = {l.name:l.top for l in newnetdef.layer}
l2group = {l.name:l.convolution_param.group for l in newnetdef.layer}
for name in model.params.keys():
#if 'mbox' in name:
if ('perm' in name) or ('flat' in name) or ('priorbox' in name):
continue
top = l2top[name][0]
bot = l2bot[name][0]
topmask = blob2mask[top] if top in blob2mask else None
botmask = blob2mask[bot] if bot in blob2mask else None
conv = model.params[name][0].data
bias = model.params[name][1].data
if (topmask is not None) and (botmask is not None):
print('Setting parameters for layer '+name)
if topmask is not None:
conv = conv[topmask,:,:,:]
bias = bias[topmask]
if (botmask is not None) and (l2group[name]==1):
conv = conv[:,botmask,:,:]
newmodel.params[name][0].data[...] = conv
if name+'/scale' in newmodel.params:
newmodel.params[name+'/scale'][1].data[...] = bias
else:
newmodel.params[name][1].data[...] = bias
if __name__ == "__main__":
#get percents of pruned channels
percentile = int(sys.argv[1])
#Task-specific: mask only classes 'background' and 'person'
class_labels = ('background', 'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
'horse', 'motorbike', 'person', 'pottedplant', 'sheep',
'sofa', 'train', 'tvmonitor')
class_mask = [((e=='background') or (e=='person')) for e in class_labels]
n_old_classes = 21
n_coord = 4
#create masks for coordinates and confidence layers
#numbers are old/new numbers of boxes in those layers
mboxes = {
"conv11_mbox_loc" : np.array([True,]*n_coord*2 + [False,]*n_coord*(3-2)),
"conv13_mbox_loc" : np.array([True,]*n_coord*2 + [False,]*n_coord*(6-2)),
"conv14_2_mbox_loc" : np.array([True,]*n_coord*2 + [False,]*n_coord*(6-2)),
"conv15_2_mbox_loc" : np.array([True,]*n_coord*2 + [False,]*n_coord*(6-2)),
"conv16_2_mbox_loc" : np.array([True,]*n_coord*2 + [False,]*n_coord*(6-2)),
"conv17_2_mbox_loc" : np.array([True,]*n_coord*2 + [False,]*n_coord*(6-2)),
"conv11_mbox_conf" : np.array(class_mask*2 + [False,]*n_old_classes*(3-2)),
"conv13_mbox_conf" : np.array(class_mask*2 + [False,]*n_old_classes*(6-2)),
"conv14_2_mbox_conf" : np.array(class_mask*2 + [False,]*n_old_classes*(6-2)),
"conv15_2_mbox_conf" : np.array(class_mask*2 + [False,]*n_old_classes*(6-2)),
"conv16_2_mbox_conf" : np.array(class_mask*2 + [False,]*n_old_classes*(6-2)),
"conv17_2_mbox_conf" : np.array(class_mask*2 + [False,]*n_old_classes*(6-2))
}
#reference network (bigger)
ref_net = caffe.Net('models/ssd_voc/deploy.prototxt',
'models/ssd_voc/MobileNetSSD_deploy.caffemodel',
caffe.TEST)
#reference network parameters
with open('models/ssd_voc/deploy.prototxt', 'r') as f:
ref_par = NetParameter()
txtf.Merge(f.read(), ref_par)
#new network parameters: train,test,deploy
with open('models/ssd_face/ssd_face_train.prototxt', 'r') as f:
train_par = NetParameter()
txtf.Merge(f.read(), train_par)
with open('models/ssd_face/ssd_face_test.prototxt', 'r') as f:
test_par = NetParameter()
txtf.Merge(f.read(), test_par)
with open('models/ssd_face/ssd_face_deploy.prototxt', 'r') as f:
dep_par = NetParameter()
txtf.Merge(f.read(), dep_par)
#get faces collage and compute layer responses
faces = cv2.imread('images/faces.png')
inpt = transform_input(faces)
ref_net.blobs['data'].data[...] = inpt
output = ref_net.forward()
#get masks for regular convolutions
blobmask = get_masks(ref_net, percentile)
#get masks for coordinate|confidence convolutions
blobmask.update(mboxes)
#resize networks
sizes = {k:sum(v) for k,v in blobmask.items()}
train_par = resize_network(train_par, sizes)
test_par = resize_network(test_par, sizes, verbose=False)
dep_par = resize_network(dep_par, sizes, verbose=False)
#write new parameters
with open('models/ssd_face_pruned/face_train.prototxt', 'w') as f:
f.write(txtf.MessageToString(train_par))
with open('models/ssd_face_pruned/face_test.prototxt', 'w') as f:
f.write(txtf.MessageToString(test_par))
with open('models/ssd_face_pruned/face_deploy.prototxt', 'w') as f:
f.write(txtf.MessageToString(dep_par))
#load pruned net with empty parameters
new_net = caffe.Net('models/ssd_face_pruned/face_train.prototxt',
'models/empty.caffemodel', caffe.TRAIN)
#copy masked parameters to pruned net
set_params(ref_net, new_net, train_par, blobmask)
#save pruned net parameters
new_net.save('models/ssd_face_pruned/face_init.caffemodel')
|
py | b41210bf8f7d82443f61944a3a8d2b69332533ed | import sys
import copy
import time
class Debugger:
"""
Debugger class.
Receives a function object and function arguments in a list, runs the function while tracing it and produces results.
"""
def __init__(self, func, func_args, cmd_args):
self.func = func
self.func_name = func.__name__
self.func_args = func_args
self.cmd_args = cmd_args
self.curr_line = None
self.prev_variables = {}
self.variable_history = {}
self.line_history = {}
self.prev_time = time.time()
self.step = 1
self.results = {"code_info": {"function_name": self.func_name, "function_args": self.func_args, "cmd_args": self.cmd_args}, "execution_log": [], "variable_history": [], "line_history": []}
def run(self):
"""
Runs the function, and traces it.
:return: Analyzed tracing results.
"""
sys.settrace(self.__trace_calls)
self.prev_time = time.time()
sys.argv = self.cmd_args
self.results["returned_value"] = self.func(*self.func_args)
sys.settrace(None)
self.results["variable_history"] = [var_obj.get_dict() for var_obj in self.variable_history.values()]
self.results["line_history"] = [line_obj.get_dict() for line_obj in self.line_history.values()]
return self.results
def __trace_calls(self, frame, event, arg):
"""Trace function, used in sys.settrace."""
self.curr_line = frame.f_lineno
if frame.f_code.co_name == self.func_name:
return self.__trace_lines
def __trace_lines(self, frame, event, arg):
"""Runs every line executed in the traced function, and analyzes the changes in variables."""
curr_execution_log = {"step": self.step, "timestamp": time.time(), "line_num": self.curr_line, "actions": []}
self.results["execution_log"].append(curr_execution_log)
if self.curr_line not in self.line_history:
self.line_history[self.curr_line] = Line(self.curr_line)
self.line_history[self.curr_line].run_line(time.time() - self.prev_time)
curr_execution_log["line_runtime"] = self.line_history[self.curr_line].get_dict()
self.is_first_print_for_this_line = True
current_variables = frame.f_locals
for var, val in current_variables.items():
if var not in self.prev_variables:
curr_execution_log["actions"].append({"action": "init_var", "var": var, "val": val})
self.variable_history[var] = Variable(var, self.curr_line, self.step, copy.deepcopy(val))
elif self.prev_variables[var] != val:
prev_val = self.prev_variables[var]
if isinstance(prev_val, list) and isinstance(val, list):
self.__compare_lists(var, prev_val, val)
elif isinstance(prev_val, dict) and isinstance(val, dict):
self.__compare_dictionaries(var, prev_val, val)
else:
curr_execution_log["actions"].append({"action": "change_var", "var": var, "prev_val": prev_val, "new_val": val})
self.variable_history[var].add_value(self.step, self.curr_line, copy.deepcopy(val))
self.prev_variables = copy.deepcopy(current_variables)
self.prev_time = time.time()
self.curr_line = frame.f_lineno
self.step += 1
def __compare_lists(self, var, prev_val, val):
"""Utility function that compares two lists, and adds the changes to the execution log."""
curr_execution_log = self.results["execution_log"][-1]
for i in range(min(len(val), len(prev_val))):
if val[i] != prev_val[i]:
curr_execution_log["actions"].append({"action": "list_change", "var": var, "index": i, "prev_val": prev_val[i], "new_val": val[i]})
if len(val) > len(prev_val):
for i in range(len(prev_val), len(val)):
curr_execution_log["actions"].append({"action": "list_add", "var": var, "index": i, "val": val[i]})
if len(val) < len(prev_val):
for i in range(len(val), len(prev_val)):
curr_execution_log["actions"].append({"action": "list_remove", "var": var, "index": i})
def __compare_dictionaries(self, var, prev_val, val):
"""Utility function that compares two dictionaries, and adds the changes to the execution log."""
curr_execution_log = self.results["execution_log"][-1]
for elem in val:
if elem not in prev_val:
curr_execution_log["actions"].append({"action": "dict_add", "var": var, "key": elem, "val": val[elem]})
elif prev_val[elem] != val[elem]:
curr_execution_log["actions"].append({"action": "dict_change", "var": var, "key": elem, "prev_val": prev_val[elem], "new_val": val[elem]})
for elem in prev_val:
if elem not in val:
curr_execution_log["actions"].append({"action": "dict_remove", "var": var, "key": elem})
class Variable:
"""
Represents a variable, used in the Debugger class.
Stores variable name, and log of values by line and step number.
"""
def __init__(self, name, init_line, init_step, init_val):
self.name = name
self.line_value = []
self.add_value(init_step, init_line, init_val)
def add_value(self, step, line, value):
"""
Add a value to the variable's log.
:param int step: Step number this value was set in while running the program.
:param int line: Line number this value was set in while running the program.
:param value: Value of this variable in the corresponding step and line.
"""
self.line_value.append({"step": step, "line": line, "value": value})
def get_type(self):
"""
Returns the variable's type, if it stayed constant throughout its lifetime. Otherwise, "undefined".
"""
init_val = self.line_value[0]["value"]
t = type(init_val)
for lv in self.line_value:
if type(lv["value"]) != t:
return "undefined" # Undefined type - changed during execution
return t
def get_range(self):
"""
Returns the variable's range of values while running the program, if its type is int or float. Otherwise, None.
"""
if self.get_type() in [int, float]:
values = [lv["value"] for lv in self.line_value]
return [min(values), max(values)]
def get_dict(self):
"""
Returns a dictionary representation of the variable, to store for use by reporters.
"""
return {"var": self.name, "type": str(self.get_type()), "range": self.get_range(), "val_history": self.line_value}
class Line:
"""
Represents a line, used in the Debugger class.
Stores line number, number of times the line was executed and total time spent running the line.
"""
def __init__(self, line_num):
self.line_num = line_num
self.times_executed = 0
self.total_time = 0
def run_line(self, time):
"""
Stores an execution of the line, and updates the relevant variables.
:param float time: Time in seconds the line took to execute.
"""
self.times_executed += 1
self.total_time += time
def get_dict(self):
"""
Returns a dictionary representation of the line, to store for use by reporters.
"""
return {"line_num": self.line_num, "times_executed": self.times_executed, "total_time": self.total_time}
|
py | b412124d774bb120562037c2ed3e91483a60b182 | # -*- encoding:utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys, textwrap
from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc
from numpydoc.docscrape_sphinx import SphinxDocString, SphinxClassDoc
from nose.tools import *
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
doc_txt = '''\
numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N, N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
Other Parameters
----------------
spam : parrot
A parrot off its mortal coil.
Raises
------
RuntimeError
Some error
Warns
-----
RuntimeWarning
Some warning
Warnings
--------
Certain warnings apply.
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
See Also
--------
some, other, funcs
otherfunc : relationship
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss
'''
doc = NumpyDocString(doc_txt)
def test_signature():
assert doc['Signature'].startswith('numpy.multivariate_normal(')
assert doc['Signature'].endswith('spam=None)')
def test_summary():
assert doc['Summary'][0].startswith('Draw values')
assert doc['Summary'][-1].endswith('covariance.')
def test_extended_summary():
assert doc['Extended Summary'][0].startswith('The multivariate normal')
def test_parameters():
assert_equal(len(doc['Parameters']), 3)
assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape'])
arg, arg_type, desc = doc['Parameters'][1]
assert_equal(arg_type, '(N, N) ndarray')
assert desc[0].startswith('Covariance matrix')
assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3'
def test_other_parameters():
assert_equal(len(doc['Other Parameters']), 1)
assert_equal([n for n,_,_ in doc['Other Parameters']], ['spam'])
arg, arg_type, desc = doc['Other Parameters'][0]
assert_equal(arg_type, 'parrot')
assert desc[0].startswith('A parrot off its mortal coil')
def test_returns():
assert_equal(len(doc['Returns']), 2)
arg, arg_type, desc = doc['Returns'][0]
assert_equal(arg, 'out')
assert_equal(arg_type, 'ndarray')
assert desc[0].startswith('The drawn samples')
assert desc[-1].endswith('distribution.')
arg, arg_type, desc = doc['Returns'][1]
assert_equal(arg, 'list of str')
assert_equal(arg_type, '')
assert desc[0].startswith('This is not a real')
assert desc[-1].endswith('anonymous return values.')
def test_notes():
assert doc['Notes'][0].startswith('Instead')
assert doc['Notes'][-1].endswith('definite.')
assert_equal(len(doc['Notes']), 17)
def test_references():
assert doc['References'][0].startswith('..')
assert doc['References'][-1].endswith('2001.')
def test_examples():
assert doc['Examples'][0].startswith('>>>')
assert doc['Examples'][-1].endswith('True]')
def test_index():
assert_equal(doc['index']['default'], 'random')
assert_equal(len(doc['index']), 2)
assert_equal(len(doc['index']['refguide']), 2)
def non_blank_line_by_line_compare(a,b):
a = textwrap.dedent(a)
b = textwrap.dedent(b)
a = [l.rstrip() for l in a.split('\n') if l.strip()]
b = [l.rstrip() for l in b.split('\n') if l.strip()]
for n,line in enumerate(a):
if not line == b[n]:
raise AssertionError("Lines %s of a and b differ: "
"\n>>> %s\n<<< %s\n" %
(n,line,b[n]))
def test_str():
non_blank_line_by_line_compare(str(doc),
"""numpy.multivariate_normal(mean, cov, shape=None, spam=None)
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
Parameters
----------
mean : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
cov : (N, N) ndarray
Covariance matrix of the distribution.
shape : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
Returns
-------
out : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
Other Parameters
----------------
spam : parrot
A parrot off its mortal coil.
Raises
------
RuntimeError
Some error
Warns
-----
RuntimeWarning
Some warning
Warnings
--------
Certain warnings apply.
See Also
--------
`some`_, `other`_, `funcs`_
`otherfunc`_
relationship
Notes
-----
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
References
----------
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
Examples
--------
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
.. index:: random
:refguide: random;distributions, random;gauss""")
def test_sphinx_str():
sphinx_doc = SphinxDocString(doc_txt)
non_blank_line_by_line_compare(str(sphinx_doc),
"""
.. index:: random
single: random;distributions, random;gauss
Draw values from a multivariate normal distribution with specified
mean and covariance.
The multivariate normal or Gaussian distribution is a generalisation
of the one-dimensional normal distribution to higher dimensions.
:Parameters:
**mean** : (N,) ndarray
Mean of the N-dimensional distribution.
.. math::
(1+2+3)/3
**cov** : (N, N) ndarray
Covariance matrix of the distribution.
**shape** : tuple of ints
Given a shape of, for example, (m,n,k), m*n*k samples are
generated, and packed in an m-by-n-by-k arrangement. Because
each sample is N-dimensional, the output shape is (m,n,k,N).
:Returns:
**out** : ndarray
The drawn samples, arranged according to `shape`. If the
shape given is (m,n,...), then the shape of `out` is
(m,n,...,N).
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
list of str
This is not a real return value. It exists to test
anonymous return values.
:Other Parameters:
**spam** : parrot
A parrot off its mortal coil.
:Raises:
**RuntimeError**
Some error
:Warns:
**RuntimeWarning**
Some warning
.. warning::
Certain warnings apply.
.. seealso::
:obj:`some`, :obj:`other`, :obj:`funcs`
:obj:`otherfunc`
relationship
.. rubric:: Notes
Instead of specifying the full covariance matrix, popular
approximations include:
- Spherical covariance (`cov` is a multiple of the identity matrix)
- Diagonal covariance (`cov` has non-negative elements only on the diagonal)
This geometrical property can be seen in two dimensions by plotting
generated data-points:
>>> mean = [0,0]
>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis
>>> x,y = multivariate_normal(mean,cov,5000).T
>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show()
Note that the covariance matrix must be symmetric and non-negative
definite.
.. rubric:: References
.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic
Processes," 3rd ed., McGraw-Hill Companies, 1991
.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification,"
2nd ed., Wiley, 2001.
.. only:: latex
[1]_, [2]_
.. rubric:: Examples
>>> mean = (1,2)
>>> cov = [[1,0],[1,0]]
>>> x = multivariate_normal(mean,cov,(3,3))
>>> print x.shape
(3, 3, 2)
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
>>> print list( (x[0,0,:] - mean) < 0.6 )
[True, True]
""")
doc2 = NumpyDocString("""
Returns array of indices of the maximum values of along the given axis.
Parameters
----------
a : {array_like}
Array to look in.
axis : {None, integer}
If None, the index is into the flattened array, otherwise along
the specified axis""")
def test_parameters_without_extended_description():
assert_equal(len(doc2['Parameters']), 2)
doc3 = NumpyDocString("""
my_signature(*params, **kwds)
Return this and that.
""")
def test_escape_stars():
signature = str(doc3).split('\n')[0]
assert_equal(signature, 'my_signature(\*params, \*\*kwds)')
doc4 = NumpyDocString(
"""a.conj()
Return an array with all complex-valued elements conjugated.""")
def test_empty_extended_summary():
assert_equal(doc4['Extended Summary'], [])
doc5 = NumpyDocString(
"""
a.something()
Raises
------
LinAlgException
If array is singular.
Warns
-----
SomeWarning
If needed
""")
def test_raises():
assert_equal(len(doc5['Raises']), 1)
name,_,desc = doc5['Raises'][0]
assert_equal(name,'LinAlgException')
assert_equal(desc,['If array is singular.'])
def test_warns():
assert_equal(len(doc5['Warns']), 1)
name,_,desc = doc5['Warns'][0]
assert_equal(name,'SomeWarning')
assert_equal(desc,['If needed'])
def test_see_also():
doc6 = NumpyDocString(
"""
z(x,theta)
See Also
--------
func_a, func_b, func_c
func_d : some equivalent func
foo.func_e : some other func over
multiple lines
func_f, func_g, :meth:`func_h`, func_j,
func_k
:obj:`baz.obj_q`
:class:`class_j`: fubar
foobar
""")
assert len(doc6['See Also']) == 12
for func, desc, role in doc6['See Also']:
if func in ('func_a', 'func_b', 'func_c', 'func_f',
'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'):
assert(not desc)
else:
assert(desc)
if func == 'func_h':
assert role == 'meth'
elif func == 'baz.obj_q':
assert role == 'obj'
elif func == 'class_j':
assert role == 'class'
else:
assert role is None
if func == 'func_d':
assert desc == ['some equivalent func']
elif func == 'foo.func_e':
assert desc == ['some other func over', 'multiple lines']
elif func == 'class_j':
assert desc == ['fubar', 'foobar']
def test_see_also_print():
class Dummy(object):
"""
See Also
--------
func_a, func_b
func_c : some relationship
goes here
func_d
"""
pass
obj = Dummy()
s = str(FunctionDoc(obj, role='func'))
assert(':func:`func_a`, :func:`func_b`' in s)
assert(' some relationship' in s)
assert(':func:`func_d`' in s)
doc7 = NumpyDocString("""
Doc starts on second line.
""")
def test_empty_first_line():
assert doc7['Summary'][0].startswith('Doc starts')
def test_no_summary():
str(SphinxDocString("""
Parameters
----------"""))
def test_unicode():
doc = SphinxDocString("""
öäöäöäöäöåååå
öäöäöäööäååå
Parameters
----------
ååå : äää
ööö
Returns
-------
ååå : ööö
äää
""")
assert isinstance(doc['Summary'][0], str)
assert doc['Summary'][0] == 'öäöäöäöäöåååå'
def test_plot_examples():
cfg = dict(use_plots=True)
doc = SphinxDocString("""
Examples
--------
>>> import matplotlib.pyplot as plt
>>> plt.plot([1,2,3],[4,5,6])
>>> plt.show()
""", config=cfg)
assert 'plot::' in str(doc), str(doc)
doc = SphinxDocString("""
Examples
--------
.. plot::
import matplotlib.pyplot as plt
plt.plot([1,2,3],[4,5,6])
plt.show()
""", config=cfg)
assert str(doc).count('plot::') == 1, str(doc)
def test_class_members():
class Dummy(object):
"""
Dummy class.
"""
def spam(self, a, b):
"""Spam\n\nSpam spam."""
pass
def ham(self, c, d):
"""Cheese\n\nNo cheese."""
pass
@property
def spammity(self):
"""Spammity index"""
return 0.95
class Ignorable(object):
"""local class, to be ignored"""
pass
for cls in (ClassDoc, SphinxClassDoc):
doc = cls(Dummy, config=dict(show_class_members=False))
assert 'Methods' not in str(doc), (cls, str(doc))
assert 'spam' not in str(doc), (cls, str(doc))
assert 'ham' not in str(doc), (cls, str(doc))
assert 'spammity' not in str(doc), (cls, str(doc))
assert 'Spammity index' not in str(doc), (cls, str(doc))
doc = cls(Dummy, config=dict(show_class_members=True))
assert 'Methods' in str(doc), (cls, str(doc))
assert 'spam' in str(doc), (cls, str(doc))
assert 'ham' in str(doc), (cls, str(doc))
assert 'spammity' in str(doc), (cls, str(doc))
if cls is SphinxClassDoc:
assert '.. autosummary::' in str(doc), str(doc)
else:
assert 'Spammity index' in str(doc), str(doc)
def test_duplicate_signature():
# Duplicate function signatures occur e.g. in ufuncs, when the
# automatic mechanism adds one, and a more detailed comes from the
# docstring itself.
doc = NumpyDocString(
"""
z(x1, x2)
z(a, theta)
""")
assert doc['Signature'].strip() == 'z(a, theta)'
class_doc_txt = """
Foo
Parameters
----------
f : callable ``f(t, y, *f_args)``
Aaa.
jac : callable ``jac(t, y, *jac_args)``
Bbb.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Methods
-------
a
b
c
Examples
--------
For usage examples, see `ode`.
"""
def test_class_members_doc():
doc = ClassDoc(None, class_doc_txt)
non_blank_line_by_line_compare(str(doc),
"""
Foo
Parameters
----------
f : callable ``f(t, y, *f_args)``
Aaa.
jac : callable ``jac(t, y, *jac_args)``
Bbb.
Examples
--------
For usage examples, see `ode`.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Methods
-------
a
b
c
.. index::
""")
def test_class_members_doc_sphinx():
doc = SphinxClassDoc(None, class_doc_txt)
non_blank_line_by_line_compare(str(doc),
"""
Foo
:Parameters:
**f** : callable ``f(t, y, *f_args)``
Aaa.
**jac** : callable ``jac(t, y, *jac_args)``
Bbb.
.. rubric:: Examples
For usage examples, see `ode`.
.. rubric:: Attributes
=== ==========
t (float) Current time.
y (ndarray) Current variable values.
=== ==========
.. rubric:: Methods
=== ==========
a
b
c
=== ==========
""")
if __name__ == "__main__":
import nose
nose.run()
|
py | b412140a6c29dff6c05cceeebf996291deac48d3 | import connexion
import six
from swagger_server.models.model_flow_chart import ModelFlowChart # noqa: E501
from swagger_server.models.model_flow_chart_meta import ModelFlowChartMeta # noqa: E501
from swagger_server import util
def model_flow_model_flow_id_delete(model_flow_id): # noqa: E501
"""delete a model flow chart
delete a model flow chart # noqa: E501
:param model_flow_id: model flow chart id
:type model_flow_id: str
:rtype: object
"""
return 'do some magic!'
def model_flow_model_flow_id_get(model_flow_id): # noqa: E501
"""retrieve a model flow chart
retrieve a model flow chart # noqa: E501
:param model_flow_id: model flow chart id
:type model_flow_id: str
:rtype: List[ModelFlowChartMeta]
"""
return 'do some magic!'
def model_flow_model_flow_id_post(model_flow_id, model_flow_chart): # noqa: E501
"""create a model flow chart
create a model flow chart # noqa: E501
:param model_flow_id: model flow chart id
:type model_flow_id: str
:param model_flow_chart: model flow chart
:type model_flow_chart: dict | bytes
:rtype: object
"""
if connexion.request.is_json:
model_flow_chart = ModelFlowChart.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
def model_flow_model_flow_id_put(model_flow_id, model_flow_chart): # noqa: E501
"""update a model flow chart
update a model flow chart # noqa: E501
:param model_flow_id: model flow chart id
:type model_flow_id: str
:param model_flow_chart: model flow chart
:type model_flow_chart: dict | bytes
:rtype: object
"""
if connexion.request.is_json:
model_flow_chart = ModelFlowChart.from_dict(connexion.request.get_json()) # noqa: E501
return 'do some magic!'
|
py | b412159734a6bb142e0c09e5fa0936b22f37921c | # ------------------------------------------------------------------------------
# Transform a multi-channeled network output into a prediction, and similar
# helper functions.
# ------------------------------------------------------------------------------
import torch
def arg_max(output, channel_dim=1):
r"""Select the class with highest probability."""
return torch.argmax(output, dim=channel_dim)
def softmax(output, channel_dim=1):
r"""Softmax outputs so that the vlues add up to 1."""
f = torch.nn.Softmax(dim=channel_dim)
return f(output)
|
py | b41216391f034859b00e440ddd1d7697cffdfbdc | # Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__author__ = "The klio developers"
__version__ = "0.2.1"
__email__ = "[email protected]"
__description__ = "Core klio library for common functionality"
__uri__ = "https://github.com/spotify/klio"
|
py | b4121656727f53581af564df2f3793caa70a48da | #!/usr/bin/env python
# n-step Advantage Actor-Critic Agent (A2C) | Praveen Palanisamy
# Chapter 8, Hands-on Intelligent Agents with OpenAI Gym, 2018
from argparse import ArgumentParser
from datetime import datetime
from collections import namedtuple
import numpy as np
import torch
from torch.distributions.multivariate_normal import MultivariateNormal
from torch.distributions.categorical import Categorical
import torch.multiprocessing as mp
import torch.nn.functional as F
import gym
try:
import roboschool
except ImportError:
pass
from tensorboardX import SummaryWriter
from utils.params_manager import ParamsManager
from function_approximator.shallow import Actor as ShallowActor
from function_approximator.shallow import DiscreteActor as ShallowDiscreteActor
from function_approximator.shallow import Critic as ShallowCritic
from function_approximator.deep import Actor as DeepActor
from function_approximator.deep import DiscreteActor as DeepDiscreteActor
from function_approximator.deep import Critic as DeepCritic
from environment import carla_gym
import environment.atari as Atari
parser = ArgumentParser("deep_ac_agent")
parser.add_argument("--env", help="Name of the Gym environment",
default="Pendulum-v0", metavar="ENV_ID")
parser.add_argument("--params-file", default="a2c_parameters.json",
help="Path to parameters file.Default=a2c_parameters.json",
metavar="a2c_parameters.json")
parser.add_argument("--model-dir", default="trained_models/", metavar="MODEL_DIR",
help="Directory to save/load trained model. Default= ./trained_models/")
parser.add_argument("--render", action='store_true', default=False,
help="Whether to render the environment to the display. Default=False")
parser.add_argument("--test", action='store_true', default=False,
help="Tests a saved Agent model to see the performance. Disables learning")
parser.add_argument("--gpu-id", type=int, default=0, metavar="GPU_ID",
help="GPU device ID to use. Default:0")
args = parser.parse_args()
global_step_num = 0
params_manager= ParamsManager(args.params_file)
summary_file_path_prefix = params_manager.get_agent_params()['summary_file_path_prefix']
summary_file_path= summary_file_path_prefix + args.env + "_" + datetime.now().strftime("%y-%m-%d-%H-%M")
writer = SummaryWriter(summary_file_path)
# Export the parameters as json files to the log directory to keep track of the parameters used in each experiment
params_manager.export_env_params(summary_file_path + "/" + "env_params.json")
params_manager.export_agent_params(summary_file_path + "/" + "agent_params.json")
use_cuda = params_manager.get_agent_params()['use_cuda']
# Introduced in PyTorch 0.4
device = torch.device("cuda:" + str(args.gpu_id) if torch.cuda.is_available() and use_cuda else "cpu")
seed = params_manager.get_agent_params()['seed'] # With the intent to make the results reproducible
torch.manual_seed(seed)
np.random.seed(seed)
if torch.cuda.is_available() and use_cuda:
torch.cuda.manual_seed_all(seed)
Transition = namedtuple("Transition", ["s", "value_s", "a", "log_prob_a"])
class DeepActorCriticAgent(mp.Process):
def __init__(self, id, env_name, agent_params, env_params):
"""
An Advantage Actor-Critic agent that uses a deep neural network to represent its policy and value function
:param id: An integer ID to identify the agent in case there are multiple agent instances
:param env_name: Name/ID of the environment
:param agent_params: Parameters to be used by the agent
"""
super(DeepActorCriticAgent, self).__init__()
self.id = id
self.actor_name = "actor" + str(self.id)
self.env_name = env_name
self.params = agent_params
self.env_conf = env_params
self.policy = self.multi_variate_gaussian_policy
self.gamma = self.params['gamma']
self.trajectory = [] # Contains the trajectory of the agent as a sequence of Transitions
self.rewards = [] # Contains the rewards obtained from the env at every step
self.global_step_num = 0
self.best_mean_reward = - float("inf") # Agent's personal best mean episode reward
self.best_reward = - float("inf")
self.saved_params = False # Whether or not the params have been saved along with the model to model_dir
self.continuous_action_space = True #Assumption by default unless env.action_space is Discrete
def multi_variate_gaussian_policy(self, obs):
"""
Calculates a multi-variate gaussian distribution over actions given observations
:param obs: Agent's observation
:return: policy, a distribution over actions for the given observation
"""
mu, sigma = self.actor(obs)
value = self.critic(obs)
[ mu[:, i].clamp_(float(self.env.action_space.low[i]), float(self.env.action_space.high[i]))
for i in range(self.action_shape)] # Clamp each dim of mu based on the (low,high) limits of that action dim
sigma = torch.nn.Softplus()(sigma).squeeze() + 1e-7 # Let sigma be (smoothly) +ve
self.mu = mu.to(torch.device("cpu"))
self.sigma = sigma.to(torch.device("cpu"))
self.value = value.to(torch.device("cpu"))
if len(self.mu.shape) == 0: # See if mu is a scalar
#self.mu = self.mu.unsqueeze(0) # This prevents MultivariateNormal from crashing with SIGFPE
self.mu.unsqueeze_(0)
self.action_distribution = MultivariateNormal(self.mu, torch.eye(self.action_shape) * self.sigma, validate_args=True)
return self.action_distribution
def discrete_policy(self, obs):
"""
Calculates a discrete/categorical distribution over actions given observations
:param obs: Agent's observation
:return: policy, a distribution over actions for the given observation
"""
logits = self.actor(obs)
value = self.critic(obs)
self.logits = logits.to(torch.device("cpu"))
self.value = value.to(torch.device("cpu"))
self.action_distribution = Categorical(logits=self.logits)
return self.action_distribution
def preproc_obs(self, obs):
obs = np.array(obs) # Obs could be lazy frames. So, force fetch before moving forward
if len(obs.shape) == 3:
# Reshape obs from (H x W x C) order to this order: C x W x H and resize to (C x 84 x 84)
obs = np.reshape(obs, (obs.shape[2], obs.shape[1], obs.shape[0]))
obs = np.resize(obs, (obs.shape[0], 84, 84))
# Convert to torch Tensor, add a batch dimension, convert to float repr
obs = torch.from_numpy(obs).unsqueeze(0).float()
return obs
def process_action(self, action):
if self.continuous_action_space:
[action[:, i].clamp_(float(self.env.action_space.low[i]), float(self.env.action_space.high[i]))
for i in range(self.action_shape)] # Limit the action to lie between the (low, high) limits of the env
action = action.to(torch.device("cpu"))
return action.numpy().squeeze(0) # Convert to numpy ndarray, squeeze and remove the batch dimension
def get_action(self, obs):
obs = self.preproc_obs(obs)
action_distribution = self.policy(obs) # Call to self.policy(obs) also populates self.value with V(obs)
value = self.value
action = action_distribution.sample()
log_prob_a = action_distribution.log_prob(action)
action = self.process_action(action)
# Store the n-step trajectory while training. Skip storing the trajectories in test mode
if not self.params["test"]:
self.trajectory.append(Transition(obs, value, action, log_prob_a)) # Construct the trajectory
return action
def calculate_n_step_return(self, n_step_rewards, final_state, done, gamma):
"""
Calculates the n-step return for each state in the input-trajectory/n_step_transitions
:param n_step_rewards: List of rewards for each step
:param final_state: Final state in this n_step_transition/trajectory
:param done: True if the final state is terminal, False otherwise
:param gamma: Discount factor
:return: The n-step return for each state in the n_step_transitions
"""
g_t_n_s = list()
with torch.no_grad():
g_t_n = torch.tensor([[0]]).float() if done else self.critic(self.preproc_obs(final_state)).cpu()
for r_t in n_step_rewards[::-1]: # Reverse order; From r_tpn to r_t
g_t_n = torch.tensor(r_t).float() + self.gamma * g_t_n
g_t_n_s.insert(0, g_t_n) # n-step returns inserted to the left to maintain correct index order
return g_t_n_s
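# Note: the loop above evaluates the n-step return right to left,
#   G_t = r_t + gamma * r_{t+1} + ... + gamma^(n-1) * r_{t+n-1} + gamma^n * V(s_{t+n}),
# via the recursion g <- r_t + gamma * g, starting from g = V(s_{t+n})
# (or 0 if the final state is terminal).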
def calculate_loss(self, trajectory, td_targets):
"""
Calculates the critic and actor losses using the td_targets and self.trajectory
:param td_targets:
:return:
"""
n_step_trajectory = Transition(*zip(*trajectory))
v_s_batch = n_step_trajectory.value_s
log_prob_a_batch = n_step_trajectory.log_prob_a
actor_losses, critic_losses = [], []
for td_target, critic_prediction, log_p_a in zip(td_targets, v_s_batch, log_prob_a_batch):
td_err = td_target - critic_prediction
actor_losses.append(- log_p_a * td_err) # td_err is an unbiased estimated of Advantage
critic_losses.append(F.smooth_l1_loss(critic_prediction, td_target))
#critic_loss.append(F.mse_loss(critic_pred, td_target))
if self.params["use_entropy_bonus"]:
actor_loss = torch.stack(actor_losses).mean() - self.action_distribution.entropy().mean()
else:
actor_loss = torch.stack(actor_losses).mean()
critic_loss = torch.stack(critic_losses).mean()
writer.add_scalar(self.actor_name + "/critic_loss", critic_loss, self.global_step_num)
writer.add_scalar(self.actor_name + "/actor_loss", actor_loss, self.global_step_num)
return actor_loss, critic_loss
def learn(self, n_th_observation, done):
if self.params["clip_rewards"]:
self.rewards = np.sign(self.rewards).tolist() # Clip rewards to -1 or 0 or +1
td_targets = self.calculate_n_step_return(self.rewards, n_th_observation, done, self.gamma)
actor_loss, critic_loss = self.calculate_loss(self.trajectory, td_targets)
self.actor_optimizer.zero_grad()
actor_loss.backward(retain_graph=True)
self.actor_optimizer.step()
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
self.trajectory.clear()
self.rewards.clear()
def save(self):
model_file_name = self.params["model_dir"] + "A2C_" + self.env_name + ".ptm"
agent_state = {"Actor": self.actor.state_dict(),
"Critic": self.critic.state_dict(),
"best_mean_reward": self.best_mean_reward,
"best_reward": self.best_reward}
torch.save(agent_state, model_file_name)
print("Agent's state is saved to", model_file_name)
# Export the params used if not exported already
if not self.saved_params:
params_manager.export_agent_params(model_file_name + ".agent_params")
print("The parameters have been saved to", model_file_name + ".agent_params")
self.saved_params = True
def load(self):
model_file_name = self.params["model_dir"] + "A2C_" + self.env_name + ".ptm"
agent_state = torch.load(model_file_name, map_location= lambda storage, loc: storage)
self.actor.load_state_dict(agent_state["Actor"])
self.critic.load_state_dict(agent_state["Critic"])
self.actor.to(device)
self.critic.to(device)
self.best_mean_reward = agent_state["best_mean_reward"]
self.best_reward = agent_state["best_reward"]
print("Loaded Advantage Actor-Critic model state from", model_file_name,
" which fetched a best mean reward of:", self.best_mean_reward,
" and an all time best reward of:", self.best_reward)
def run(self):
# If a custom useful_region configuration for this environment ID is available, use it if not use the Default.
# Currently this is utilized for only the Atari env. Follows the same procedure as in Chapter 6
custom_region_available = False
for key, value in self.env_conf['useful_region'].items():
if key in args.env:
self.env_conf['useful_region'] = value
custom_region_available = True
break
if custom_region_available is not True:
self.env_conf['useful_region'] = self.env_conf['useful_region']['Default']
atari_env = False
for game in Atari.get_games_list():
if game.replace("_", "") in args.env.lower():
atari_env = True
if atari_env: # Use the Atari wrappers (like we did in Chapter 6) if it's an Atari env
self.env = Atari.make_env(self.env_name, self.env_conf)
else:
#print("Given environment name is not an Atari Env. Creating a Gym env")
self.env = gym.make(self.env_name)
self.state_shape = self.env.observation_space.shape
if isinstance(self.env.action_space.sample(), int): # Discrete action space
self.action_shape = self.env.action_space.n
self.policy = self.discrete_policy
self.continuous_action_space = False
else: # Continuous action space
self.action_shape = self.env.action_space.shape[0]
self.policy = self.multi_variate_gaussian_policy
self.critic_shape = 1
if len(self.state_shape) == 3: # Screen image is the input to the agent
if self.continuous_action_space:
self.actor= DeepActor(self.state_shape, self.action_shape, device).to(device)
else: # Discrete action space
self.actor = DeepDiscreteActor(self.state_shape, self.action_shape, device).to(device)
self.critic = DeepCritic(self.state_shape, self.critic_shape, device).to(device)
else: # Input is a (single dimensional) vector
if self.continuous_action_space:
#self.actor_critic = ShallowActorCritic(self.state_shape, self.action_shape, 1, self.params).to(device)
self.actor = ShallowActor(self.state_shape, self.action_shape, device).to(device)
else: # Discrete action space
self.actor = ShallowDiscreteActor(self.state_shape, self.action_shape, device).to(device)
self.critic = ShallowCritic(self.state_shape, self.critic_shape, device).to(device)
self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.params["learning_rate"])
self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=self.params["learning_rate"])
# Handle loading and saving of trained Agent models
episode_rewards = list()
prev_checkpoint_mean_ep_rew = self.best_mean_reward
num_improved_episodes_before_checkpoint = 0 # To keep track of the num of ep with higher perf to save model
#print("Using agent_params:", self.params)
if self.params['load_trained_model']:
try:
self.load()
prev_checkpoint_mean_ep_rew = self.best_mean_reward
except FileNotFoundError:
if args.test: # Test a saved model
print("FATAL: No saved model found. Cannot test. Press any key to train from scratch")
input()
else:
print("WARNING: No trained model found for this environment. Training from scratch.")
for episode in range(self.params["max_num_episodes"]):
obs = self.env.reset()
done = False
ep_reward = 0.0
step_num = 0
while not done:
action = self.get_action(obs)
next_obs, reward, done, _ = self.env.step(action)
self.rewards.append(reward)
ep_reward += reward
step_num +=1
if not args.test and (step_num >= self.params["learning_step_thresh"] or done):
self.learn(next_obs, done)
step_num = 0
# Monitor performance and save Agent's state when perf improves
if done:
episode_rewards.append(ep_reward)
if ep_reward > self.best_reward:
self.best_reward = ep_reward
if np.mean(episode_rewards) > prev_checkpoint_mean_ep_rew:
num_improved_episodes_before_checkpoint += 1
if num_improved_episodes_before_checkpoint >= self.params["save_freq_when_perf_improves"]:
prev_checkpoint_mean_ep_rew = np.mean(episode_rewards)
self.best_mean_reward = np.mean(episode_rewards)
self.save()
num_improved_episodes_before_checkpoint = 0
obs = next_obs
self.global_step_num += 1
if args.render:
self.env.render()
#print(self.actor_name + ":Episode#:", episode, "step#:", step_num, "\t rew=", reward, end="\r")
writer.add_scalar(self.actor_name + "/reward", reward, self.global_step_num)
print("{}:Episode#:{} \t ep_reward:{} \t mean_ep_rew:{}\t best_ep_reward:{}".format(
self.actor_name, episode, ep_reward, np.mean(episode_rewards), self.best_reward))
writer.add_scalar(self.actor_name + "/ep_reward", ep_reward, self.global_step_num)
if __name__ == "__main__":
agent_params = params_manager.get_agent_params()
agent_params["model_dir"] = args.model_dir
agent_params["test"] = args.test
env_params = params_manager.get_env_params() # Used with Atari environments
env_params["env_name"] = args.env
mp.set_start_method('spawn') # Prevents RuntimeError during cuda init
agent_procs =[DeepActorCriticAgent(id, args.env, agent_params, env_params)
for id in range(agent_params["num_agents"])]
[p.start() for p in agent_procs]
[p.join() for p in agent_procs]
|
py | b41216a02519a430744eb6996c150ef74bc80026 | from __future__ import print_function
import os
import argparse
import torch
import torch.backends.cudnn as cudnn
import numpy as np
from data import cfg_mnet, cfg_re50
from layers.functions.prior_box import PriorBox
from utils.nms.py_cpu_nms import py_cpu_nms
import cv2
from models.retinaface import RetinaFace
from utils.box_utils import decode, decode_landm
import time
parser = argparse.ArgumentParser(description='Retinaface')
parser.add_argument('-m', '--trained_model', default='./weights/Resnet50_Final.pth',
type=str, help='Trained state_dict file path to open')
parser.add_argument('--network', default='resnet50', help='Backbone network mobile0.25 or resnet50')
parser.add_argument('--cpu', action="store_true", default=False, help='Use cpu inference')
parser.add_argument('--confidence_threshold', default=0.02, type=float, help='confidence_threshold')
parser.add_argument('--top_k', default=5000, type=int, help='top_k')
parser.add_argument('--nms_threshold', default=0.4, type=float, help='nms_threshold')
parser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')
parser.add_argument('-s', '--save_image', action="store_true", default=True, help='show detection results')
parser.add_argument('--vis_thres', default=0.6, type=float, help='visualization_threshold')
args = parser.parse_args()
def check_keys(model, pretrained_state_dict):
ckpt_keys = set(pretrained_state_dict.keys())
model_keys = set(model.state_dict().keys())
used_pretrained_keys = model_keys & ckpt_keys
unused_pretrained_keys = ckpt_keys - model_keys
missing_keys = model_keys - ckpt_keys
print('Missing keys:{}'.format(len(missing_keys)))
print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))
print('Used keys:{}'.format(len(used_pretrained_keys)))
assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'
return True
def remove_prefix(state_dict, prefix):
''' Old style model is stored with all names of parameters sharing common prefix 'module.' '''
print('remove prefix \'{}\''.format(prefix))
f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
return {f(key): value for key, value in state_dict.items()}
def load_model(model, pretrained_path, load_to_cpu):
print('Loading pretrained model from {}'.format(pretrained_path))
if load_to_cpu:
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
else:
device = torch.cuda.current_device()
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))
if "state_dict" in pretrained_dict.keys():
pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
else:
pretrained_dict = remove_prefix(pretrained_dict, 'module.')
check_keys(model, pretrained_dict)
model.load_state_dict(pretrained_dict, strict=False)
return model
if __name__ == '__main__':
torch.set_grad_enabled(False)
cfg = None
if args.network == "mobile0.25":
cfg = cfg_mnet
elif args.network == "resnet50":
cfg = cfg_re50
# net and model
net = RetinaFace(cfg=cfg, phase = 'test')
net = load_model(net, args.trained_model, args.cpu)
net.eval()
print('Finished loading model!')
print(net)
cudnn.benchmark = True
device = torch.device("cpu" if args.cpu else "cuda")
net = net.to(device)
resize = 1
# testing begin
for i in range(100):
image_path = "./curve/test.jpg"
img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)
img = np.float32(img_raw)
im_height, im_width, _ = img.shape
scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
img -= (104, 117, 123)
img = img.transpose(2, 0, 1)
img = torch.from_numpy(img).unsqueeze(0)
img = img.to(device)
scale = scale.to(device)
tic = time.time()
loc, conf, landms = net(img) # forward pass
# print('net forward time: {:.4f}'.format(time.time() - tic))
priorbox = PriorBox(cfg, image_size=(im_height, im_width))
priors = priorbox.forward()
priors = priors.to(device)
prior_data = priors.data
boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])
boxes = boxes * scale / resize
boxes = boxes.cpu().numpy()
scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
landms = decode_landm(landms.data.squeeze(0), prior_data, cfg['variance'])
scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],
img.shape[3], img.shape[2], img.shape[3], img.shape[2],
img.shape[3], img.shape[2]])
scale1 = scale1.to(device)
landms = landms * scale1 / resize
landms = landms.cpu().numpy()
# ignore low scores
inds = np.where(scores > args.confidence_threshold)[0]
boxes = boxes[inds]
landms = landms[inds]
scores = scores[inds]
# keep top-K before NMS
order = scores.argsort()[::-1][:args.top_k]
boxes = boxes[order]
landms = landms[order]
scores = scores[order]
# do NMS
dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
keep = py_cpu_nms(dets, args.nms_threshold)
# keep = nms(dets, args.nms_threshold,force_cpu=args.cpu)
dets = dets[keep, :]
landms = landms[keep]
        # keep top-K after NMS
dets = dets[:args.keep_top_k, :]
landms = landms[:args.keep_top_k, :]
dets = np.concatenate((dets, landms), axis=1)
# show image
if args.save_image:
for b in dets[:1]:
if b[4] < args.vis_thres:
continue
text = "{:.4f}".format(b[4])
b = list(map(int, b))
cv2.rectangle(img_raw, (b[0], b[1]), (b[2], b[3]), (0, 0, 255), 2)
cx = b[0]
cy = b[1] + 12
cv2.putText(img_raw, text, (cx, cy),
cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))
# landms
cv2.circle(img_raw, (b[5], b[6]), 1, (0, 0, 255), 4)
cv2.circle(img_raw, (b[7], b[8]), 1, (0, 255, 255), 4)
cv2.circle(img_raw, (b[9], b[10]), 1, (255, 0, 255), 4)
cv2.circle(img_raw, (b[11], b[12]), 1, (0, 255, 0), 4)
cv2.circle(img_raw, (b[13], b[14]), 1, (255, 0, 0), 4)
# save image
name = "test1.jpg"
cv2.imwrite(name, img_raw)
|
py | b412173c0dd0b71df3875606272488aff056806c | import datetime
from django.test import TestCase
from django.utils import timezone
from ..forms import RenewBookForm
class RenewBookFormTest(TestCase):
def test_renew_form_date_field_label(self):
form = RenewBookForm()
self.assertTrue(form.fields['renewal_date'].label is None or form.fields['renewal_date'].label == 'renewal date')
def test_renew_form_date_field_help_text(self):
form = RenewBookForm()
self.assertEqual(form.fields['renewal_date'].help_text, 'Enter a date between now and 4 weeks (default 3).')
def test_renew_form_date_in_past(self):
date = datetime.date.today() - datetime.timedelta(days=1)
form = RenewBookForm(data={'renewal_date': date})
self.assertFalse(form.is_valid())
def test_renew_form_date_too_far_in_future(self):
date = datetime.date.today() + datetime.timedelta(weeks=4) + datetime.timedelta(days=1)
form = RenewBookForm(data={'renewal_date': date})
self.assertFalse(form.is_valid())
def test_renew_form_date_today(self):
date = datetime.date.today()
form = RenewBookForm(data={'renewal_date': date})
self.assertTrue(form.is_valid())
def test_renew_form_date_max(self):
date = timezone.now() + datetime.timedelta(weeks=4)
form = RenewBookForm(data={'renewal_date': date})
self.assertTrue(form.is_valid())
|
py | b41218bfbbe05f71ce1eabd4e83a58065e42d1f5 | import socket
s = socket.socket()
host = "localhost"
port = 1885
s.bind((host, port))
s.listen(5)
(c, addr) = s.accept()
print('Connection Established from', addr)
c.send(b'\x10\x17\x00\x04MQTT\x04\x02\x00<\x00\x0bpython_test')
c.close()
|
py | b41218eea360fd16e3c7b158d9cd7956c4a3218b | # -*- coding: utf8 - *-
from __future__ import absolute_import, print_function, unicode_literals
from cihai import extend
from cihai.core import Cihai
from cihai.data.unihan.dataset import Unihan, UnihanVariants
class SimplestDataset(extend.Dataset):
def a_method(self):
return 'hi'
def test_add_dataset():
c = Cihai()
c.add_dataset(SimplestDataset, namespace='simple')
assert hasattr(c, 'simple')
assert isinstance(c.simple, extend.Dataset)
assert hasattr(c.simple, 'a_method')
assert callable(c.simple.a_method)
assert c.simple.a_method() == 'hi'
class SimplestSQLAlchemyDataset(extend.Dataset, extend.SQLAlchemyMixin):
def a_method(self):
return 'hi'
def test_add_dataset_with_db():
c = Cihai()
c.add_dataset(SimplestSQLAlchemyDataset, namespace='simple')
assert hasattr(c, 'simple')
assert isinstance(c.simple, extend.Dataset)
assert hasattr(c.simple, 'a_method')
assert callable(c.simple.a_method)
assert c.simple.a_method() == 'hi'
assert hasattr(c, 'sql')
assert hasattr(c.simple, 'sql')
def test_add_dataset_unihan(unihan_options):
c = Cihai()
c.add_dataset(Unihan, namespace='unihan')
assert hasattr(c, 'unihan')
assert isinstance(c.unihan, extend.Dataset)
c.unihan.sql
c.unihan.bootstrap(options=unihan_options)
U = c.sql.base.classes.Unihan
first_glyph = (
c.unihan.sql.session.query(U).filter(U.kDefinition.isnot(None)).first()
)
char = first_glyph.char
assert (
c.unihan.lookup_char(char=char).first().kDefinition == first_glyph.kDefinition
)
assert (
c.unihan.reverse_char(hints=[first_glyph.kDefinition]).first().char == char
), 'works with list of column value matches'
assert (
c.unihan.reverse_char(hints=first_glyph.kDefinition).first().char == char
), 'works with strings'
c.unihan.add_plugin(UnihanVariants, 'variants')
assert hasattr(c.unihan, 'variants')
def variant_list(field):
for char in c.unihan.with_fields(field):
variants = []
for var in char.untagged_vars(field):
variants.append(var)
yield (char, variants)
result = {char: variants for (char, variants) in variant_list('kZVariant')}
assert len(result.values()) > 0
assert len(result.keys()) > 0
|
py | b41219652af040caa0ca0a428555c55425633905 | from enum import Enum, unique
import math
# Hand (card group) types
@unique
class CardsType(Enum):
    Error = -1 # does not match any rule
    Pass = 0 # pass
    Single = 1 # single card
    Double = 2 # pair
    Triple = 3 # bomb (three of a kind)
    Quad = 4 # bomb (four of a kind)
    Jokers = 5 # pair of jokers
    Dragon = 6 # single straight
    DoubleDragon = 7 # double straight (consecutive pairs)
    Plane3With0 = 8 # plane (including a lone triple), carrying nothing
    Plane3With1 = 9 # plane carrying single cards
    Plane3With2 = 10 # plane carrying pairs
def is_plane_type(type):
return type.value >= CardsType.Plane3With0.value and type.value <= CardsType.Plane3With2.value
def get_card_point(card):
if card == 53:
return 15
return math.floor(card / 4) + 1
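# Illustrative examples (not in the original source): with this mapping cards 0-3
# are point 1, 48-51 are point 13, 52 is 14 and the joker 53 is 15,
# e.g. get_card_point(51) == 13 and get_card_point(53) == 15.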
def get_dragon_next(point):
if point > 2 and point < 13:
return point + 1
elif point == 13:
return 1
else:
        return -1 # the straight cannot continue
def point_compare(point1, point2):
def cmpval(point):
if point <= 2 or point == 14:
return point + 14
else:
return point
v1 = cmpval(point1)
v2 = cmpval(point2)
if v1 > v2:
return 1
elif v1 == v2:
return 0
else:
return -1
def card_compare(card1, card2):
return point_compare(get_card_point(card1), get_card_point(card2))
# Hand type checks and rank value extraction
def get_pass_val(pt):
if len(pt) == 0:
return 0
def get_single_val(pt):
if len(pt) == 1:
return pt[0]
def get_jokers_val(pt):
if len(pt) == 2 and pt[0] == 14 and pt[1] == 15:
return 0
def get_double_val(pt):
if len(pt) == 2 and pt[0] == pt[1]:
return pt[0]
def get_triple_val(pt):
if len(pt) == 3 and pt[0] == pt[1] and pt[0] == pt[2]:
return pt[0]
def get_quad_val(pt):
if len(pt) == 4 and pt[0] == pt[1] and pt[0] == pt[2] and pt[0] == pt[3]:
return pt[0]
def get_dragon_val(pt):
v = pt[0]
for val in pt:
if val != v:
return
v = get_dragon_next(v)
return pt[0]
def get_double_dragon_val(pt):
if len(pt) % 2 == 1:
return
v = pt[0]
for i, val in enumerate(pt):
if val != v:
return
if i % 2 == 1:
v = get_dragon_next(v)
return pt[0]
def get_plane3with0_val(pt):
if len(pt) % 3 != 0:
return
v = pt[0]
for i, val in enumerate(pt):
if val != v:
return
if i % 3 == 2:
v = get_dragon_next(v)
return pt[0]
def get_plane3with1_val(pt):
if len(pt) % 4 != 0:
return
triples = []
singles = []
i = 0
while i < len(pt):
if i < len(pt) - 2 and pt[i] == pt[i + 1] and pt[i] == pt[i + 2]:
triples.append(pt[i])
i += 3
else:
singles.append(pt[i])
i += 1
if len(triples) == len(singles):
        # the attached cards must all be distinct
        i = 0
        while i < len(singles) - 1:
            if singles[i] == singles[i + 1]:
                return
            i += 1
return get_dragon_val(triples)
def get_plane3with2_val(pt):
if len(pt) % 4 != 0:
return
triples = []
doubles = []
i = 0
while i < len(pt):
if i < len(pt) - 2 and pt[i] == pt[i + 1] and pt[i] == pt[i + 2]:
triples.append(pt[i])
i += 3
elif i < len(pt) - 1 and pt[i] == pt[i + 1]:
doubles.append(pt[i])
i += 2
else:
return
if len(triples) == len(doubles):
        # the attached cards must all be distinct
        i = 0
        while i < len(doubles) - 1:
            if doubles[i] == doubles[i + 1]:
                return
            i += 1
return get_dragon_val(triples) |
py | b4121a34948547c9372ba8662fbd63464df2e4a4 | # Databricks notebook source
# MAGIC %sh
# MAGIC
# MAGIC echo 'this is a test' |
py | b4121d037ef3cabccd6798d8741baae6559e6116 | # Copyright (c) 2008-2009 Pablo Flouret <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met: Redistributions of
# source code must retain the above copyright notice, this list of conditions and
# the following disclaimer. Redistributions in binary form must reproduce the
# above copyright notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the distribution.
# Neither the name of the software nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time
import urwid
import urwid.display_common
import xmmsclient
import commands
import containers
import mif
import signals
import util
import widgets
import xmms
try:
from PIL import Image
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
except ImportError:
pass
class NowPlaying(urwid.WidgetWrap):
def __init__(self, app, show_cover=True, formatname='nowplaying'):
self.xs = xmms.get()
self.app = app
self.show_cover = show_cover
self.format = formatname
self.parser = mif.FormatParser(self.app.config.format(formatname))
self.ctx = self.info = {}
self.cur_hash = None
self.status = self.xs.playback_status()
self.time = 0
self.progress = urwid.ProgressBar('progress-normal', 'progress-complete', 0, 100,
'progress-smooth')
self.song = urwid.Text('', align='right')
if self.show_cover:
self.cover = AlbumCoverWidget(maxcols=65, align='center', valign='top')
cover_w = self.cover
else:
cover_w = urwid.SolidFill(' ')
fill = urwid.SolidFill(' ')
w = urwid.Columns([('fixed', 1, fill),
cover_w,
('fixed', 2, fill),
urwid.Filler(urwid.Pile([('flow', self.song),
('fixed', 1, urwid.SolidFill(' ')),
('flow', self.progress)]), 'top'),
('fixed', 1, fill),
])
self.__super.__init__(w)
signals.connect('xmms-playback-status', self.on_xmms_playback_status)
signals.connect('xmms-playback-current-info', self.on_xmms_playback_current_info)
signals.connect('xmms-playback-playtime', self.on_xmms_playback_playtime)
self.xs.playback_current_info(self.on_xmms_playback_current_info, sync=False)
def update(self):
status_desc = {xmmsclient.PLAYBACK_STATUS_PLAY: 'PLAYING',
xmmsclient.PLAYBACK_STATUS_STOP: 'STOPPED',
xmmsclient.PLAYBACK_STATUS_PAUSE: 'PAUSED '}
self.ctx['status'] = status_desc[self.status]
self.ctx['elapsed'] = util.humanize_time(self.time)
if 'duration' in self.info:
self.ctx['total'] = util.humanize_time(self.info['duration'])
self.progress.set_completion(float(self.time) / self.info['duration'] * 100)
self.song.set_text(self.parser.eval(self.ctx))
def on_xmms_playback_playtime(self, milli):
if self.show_cover and not self.cur_hash:
self.cover.reset()
if self.time/1000 != milli/1000:
self.time = milli
self.update()
def on_xmms_playback_status(self, status):
self.status = status
self.update()
def on_xmms_playback_current_info(self, info):
self.info = info
self.ctx = dict(zip((k[1] for k in self.info), self.info.values()))
if self.show_cover:
if 'picture_front' in self.info:
# TODO: cache the picture to disk (or open directly from disk if local?)
hash = self.info['picture_front']
if hash != self.cur_hash:
self.xs.bindata_retrieve(hash, cb=self._set_cover_cb, sync=False)
self.cur_hash = hash
else:
self.cover.reset()
self.cur_hash = None
self.update()
def _set_cover_cb(self, r):
if not r.iserror():
self.cover.set_data(r.value())
self._invalidate()
def cmd_same(self, args):
fields = args.split()
if 'id' in self.info:
q = ' AND '.join('%s:"%s"' % (f, self.info[f]) for f in fields if self.info.get(f))
if q:
self.app.search(q)
else:
pass # TODO: error message
def get_contexts(self):
return [self]
_colormap_cache = {}
class AlbumCoverWidget(urwid.WidgetWrap):
def __init__(self, data=None, maxcols=-1, align='center', valign='middle'):
self.maxcols = maxcols
self.img = None
self.dim = None
self.step = 0
self.cheesy_last_animated = 0
        self.text = urwid.Text('', wrap=urwid.ANY)
        self.filler = urwid.Filler(self.text, valign)
        self.padding = urwid.Padding(self.filler, align)
        self.__super.__init__(self.filler)
        # apply any initial cover data only after the wrapped widgets exist,
        # since set_data() touches self.text and self.padding
        if data:
            self.set_data(data)
def reset(self):
self.dim = None
self.img = None
self.text.set_text('')
self._w = self.filler
self._invalidate()
def set_data(self, data):
try:
self.img = Image.open(StringIO(data))
if self.img.mode == 'P':
self.img = self.img.convert('RGB')
self.dim = None
self.text.align = 'left'
self._w = self.padding
except IOError, e:
self.reset()
self._invalidate()
def closest_color(self, rgb):
global _colormap_cache
n = rgb[0] << 16 | rgb[1] << 8 | rgb[2]
if n in _colormap_cache:
return _colormap_cache[n]
distance = 257*257*3
match = 0
colors = urwid.display_common._COLOR_VALUES_256[16:]
indexes = range(16,256)
for i, values in zip(indexes, colors):
rd, gd, bd = rgb[0] - values[0], rgb[1] - values[1], rgb[2] - values[2]
d = rd*rd + gd*gd + bd*bd
if d < distance:
match = i
distance = d
_colormap_cache[n] = match
return match
def get_markup(self, img):
markup = []
for y in range(0, img.size[1], 1):
last = ''
n = 0
for x in range(0, img.size[0], 1):
rgb = img.getpixel((x,y))
if type(rgb) == int:
rgb = (rgb >> 16 & 0xff, rgb >> 8 & 0xff, rgb & 0xff)
c = self.closest_color(rgb)
if c != last:
if last:
#markup.append((urwid.AttrSpec(last, last), ' '*n))
markup.append(('h%d'%last, ' '*n))
last = c
n = 0
n += 1
if n:
#markup.append((urwid.AttrSpec(last, last), ' '*n))
markup.append(('h%d'%last, ' '*n))
markup.append('\n')
return markup[:-1]
def scaled_dim(self, size):
w = size[0]
if self.maxcols > 0 and size[0] > self.maxcols:
w = self.maxcols
w = min(w, self.img.size[0])
h = (w/2) * self.img.size[1] / self.img.size[0]
if len(size) > 1 and h > size[1]:
h = size[1]
w = (h * self.img.size[0] / self.img.size[1])*2
return w, h
def render(self, size, focus=False):
if self.img:
dim = self.scaled_dim(size)
if dim != self.dim:
self.dim = dim
img = self.img.resize(dim, Image.ANTIALIAS)
self.text.set_text(self.get_markup(img))
self._w.width = dim[0]
return self._w.render(size)
|
py | b4121e845e83f57618c06b079311ef72c8b72dcb | import pytest
import numpy as np
from mikeio import Mesh
from fmdap import Pfs
@pytest.fixture
def pfs():
pfs_file = "tests/testdata/OresundHD2D_EnKF10/OresundHD2D_EnKF10.m21fm"
return Pfs(pfs_file)
@pytest.fixture
def mesh():
return Mesh("tests/testdata/Oresund_mesh_GEO.mesh")
def test_dda(pfs):
assert "METHOD" in pfs.dda
assert pfs.dda["METHOD"]["type"] == 1
def test_sections(pfs):
assert "METHOD" in pfs.sections
def test_get_item(pfs):
assert "type" in pfs["METHOD"]
assert "type" in pfs["method"]
def test_get_attr(pfs):
assert "type" in pfs.METHOD
assert "type" in pfs.method
def test_model_errors(pfs):
df = pfs.model_errors
assert "include" in df
assert len(df) == 2
def test_measurements(pfs):
df = pfs.measurements
assert len(df) == 4
def test_measurement_positions(pfs):
df = pfs.measurement_positions
assert "x" in df
assert "name" in df
assert len(df) == 4
def test_validate_positions(mesh, pfs):
df = pfs.validate_positions(mesh, pfs.measurements)
assert len(df) == 4
def test_diagnostics(pfs):
df = pfs.diagnostics
assert len(df) == 9
assert df.loc[9].file_name == "Diagnostics_Global_DA_statistics.dfs0"
assert np.all(df.type < 4)
|
py | b4121ecac28e8f5806d5a01c217cc495480fa33b | #!/usr/bin/env python
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='stickytape',
version='0.1.14',
description='Convert Python packages into a single script',
long_description=read("README.rst"),
author='Michael Williamson',
author_email='[email protected]',
url='http://github.com/mwilliamson/stickytape',
packages=['stickytape'],
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
license="BSD-2-Clause",
entry_points={
"console_scripts": [
"stickytape=stickytape.main:main"
]
},
)
|
py | b4121f6c365c249d211f85bb32a01c31b68529b7 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tokenization utilities."""
import sys
import collections
import unicodedata
def convert_to_printable(text):
"""
Converts `text` to a printable coding format.
"""
if sys.version_info[0] == 3:
if isinstance(text, str):
return text
if isinstance(text, bytes):
return text.decode("utf-8", "ignore")
raise ValueError("Only support type `str` or `bytes`, while text type is `%s`" % (type(text)))
if sys.version_info[0] == 2:
if isinstance(text, str):
return text
if isinstance(text, unicode):
return text.encode("utf-8")
raise ValueError("Only support type `str` or `unicode`, while text type is `%s`" % (type(text)))
raise ValueError("Only supported when running on Python2 or Python3.")
def convert_to_unicode(text):
"""
Converts `text` to Unicode format.
"""
if sys.version_info[0] == 3:
if isinstance(text, str):
return text
if isinstance(text, bytes):
return text.decode("utf-8", "ignore")
raise ValueError("Only support type `str` or `bytes`, while text type is `%s`" % (type(text)))
if sys.version_info[0] == 2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
if isinstance(text, unicode):
return text
raise ValueError("Only support type `str` or `unicode`, while text type is `%s`" % (type(text)))
raise ValueError("Only supported when running on Python2 or Python3.")
def load_vocab_file(vocab_file):
"""
Loads a vocabulary file and turns into a {token:id} dictionary.
"""
vocab_dict = collections.OrderedDict()
index = 0
with open(vocab_file, "r") as vocab:
while True:
token = convert_to_unicode(vocab.readline())
if not token:
break
token = token.strip()
vocab_dict[token] = index
index += 1
return vocab_dict
def convert_by_vocab_dict(vocab_dict, items):
"""
Converts a sequence of [tokens|ids] according to the vocab dict.
"""
output = []
for item in items:
if item in vocab_dict:
output.append(vocab_dict[item])
else:
output.append(vocab_dict["<unk>"])
return output
class WhiteSpaceTokenizer():
"""
Whitespace tokenizer.
"""
def __init__(self, vocab_file):
self.vocab_dict = load_vocab_file(vocab_file)
self.inv_vocab_dict = {index: token for token, index in self.vocab_dict.items()}
def _is_whitespace_char(self, char):
"""
Checks if it is a whitespace character(regard "\t", "\n", "\r" as whitespace here).
"""
if char in (" ", "\t", "\n", "\r"):
return True
uni = unicodedata.category(char)
if uni == "Zs":
return True
return False
def _is_control_char(self, char):
"""
Checks if it is a control character.
"""
if char in ("\t", "\n", "\r"):
return False
uni = unicodedata.category(char)
if uni in ("Cc", "Cf"):
return True
return False
def _clean_text(self, text):
"""
Remove invalid characters and cleanup whitespace.
"""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or self._is_control_char(char):
continue
if self._is_whitespace_char(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
def _whitespace_tokenize(self, text):
"""
Clean whitespace and split text into tokens.
"""
text = text.strip()
if not text:
tokens = []
else:
tokens = text.split()
return tokens
def tokenize(self, text):
"""
Tokenizes text.
"""
text = convert_to_unicode(text)
text = self._clean_text(text)
tokens = self._whitespace_tokenize(text)
return tokens
def convert_tokens_to_ids(self, tokens):
return convert_by_vocab_dict(self.vocab_dict, tokens)
def convert_ids_to_tokens(self, ids):
return convert_by_vocab_dict(self.inv_vocab_dict, ids)
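# Usage sketch (illustrative, not part of the original module). It assumes a
# plain-text vocab file with one token per line that includes an "<unk>" entry:
#     tokenizer = WhiteSpaceTokenizer("vocab.txt")
#     tokens = tokenizer.tokenize("Hello world")
#     ids = tokenizer.convert_tokens_to_ids(tokens)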
|
py | b412203fde0e8c06dc18c47dbd128fcd7e27ddc5 | # This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, Paul D. Nation and Robert J. Johansson.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
__all__ = ['expect', 'variance']
import numpy as np
import scipy.sparse as sp
from qutip.qobj import Qobj, isoper
from qutip.eseries import eseries
from qutip.cy.spmatfuncs import (cy_expect_rho_vec, cy_expect_psi, cy_spmm_tr,
expect_csr_ket)
expect_rho_vec = cy_expect_rho_vec
expect_psi = cy_expect_psi
def expect(oper, state):
'''Calculates the expectation value for operator(s) and state(s).
Parameters
----------
oper : qobj/array-like
        A single operator or a `list` of operators for expectation value.
state : qobj/array-like
A single or a `list` of quantum states or density matrices.
Returns
-------
expt : float/complex/array-like
Expectation value. ``real`` if `oper` is Hermitian, ``complex``
        otherwise. A (nested) array of expectation values if `state` or `oper`
        are arrays.
Examples
--------
>>> expect(num(4), basis(4, 3)) == 3 # doctest: +NORMALIZE_WHITESPACE
True
'''
if isinstance(state, Qobj) and isinstance(oper, Qobj):
return _single_qobj_expect(oper, state)
elif isinstance(oper, Qobj) and isinstance(state, eseries):
return _single_eseries_expect(oper, state)
elif isinstance(oper, (list, np.ndarray)):
if isinstance(state, Qobj):
if (all([op.isherm for op in oper]) and
(state.isket or state.isherm)):
return np.array([_single_qobj_expect(o, state) for o in oper])
else:
return np.array([_single_qobj_expect(o, state) for o in oper],
dtype=complex)
else:
return [expect(o, state) for o in oper]
elif isinstance(state, (list, np.ndarray)):
if oper.isherm and all([(op.isherm or op.type == 'ket')
for op in state]):
return np.array([_single_qobj_expect(oper, x) for x in state])
else:
return np.array([_single_qobj_expect(oper, x) for x in state],
dtype=complex)
else:
raise TypeError('Arguments must be quantum objects or eseries')
def _single_qobj_expect(oper, state):
"""
Private function used by expect to calculate expectation values of Qobjs.
"""
if isoper(oper):
if oper.dims[1] != state.dims[0]:
raise Exception('Operator and state do not have same tensor ' +
'structure: %s and %s' %
(oper.dims[1], state.dims[0]))
if state.type == 'oper':
# calculates expectation value via TR(op*rho)
return cy_spmm_tr(oper.data, state.data,
oper.isherm and state.isherm)
elif state.type == 'ket':
# calculates expectation value via <psi|op|psi>
return expect_csr_ket(oper.data, state.data,
oper.isherm)
else:
raise TypeError('Invalid operand types')
def _single_eseries_expect(oper, state):
"""
Private function used by expect to calculate expectation values for
eseries.
"""
out = eseries()
if isoper(state.ampl[0]):
out.rates = state.rates
out.ampl = np.array([expect(oper, a) for a in state.ampl])
else:
out.rates = np.array([])
out.ampl = np.array([])
for m in range(len(state.rates)):
op_m = state.ampl[m].data.conj().T * oper.data
for n in range(len(state.rates)):
a = op_m * state.ampl[n].data
if isinstance(a, sp.spmatrix):
a = a.todense()
out.rates = np.append(out.rates, state.rates[n] -
state.rates[m])
out.ampl = np.append(out.ampl, a)
return out
def variance(oper, state):
"""
Variance of an operator for the given state vector or density matrix.
Parameters
----------
oper : qobj
Operator for expectation value.
state : qobj/list
        A single or `list` of quantum states or density matrices.
Returns
-------
var : float
Variance of operator 'oper' for given state.
"""
return expect(oper ** 2, state) - expect(oper, state) ** 2
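# Usage sketch (illustrative, not part of the original module): a Fock state is an
# eigenstate of the number operator, so its variance vanishes:
#     from qutip import basis, num
#     variance(num(4), basis(4, 3))   # -> 0.0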
|
py | b412227b154ac29c5efea1e06da298740ad7c046 | z = "Tests were ran for service: matchService \n" \
"Passed: \n" \
"Failed: \n" \
"Error: \n" \
"Skipped: \n"
|
py | b412227efebd4857645f7ea22de083b61646ff3e | from distutils.core import setup
from Cython.Build import cythonize
setup(
ext_modules = cythonize("cython_module.pyx")
)
|
py | b4122296b0e31915f2a7be76160a043219e399af | from abc import ABC, abstractmethod
from .fields import SQLType
from .sql import WHERE
class BaseOperation:
def __init__(self, first_operand, second_operand):
self.first_operand = first_operand
self.second_operand = second_operand
def _build_operand(self, operand):
if isinstance(operand, str):
value = operand
else:
value = operand.build()
return value
def get_args(self):
return self.first_operand.get_args() + self.second_operand.get_args()
def _build_operand_values(self):
first_value = self._build_operand(self.first_operand)
second_value = self._build_operand(self.second_operand)
return first_value, second_value
def __add__(self, other):
return OR(self, other)
def __eq__(self, other):
if isinstance(self, other.__class__):
if (self.first_operand == other.first_operand and self.second_operand == other.second_operand) or \
(self.first_operand == other.second_operand and self.second_operand == other.first_operand):
return True
return False
class AND(BaseOperation):
def __init__(self, first_operand, second_operand):
super(AND, self).__init__(first_operand, second_operand)
def build(self):
first_value, second_value = self._build_operand_values()
return WHERE._and(first_value, second_value)
class OR(BaseOperation):
def __init__(self, first_operand, second_operand):
super(OR, self).__init__(first_operand, second_operand)
def build(self):
first_value, second_value = self._build_operand_values()
return WHERE._or(first_value, second_value)
class Q:
def __get_arg_name(self):
keys = self.kwargs.keys()
keys_iterator = iter(keys)
arg_name = next(keys_iterator)
return arg_name
def __set_args(self):
_arg_name = self.__get_arg_name()
self.arg_value = self.kwargs[_arg_name]
if _arg_name.find('__') != -1:
self.arg_name, method_name = _arg_name.split('__')
self.arg_method_name = method_name
else:
self.arg_name = _arg_name
self.arg_method_name = WHERE.eq.__name__
def __init__(self, **kwargs):
self.kwargs = kwargs
QValidator.validate(QValidatorCountKwargs, self)
self.__set_args()
QValidator.validate(QValidatorMethodName, self)
def build(self):
function = getattr(WHERE, self.arg_method_name)
return function(column_name=self.arg_name,
value=SQLType.convert(self.arg_value))
def get_args(self):
return [{'arg_name': self.arg_name, 'arg_value': self.arg_value, 'method': self.arg_method_name}]
def __or__(self, other):
if other is None:
return self
return OR(self, other)
def __and__(self, other):
if other is None:
return self
return AND(self, other)
def __eq__(self, other):
if isinstance(self, other.__class__):
if (self.arg_name == other.arg_name and
self.arg_value == other.arg_value and
self.arg_method_name == other.arg_method_name):
return True
return False
class QBaseValidator(ABC):
@staticmethod
@abstractmethod
def validate(query: Q):
pass
class QValidatorCountKwargs(QBaseValidator):
@staticmethod
@abstractmethod
def validate(query: Q):
if len(query.kwargs) != 1:
            raise AttributeError(f"Class {WHERE.__name__} takes one keyword argument. " \
f"Received {len(query.kwargs)} arguments {query.kwargs}")
class QValidatorMethodName(QBaseValidator):
@staticmethod
@abstractmethod
def validate(query: Q):
if query.arg_method_name not in WHERE.__dict__.keys():
raise AttributeError(f"Unknown condition method '{query.arg_method_name}'")
class QValidator:
@staticmethod
def validate(validator: QBaseValidator, query: Q):
validator.validate(query)
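# Usage sketch (illustrative, not part of the original module), using only the
# WHERE.eq condition referenced above; the exact SQL text depends on WHERE:
#     condition = Q(name='Alice') & Q(age__eq=30)
#     sql_fragment = condition.build()  # WHERE._and(...) over the two Q conditions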
|
py | b412237ca9f59266f5812d29cb96daa6393ed7c1 | from django.test import TestCase
from django.contrib.auth import get_user_model
from django.urls import reverse
from .models import Sport
class SportsTests(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
username="Mohammad", email="[email protected]", password="pwd"
)
self.sport = Sport.objects.create(
player_name="Noura", coach_s_name=self.user,description="No info"
)
def test_string_representation(self):
self.assertEqual(str(self.sport), "Noura")
def test_sport_content(self):
self.assertEqual(f"{self.sport.player_name}", "Noura")
self.assertEqual(f"{self.sport.coach_s_name}", "[email protected]")
self.assertEqual(self.sport.description,"No info")
def test_sport_list_view(self):
response = self.client.get(reverse("sport_list"))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Noura")
self.assertTemplateUsed(response, "sports/sport-list.html")
def test_sport_detail_view(self):
response = self.client.get(reverse("sport_detail", args="1"))
no_response = self.client.get("/100000/")
self.assertEqual(response.status_code, 200)
self.assertEqual(no_response.status_code, 404)
self.assertContains(response,"coach_s_name:[email protected]")
self.assertTemplateUsed(response, "sports/sport-detail.html")
def test_sport_create_view(self):
response = self.client.post(
reverse("sport_create"),
{
"player_name": "Mona",
"coach_s_name": self.user.id,
"description": "good player",
}, follow=True
)
self.assertRedirects(response, reverse("sport_detail", args="2"))
self.assertContains(response, "Mona")
def test_sport_update_view_redirect(self):
response = self.client.post(
reverse("sport_update", args="1"),
{"player_name": "Updated player_name","coach_s_name":self.user.id,"description":"New description"}
)
self.assertRedirects(response, reverse("sport_detail", args="1"))
def test_sport_delete_view(self):
response = self.client.get(reverse("sport_delete", args="1"))
self.assertEqual(response.status_code, 200) |
py | b41223b6a437d36cd9fd412c44535044d6b1b6ee | from django.apps import AppConfig
class TestVupConfig(AppConfig):
name = 'test_vup'
|
py | b412244e8252d25a86cea15e8cadee1a8e3a2284 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/backend.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/api/backend.proto",
package="google.api",
syntax="proto3",
serialized_options=b"\n\016com.google.apiB\014BackendProtoP\001ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\242\002\004GAPI",
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x18google/api/backend.proto\x12\ngoogle.api"1\n\x07\x42\x61\x63kend\x12&\n\x05rules\x18\x01 \x03(\x0b\x32\x17.google.api.BackendRule"\xf2\x02\n\x0b\x42\x61\x63kendRule\x12\x10\n\x08selector\x18\x01 \x01(\t\x12\x0f\n\x07\x61\x64\x64ress\x18\x02 \x01(\t\x12\x10\n\x08\x64\x65\x61\x64line\x18\x03 \x01(\x01\x12\x14\n\x0cmin_deadline\x18\x04 \x01(\x01\x12\x1a\n\x12operation_deadline\x18\x05 \x01(\x01\x12\x41\n\x10path_translation\x18\x06 \x01(\x0e\x32\'.google.api.BackendRule.PathTranslation\x12\x16\n\x0cjwt_audience\x18\x07 \x01(\tH\x00\x12\x16\n\x0c\x64isable_auth\x18\x08 \x01(\x08H\x00\x12\x10\n\x08protocol\x18\t \x01(\t"e\n\x0fPathTranslation\x12 \n\x1cPATH_TRANSLATION_UNSPECIFIED\x10\x00\x12\x14\n\x10\x43ONSTANT_ADDRESS\x10\x01\x12\x1a\n\x16\x41PPEND_PATH_TO_ADDRESS\x10\x02\x42\x10\n\x0e\x61uthenticationBn\n\x0e\x63om.google.apiB\x0c\x42\x61\x63kendProtoP\x01ZEgoogle.golang.org/genproto/googleapis/api/serviceconfig;serviceconfig\xa2\x02\x04GAPIb\x06proto3',
)
_BACKENDRULE_PATHTRANSLATION = _descriptor.EnumDescriptor(
name="PathTranslation",
full_name="google.api.BackendRule.PathTranslation",
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name="PATH_TRANSLATION_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="CONSTANT_ADDRESS",
index=1,
number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.EnumValueDescriptor(
name="APPEND_PATH_TO_ADDRESS",
index=2,
number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key,
),
],
containing_type=None,
serialized_options=None,
serialized_start=343,
serialized_end=444,
)
_sym_db.RegisterEnumDescriptor(_BACKENDRULE_PATHTRANSLATION)
_BACKEND = _descriptor.Descriptor(
name="Backend",
full_name="google.api.Backend",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="rules",
full_name="google.api.Backend.rules",
index=0,
number=1,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=40,
serialized_end=89,
)
_BACKENDRULE = _descriptor.Descriptor(
name="BackendRule",
full_name="google.api.BackendRule",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="selector",
full_name="google.api.BackendRule.selector",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="address",
full_name="google.api.BackendRule.address",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="deadline",
full_name="google.api.BackendRule.deadline",
index=2,
number=3,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="min_deadline",
full_name="google.api.BackendRule.min_deadline",
index=3,
number=4,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="operation_deadline",
full_name="google.api.BackendRule.operation_deadline",
index=4,
number=5,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="path_translation",
full_name="google.api.BackendRule.path_translation",
index=5,
number=6,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="jwt_audience",
full_name="google.api.BackendRule.jwt_audience",
index=6,
number=7,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="disable_auth",
full_name="google.api.BackendRule.disable_auth",
index=7,
number=8,
type=8,
cpp_type=7,
label=1,
has_default_value=False,
default_value=False,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="protocol",
full_name="google.api.BackendRule.protocol",
index=8,
number=9,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[_BACKENDRULE_PATHTRANSLATION],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name="authentication",
full_name="google.api.BackendRule.authentication",
index=0,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[],
)
],
serialized_start=92,
serialized_end=462,
)
_BACKEND.fields_by_name["rules"].message_type = _BACKENDRULE
_BACKENDRULE.fields_by_name["path_translation"].enum_type = _BACKENDRULE_PATHTRANSLATION
_BACKENDRULE_PATHTRANSLATION.containing_type = _BACKENDRULE
_BACKENDRULE.oneofs_by_name["authentication"].fields.append(
_BACKENDRULE.fields_by_name["jwt_audience"]
)
_BACKENDRULE.fields_by_name[
"jwt_audience"
].containing_oneof = _BACKENDRULE.oneofs_by_name["authentication"]
_BACKENDRULE.oneofs_by_name["authentication"].fields.append(
_BACKENDRULE.fields_by_name["disable_auth"]
)
_BACKENDRULE.fields_by_name[
"disable_auth"
].containing_oneof = _BACKENDRULE.oneofs_by_name["authentication"]
DESCRIPTOR.message_types_by_name["Backend"] = _BACKEND
DESCRIPTOR.message_types_by_name["BackendRule"] = _BACKENDRULE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Backend = _reflection.GeneratedProtocolMessageType(
"Backend",
(_message.Message,),
{
"DESCRIPTOR": _BACKEND,
"__module__": "google.api.backend_pb2"
# @@protoc_insertion_point(class_scope:google.api.Backend)
},
)
_sym_db.RegisterMessage(Backend)
BackendRule = _reflection.GeneratedProtocolMessageType(
"BackendRule",
(_message.Message,),
{
"DESCRIPTOR": _BACKENDRULE,
"__module__": "google.api.backend_pb2"
# @@protoc_insertion_point(class_scope:google.api.BackendRule)
},
)
_sym_db.RegisterMessage(BackendRule)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
py | b412252d59d3e5bdb9093fd144628a48cf3ae52d | ''' Polymorphism : Part 1
Polymorphism basically means the ability to take or have multiple or various forms.
Polymorphim means the ability to take or have various forms.
Polymorphism: Part 2 30
Creating a polymorphic class method. 39
Polymorphism : Part 1
'''
print(len("Hello World!")) # 12
print(len([20,40,80])) # 3
# Polymorphism: Part 2
def addNumbers(a,b,c=1):
return a + b + c
print(addNumbers(8,9)) # 18 ## Only a and b are passed, so c keeps its default value of 1.
print(addNumbers(8,9,4)) # 21 ## Here the default value of c is overridden with 4.
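# Illustrative sketch (not from the original lesson): the "polymorphic class method"
# mentioned in the docstring outline. Different classes define a method with the
# same name, so one call site works with any of them.
class Dog:
    def speak(self):
        return "Woof"
class Cat:
    def speak(self):
        return "Meow"
for animal in (Dog(), Cat()):
    print(animal.speak()) # Woof, then Meow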
|
py | b412267552f87914325768e901003b8aad5e06d5 | """issue_analysis_nature_can_be_null
Revision ID: 4332e26568de
Revises: 4437cb6ea605
Create Date: 2015-04-23 11:08:04.988846
"""
# revision identifiers, used by Alembic.
revision = '4332e26568de'
down_revision = '4437cb6ea605'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('issues', 'analysis_nature_id',
existing_type=mysql.INTEGER(display_width=11),
nullable=True,
existing_server_default=sa.text(u"'1'"))
op.execute("update issues set analysis_nature_id = null")
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('issues', 'analysis_nature_id',
existing_type=mysql.INTEGER(display_width=11),
nullable=False,
existing_server_default=sa.text(u"'1'"))
op.execute("update issues set analysis_nature_id = 1")
### end Alembic commands ###
|
py | b41226a5f81d2a4340a2b81de7ed14834c13047d | import plotly.plotly as py
import plotly.graph_objs as go
import requests
import json
from datetime import datetime
key = 'KEY'
username = 'USERNAME'
py.sign_in(username, key)
now = datetime.now()
start_date = "2018-01-01"
url = "https://api.coindesk.com/v1/bpi/historical/close.json?start=" + start_date + "&end=" + now.strftime('%Y-%m-%d')
data = requests.get(url)
data = data.json()['bpi']
prices = []
dates = []
for key in data:
dates.append(key)
dates.sort()
for key in dates:
prices.append(data[key])
url = "https://api.coindesk.com/v1/bpi/currentprice.json"
request = requests.get(url)
currentprice = request.json()['bpi']['USD']['rate_float']
prices[-1] = currentprice
layout = go.Layout(
title='BTC Price History, ' + now.strftime('%Y-%m-%d, %I:%M %p') + ' (updated hourly)',
plot_bgcolor='#424242',
paper_bgcolor='#FF9900',
showlegend=False,
width=1920,
titlefont=dict(
family="Open Sans",
size=30
),
height=1080,
xaxis = dict(gridcolor='#bdbdbd'),
yaxis = dict(
gridcolor='#bdbdbd',
autotick = False,
ticks = 'outside',
tick0 = prices[0],
dtick = 1000
),
margin = go.Margin(
l=60,
r=40,
b=30,
t=70
),
annotations=[
dict(
visible=True,
x=dates[-1],
y=prices[-1],
xref='x',
yref='y',
text="$" + str(prices[-1]),
showarrow=True,
font = dict(
family="Open Sans",
size=28,
color="#ffffff"
),
align = 'center',
arrowhead=2,
arrowsize=1,
arrowwidth=2,
arrowcolor='#9E9E9E',
ax=-200,
ay=-100,
bordercolor = '#9E9E9E',
borderwidth = 1,
borderpad = 2,
bgcolor = '#BDBDBD',
opacity=0.8
)
]
)
graph = [
go.Scatter(
y=prices,
x=dates,
line = dict(
color = '#FF9900'
),
opacity = 0.8,
name="",
showlegend = False
),
go.Scatter(
y=[prices[-1], prices[-1]],
x=[dates[0], dates[-1]],
line = dict(
color = '#F44336'
),
opacity=1,
mode='lines',
)]
py.image.save_as(dict(data=graph, layout=layout), filename='plot.png') |
py | b41227311617aa3716cec225a440d208877d0d62 | import os
from mountaintools import client as mt
import mlprocessors as mlpr
import shutil
def install_waveclus(repo, commit):
spikeforest_alg_install_path = get_install_path()
key = dict(
alg='waveclus',
repo=repo,
commit=commit
)
source_path = spikeforest_alg_install_path + '/waveclus_' + commit
if os.path.exists(source_path):
# The dir hash method does not seem to be working for some reason here
# hash0 = mt.computeDirHash(source_path)
# if hash0 == mt.getValue(key=key):
# print('waveclus is already auto-installed.')
# return source_path
a = mt.loadObject(path=source_path + '/spikeforest.json')
if a:
if mt.sha1OfObject(a) == mt.sha1OfObject(key):
print('waveclus is already auto-installed.')
return source_path
print('Removing directory: {}'.format(source_path))
shutil.rmtree(source_path)
script = """
#!/bin/bash
set -e
git clone {repo} {source_path}
cd {source_path}
git checkout {commit}
""".format(repo=repo, commit=commit, source_path=source_path)
ss = mlpr.ShellScript(script=script)
ss.start()
retcode = ss.wait()
if retcode != 0:
raise Exception('Install script returned a non-zero exit code/')
# The dir hash method does not seem to be working for some reason here
# hash0 = mt.computeDirHash(source_path)
# mt.setValue(key=key, value=hash0)
mt.saveObject(object=key, dest_path=source_path + '/spikeforest.json')
return source_path
def get_install_path():
spikeforest_alg_install_path = os.getenv('SPIKEFOREST_ALG_INSTALL_PATH', os.getenv('HOME') + '/spikeforest_algs')
if not os.path.exists(spikeforest_alg_install_path):
os.mkdir(spikeforest_alg_install_path)
return spikeforest_alg_install_path |
py | b41227871ae03bc02a1dd801d19d5edba500443a | """
Base and utility classes for tseries type pandas objects.
"""
import operator
from typing import Set
import numpy as np
from pandas._libs import NaT, iNaT, lib
from pandas._libs.algos import unique_deltas
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, cache_readonly
from pandas.core.dtypes.common import (
ensure_int64,
is_bool_dtype,
is_dtype_equal,
is_float,
is_integer,
is_list_like,
is_period_dtype,
is_scalar,
)
from pandas.core.dtypes.generic import ABCIndex, ABCIndexClass, ABCSeries
from pandas.core import algorithms, ops
from pandas.core.accessor import PandasDelegate
from pandas.core.arrays import ExtensionArray, ExtensionOpsMixin
from pandas.core.arrays.datetimelike import (
DatetimeLikeArrayMixin,
_ensure_datetimelike_to_i8,
)
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import Index, _index_shared_docs
from pandas.core.tools.timedeltas import to_timedelta
from pandas.tseries.frequencies import to_offset
_index_doc_kwargs = dict(ibase._index_doc_kwargs)
def ea_passthrough(array_method):
"""
Make an alias for a method of the underlying ExtensionArray.
Parameters
----------
array_method : method on an Array class
Returns
-------
method
"""
def method(self, *args, **kwargs):
return array_method(self._data, *args, **kwargs)
method.__name__ = array_method.__name__
method.__doc__ = array_method.__doc__
return method
def _make_wrapped_arith_op(opname):
def method(self, other):
meth = getattr(self._data, opname)
result = meth(maybe_unwrap_index(other))
return wrap_arithmetic_op(self, other, result)
method.__name__ = opname
return method
class DatetimeIndexOpsMixin(ExtensionOpsMixin):
"""
common ops mixin to support a unified interface datetimelike Index
"""
_data: ExtensionArray
# DatetimeLikeArrayMixin assumes subclasses are mutable, so these are
# properties there. They can be made into cache_readonly for Index
# subclasses bc they are immutable
inferred_freq = cache_readonly(
DatetimeLikeArrayMixin.inferred_freq.fget # type: ignore
)
_isnan = cache_readonly(DatetimeLikeArrayMixin._isnan.fget) # type: ignore
hasnans = cache_readonly(DatetimeLikeArrayMixin._hasnans.fget) # type: ignore
_hasnans = hasnans # for index / array -agnostic code
_resolution = cache_readonly(
DatetimeLikeArrayMixin._resolution.fget # type: ignore
)
resolution = cache_readonly(DatetimeLikeArrayMixin.resolution.fget) # type: ignore
_maybe_mask_results = ea_passthrough(DatetimeLikeArrayMixin._maybe_mask_results)
__iter__ = ea_passthrough(DatetimeLikeArrayMixin.__iter__)
mean = ea_passthrough(DatetimeLikeArrayMixin.mean)
@property
def freq(self):
"""
Return the frequency object if it is set, otherwise None.
"""
return self._data.freq
@property
def freqstr(self):
"""
Return the frequency object as a string if it is set, otherwise None.
"""
return self._data.freqstr
def unique(self, level=None):
if level is not None:
self._validate_index_level(level)
result = self._data.unique()
# Note: if `self` is already unique, then self.unique() should share
# a `freq` with self. If not already unique, then self.freq must be
# None, so again sharing freq is correct.
return self._shallow_copy(result._data)
@classmethod
def _create_comparison_method(cls, op):
"""
Create a comparison method that dispatches to ``cls.values``.
"""
def wrapper(self, other):
if isinstance(other, ABCSeries):
# the arrays defer to Series for comparison ops but the indexes
# don't, so we have to unwrap here.
other = other._values
result = op(self._data, maybe_unwrap_index(other))
return result
wrapper.__doc__ = op.__doc__
wrapper.__name__ = "__{}__".format(op.__name__)
return wrapper
@property
def _ndarray_values(self) -> np.ndarray:
return self._data._ndarray_values
# ------------------------------------------------------------------------
# Abstract data attributes
@property
def values(self):
# Note: PeriodArray overrides this to return an ndarray of objects.
return self._data._data
@property # type: ignore # https://github.com/python/mypy/issues/1362
@Appender(DatetimeLikeArrayMixin.asi8.__doc__)
def asi8(self):
return self._data.asi8
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc.
"""
result = lib.item_from_zerodim(result)
if is_bool_dtype(result) or lib.is_scalar(result):
return result
attrs = self._get_attributes_dict()
if not is_period_dtype(self) and attrs["freq"]:
# no need to infer if freq is None
attrs["freq"] = "infer"
return Index(result, **attrs)
# ------------------------------------------------------------------------
def equals(self, other):
"""
Determines if two Index objects contain the same elements.
"""
if self.is_(other):
return True
if not isinstance(other, ABCIndexClass):
return False
elif not isinstance(other, type(self)):
try:
other = type(self)(other)
except (ValueError, TypeError, OverflowError):
# e.g.
# ValueError -> cannot parse str entry, or OutOfBoundsDatetime
# TypeError -> trying to convert IntervalIndex to DatetimeIndex
# OverflowError -> Index([very_large_timedeltas])
return False
if not is_dtype_equal(self.dtype, other.dtype):
# have different timezone
return False
elif is_period_dtype(self):
if not is_period_dtype(other):
return False
if self.freq != other.freq:
return False
return np.array_equal(self.asi8, other.asi8)
@staticmethod
def _join_i8_wrapper(joinf, dtype, with_indexers=True):
"""
Create the join wrapper methods.
"""
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
@staticmethod
def wrapper(left, right):
if isinstance(
left, (np.ndarray, ABCIndex, ABCSeries, DatetimeLikeArrayMixin)
):
left = left.view("i8")
if isinstance(
right, (np.ndarray, ABCIndex, ABCSeries, DatetimeLikeArrayMixin)
):
right = right.view("i8")
results = joinf(left, right)
if with_indexers:
join_index, left_indexer, right_indexer = results
join_index = join_index.view(dtype)
return join_index, left_indexer, right_indexer
return results
return wrapper
def _ensure_localized(
self, arg, ambiguous="raise", nonexistent="raise", from_utc=False
):
# See DatetimeLikeArrayMixin._ensure_localized.__doc__
if getattr(self, "tz", None):
# ensure_localized is only relevant for tz-aware DTI
result = self._data._ensure_localized(
arg, ambiguous=ambiguous, nonexistent=nonexistent, from_utc=from_utc
)
return type(self)._simple_new(result, name=self.name)
return arg
def _box_values(self, values):
return self._data._box_values(values)
@Appender(_index_shared_docs["contains"] % _index_doc_kwargs)
def __contains__(self, key):
try:
res = self.get_loc(key)
return (
is_scalar(res)
or isinstance(res, slice)
or (is_list_like(res) and len(res))
)
except (KeyError, TypeError, ValueError):
return False
# Try to run function on index first, and then on elements of index
# Especially important for group-by functionality
def map(self, mapper, na_action=None):
try:
result = mapper(self)
# Try to use this result if we can
if isinstance(result, np.ndarray):
result = Index(result)
if not isinstance(result, Index):
raise TypeError("The map function must return an Index object")
return result
except Exception:
return self.astype(object).map(mapper)
def sort_values(self, return_indexer=False, ascending=True):
"""
Return sorted copy of Index.
"""
if return_indexer:
_as = self.argsort()
if not ascending:
_as = _as[::-1]
sorted_index = self.take(_as)
return sorted_index, _as
else:
sorted_values = np.sort(self._ndarray_values)
attribs = self._get_attributes_dict()
freq = attribs["freq"]
if freq is not None and not is_period_dtype(self):
if freq.n > 0 and not ascending:
freq = freq * -1
elif freq.n < 0 and ascending:
freq = freq * -1
attribs["freq"] = freq
if not ascending:
sorted_values = sorted_values[::-1]
return self._simple_new(sorted_values, **attribs)
@Appender(_index_shared_docs["take"] % _index_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True, fill_value=None, **kwargs):
nv.validate_take(tuple(), kwargs)
indices = ensure_int64(indices)
maybe_slice = lib.maybe_indices_to_slice(indices, len(self))
if isinstance(maybe_slice, slice):
return self[maybe_slice]
taken = self._assert_take_fillable(
self.asi8,
indices,
allow_fill=allow_fill,
fill_value=fill_value,
na_value=iNaT,
)
# keep freq in PeriodArray/Index, reset otherwise
freq = self.freq if is_period_dtype(self) else None
return self._shallow_copy(taken, freq=freq)
_can_hold_na = True
_na_value = NaT
"""The expected NA value to use with this index."""
def _convert_tolerance(self, tolerance, target):
tolerance = np.asarray(to_timedelta(tolerance).to_numpy())
if target.size != tolerance.size and tolerance.size > 1:
raise ValueError("list-like tolerance size must match target index size")
return tolerance
def tolist(self):
"""
Return a list of the underlying data.
"""
return list(self.astype(object))
def min(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the minimum value of the Index or minimum along
an axis.
See Also
--------
numpy.ndarray.min
Series.min : Return the minimum value in a Series.
"""
nv.validate_min(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
i8 = self.asi8
try:
# quick check
if len(i8) and self.is_monotonic:
if i8[0] != iNaT:
return self._box_func(i8[0])
if self.hasnans:
if skipna:
min_stamp = self[~self._isnan].asi8.min()
else:
return self._na_value
else:
min_stamp = i8.min()
return self._box_func(min_stamp)
except ValueError:
return self._na_value
def argmin(self, axis=None, skipna=True, *args, **kwargs):
"""
Returns the indices of the minimum values along an axis.
See `numpy.ndarray.argmin` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmin
"""
nv.validate_argmin(args, kwargs)
nv.validate_minmax_axis(axis)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all() or not skipna:
return -1
i8 = i8.copy()
i8[mask] = np.iinfo("int64").max
return i8.argmin()
def max(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the maximum value of the Index or maximum along
an axis.
See Also
--------
numpy.ndarray.max
Series.max : Return the maximum value in a Series.
"""
nv.validate_max(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
i8 = self.asi8
try:
# quick check
if len(i8) and self.is_monotonic:
if i8[-1] != iNaT:
return self._box_func(i8[-1])
if self.hasnans:
if skipna:
max_stamp = self[~self._isnan].asi8.max()
else:
return self._na_value
else:
max_stamp = i8.max()
return self._box_func(max_stamp)
except ValueError:
return self._na_value
def argmax(self, axis=None, skipna=True, *args, **kwargs):
"""
Returns the indices of the maximum values along an axis.
See `numpy.ndarray.argmax` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmax
"""
nv.validate_argmax(args, kwargs)
nv.validate_minmax_axis(axis)
i8 = self.asi8
if self.hasnans:
mask = self._isnan
if mask.all() or not skipna:
return -1
i8 = i8.copy()
i8[mask] = 0
return i8.argmax()
# --------------------------------------------------------------------
# Rendering Methods
def _format_with_header(self, header, na_rep="NaT", **kwargs):
return header + list(self._format_native_types(na_rep, **kwargs))
@property
def _formatter_func(self):
raise AbstractMethodError(self)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value).
"""
attrs = super()._format_attrs()
for attrib in self._attributes:
if attrib == "freq":
freq = self.freqstr
if freq is not None:
freq = f"{freq!r}"
attrs.append(("freq", freq))
return attrs
# --------------------------------------------------------------------
def _convert_scalar_indexer(self, key, kind=None):
"""
We don't allow integer or float indexing on datetime-like when using
loc.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
assert kind in ["ix", "loc", "getitem", "iloc", None]
# we don't allow integer/float indexing for loc
# we don't allow float indexing for ix/getitem
if is_scalar(key):
is_int = is_integer(key)
is_flt = is_float(key)
if kind in ["loc"] and (is_int or is_flt):
self._invalid_indexer("index", key)
elif kind in ["ix", "getitem"] and is_flt:
self._invalid_indexer("index", key)
return super()._convert_scalar_indexer(key, kind=kind)
@classmethod
def _add_datetimelike_methods(cls):
"""
Add in the datetimelike methods (as we may have to override the
superclass).
"""
def __add__(self, other):
# dispatch to ExtensionArray implementation
result = self._data.__add__(maybe_unwrap_index(other))
return wrap_arithmetic_op(self, other, result)
cls.__add__ = __add__
def __radd__(self, other):
# alias for __add__
return self.__add__(other)
cls.__radd__ = __radd__
def __sub__(self, other):
# dispatch to ExtensionArray implementation
result = self._data.__sub__(maybe_unwrap_index(other))
return wrap_arithmetic_op(self, other, result)
cls.__sub__ = __sub__
def __rsub__(self, other):
result = self._data.__rsub__(maybe_unwrap_index(other))
return wrap_arithmetic_op(self, other, result)
cls.__rsub__ = __rsub__
__pow__ = _make_wrapped_arith_op("__pow__")
__rpow__ = _make_wrapped_arith_op("__rpow__")
__mul__ = _make_wrapped_arith_op("__mul__")
__rmul__ = _make_wrapped_arith_op("__rmul__")
__floordiv__ = _make_wrapped_arith_op("__floordiv__")
__rfloordiv__ = _make_wrapped_arith_op("__rfloordiv__")
__mod__ = _make_wrapped_arith_op("__mod__")
__rmod__ = _make_wrapped_arith_op("__rmod__")
__divmod__ = _make_wrapped_arith_op("__divmod__")
__rdivmod__ = _make_wrapped_arith_op("__rdivmod__")
__truediv__ = _make_wrapped_arith_op("__truediv__")
__rtruediv__ = _make_wrapped_arith_op("__rtruediv__")
def isin(self, values, level=None):
"""
Compute boolean array of whether each index value is found in the
passed set of values.
Parameters
----------
values : set or sequence of values
Returns
-------
is_contained : ndarray (boolean dtype)
"""
if level is not None:
self._validate_index_level(level)
if not isinstance(values, type(self)):
try:
values = type(self)(values)
except ValueError:
return self.astype(object).isin(values)
return algorithms.isin(self.asi8, values.asi8)
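    # Example (illustrative only, assuming pandas has been imported as ``pd``):
    #   idx = pd.date_range("2020-01-01", periods=3)
    #   idx.isin([idx[0]])  # -> array([ True, False, False])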
def intersection(self, other, sort=False):
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
if self.equals(other):
return self._get_reconciled_name_object(other)
if len(self) == 0:
return self.copy()
if len(other) == 0:
return other.copy()
if not isinstance(other, type(self)):
result = Index.intersection(self, other, sort=sort)
if isinstance(result, type(self)):
if result.freq is None:
# TODO: find a less code-smelly way to set this
result._data._freq = to_offset(result.inferred_freq)
return result
elif (
other.freq is None
or self.freq is None
or other.freq != self.freq
or not other.freq.isAnchored()
or (not self.is_monotonic or not other.is_monotonic)
):
result = Index.intersection(self, other, sort=sort)
# Invalidate the freq of `result`, which may not be correct at
# this point, depending on the values.
# TODO: find a less code-smelly way to set this
result._data._freq = None
if hasattr(self, "tz"):
result = self._shallow_copy(
result._values, name=result.name, tz=result.tz, freq=None
)
else:
result = self._shallow_copy(result._values, name=result.name, freq=None)
if result.freq is None:
# TODO: find a less code-smelly way to set this
result._data._freq = to_offset(result.inferred_freq)
return result
# to make our life easier, "sort" the two ranges
if self[0] <= other[0]:
left, right = self, other
else:
left, right = other, self
# after sorting, the intersection always starts with the right index
        # and ends with the index whose last element is the smallest
end = min(left[-1], right[-1])
start = right[0]
if end < start:
return type(self)(data=[])
else:
lslice = slice(*left.slice_locs(start, end))
left_chunk = left.values[lslice]
return self._shallow_copy(left_chunk)
@Appender(_index_shared_docs["repeat"] % _index_doc_kwargs)
def repeat(self, repeats, axis=None):
nv.validate_repeat(tuple(), dict(axis=axis))
freq = self.freq if is_period_dtype(self) else None
return self._shallow_copy(self.asi8.repeat(repeats), freq=freq)
@Appender(_index_shared_docs["where"] % _index_doc_kwargs)
def where(self, cond, other=None):
other = _ensure_datetimelike_to_i8(other, to_utc=True)
values = _ensure_datetimelike_to_i8(self, to_utc=True)
result = np.where(cond, values, other).astype("i8")
result = self._ensure_localized(result, from_utc=True)
return self._shallow_copy(result)
def _summary(self, name=None):
"""
Return a summarized representation.
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
"""
formatter = self._formatter_func
if len(self) > 0:
index_summary = f", {formatter(self[0])} to {formatter(self[-1])}"
else:
index_summary = ""
if name is None:
name = type(self).__name__
result = f"{name}: {len(self)} entries{index_summary}"
if self.freq:
result += "\nFreq: %s" % self.freqstr
# display as values, not quoted
result = result.replace("'", "")
return result
def _concat_same_dtype(self, to_concat, name):
"""
Concatenate to_concat which has the same class.
"""
attribs = self._get_attributes_dict()
attribs["name"] = name
# do not pass tz to set because tzlocal cannot be hashed
if len({str(x.dtype) for x in to_concat}) != 1:
raise ValueError("to_concat must have the same tz")
new_data = type(self._values)._concat_same_type(to_concat).asi8
# GH 3232: If the concat result is evenly spaced, we can retain the
# original frequency
is_diff_evenly_spaced = len(unique_deltas(new_data)) == 1
if not is_period_dtype(self) and not is_diff_evenly_spaced:
# reset freq
attribs["freq"] = None
return self._simple_new(new_data, **attribs)
@Appender(_index_shared_docs["astype"])
def astype(self, dtype, copy=True):
if is_dtype_equal(self.dtype, dtype) and copy is False:
# Ensure that self.astype(self.dtype) is self
return self
new_values = self._data.astype(dtype, copy=copy)
# pass copy=False because any copying will be done in the
# _data.astype call above
return Index(new_values, dtype=new_values.dtype, name=self.name, copy=False)
def shift(self, periods=1, freq=None):
"""
Shift index by desired number of time frequency increments.
This method is for shifting the values of datetime-like indexes
by a specified time increment a given number of times.
Parameters
----------
periods : int, default 1
Number of periods (or increments) to shift by,
can be positive or negative.
.. versionchanged:: 0.24.0
freq : pandas.DateOffset, pandas.Timedelta or string, optional
Frequency increment to shift by.
If None, the index is shifted by its own `freq` attribute.
Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc.
Returns
-------
pandas.DatetimeIndex
Shifted index.
See Also
--------
Index.shift : Shift values of Index.
PeriodIndex.shift : Shift values of PeriodIndex.
"""
result = self._data._time_shift(periods, freq=freq)
return type(self)(result, name=self.name)
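    # Example (illustrative only, assuming pandas has been imported as ``pd``):
    #   idx = pd.date_range("2020-01-01", periods=3, freq="D")
    #   idx.shift(2)  # -> DatetimeIndex starting at 2020-01-03, still freq='D'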
def wrap_arithmetic_op(self, other, result):
if result is NotImplemented:
return NotImplemented
if isinstance(result, tuple):
# divmod, rdivmod
assert len(result) == 2
return (
wrap_arithmetic_op(self, other, result[0]),
wrap_arithmetic_op(self, other, result[1]),
)
if not isinstance(result, Index):
# Index.__new__ will choose appropriate subclass for dtype
result = Index(result)
res_name = ops.get_op_result_name(self, other)
result.name = res_name
return result
def maybe_unwrap_index(obj):
"""
If operating against another Index object, we need to unwrap the underlying
data before deferring to the DatetimeArray/TimedeltaArray/PeriodArray
implementation, otherwise we will incorrectly return NotImplemented.
Parameters
----------
obj : object
Returns
-------
unwrapped object
"""
if isinstance(obj, ABCIndexClass):
return obj._data
return obj
class DatetimelikeDelegateMixin(PandasDelegate):
"""
Delegation mechanism, specific for Datetime, Timedelta, and Period types.
Functionality is delegated from the Index class to an Array class. A
few things can be customized
* _delegate_class : type
The class being delegated to.
* _delegated_methods, delegated_properties : List
        The list of property / method names being delegated.
    * raw_methods : Set
        The set of methods whose results should *not* be
        boxed in an index, after being returned from the array
    * raw_properties : Set
        The set of properties whose results should *not* be
boxed in an index, after being returned from the array
"""
# raw_methods : dispatch methods that shouldn't be boxed in an Index
_raw_methods: Set[str] = set()
# raw_properties : dispatch properties that shouldn't be boxed in an Index
_raw_properties: Set[str] = set()
name = None
_data: ExtensionArray
@property
def _delegate_class(self):
raise AbstractMethodError
def _delegate_property_get(self, name, *args, **kwargs):
result = getattr(self._data, name)
if name not in self._raw_properties:
result = Index(result, name=self.name)
return result
def _delegate_property_set(self, name, value, *args, **kwargs):
setattr(self._data, name, value)
def _delegate_method(self, name, *args, **kwargs):
result = operator.methodcaller(name, *args, **kwargs)(self._data)
if name not in self._raw_methods:
result = Index(result, name=self.name)
return result
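# A minimal, self-contained sketch of the delegation pattern described in the
# docstring above. The ``_Toy*`` names are illustrative only and are not part
# of pandas; the real mixin wires this up through PandasDelegate accessors.
class _ToyArray:
    """Stand-in for a DatetimeArray-like backing store."""
    def __init__(self, values):
        self._values = list(values)
    @property
    def nbytes(self):
        # a "raw" property: returned as-is, never re-boxed
        return 8 * len(self._values)
    def take(self, indices):
        # a delegated method: its result would normally be re-boxed
        return [self._values[i] for i in indices]
class _ToyDelegator:
    """Illustrative counterpart of the mixin's delegation hooks."""
    _raw_properties = {"nbytes"}
    def __init__(self, data):
        self._data = _ToyArray(data)
        self.name = "toy"
    def _delegate_property_get(self, name):
        result = getattr(self._data, name)
        if name not in self._raw_properties:
            result = ("boxed", self.name, result)  # stand-in for Index(result, name=...)
        return result
    def _delegate_method(self, name, *args, **kwargs):
        result = getattr(self._data, name)(*args, **kwargs)
        return ("boxed", self.name, result)
# _ToyDelegator([1, 2, 3])._delegate_property_get("nbytes")  -> 24
# _ToyDelegator([1, 2, 3])._delegate_method("take", [0, 2])  -> ("boxed", "toy", [1, 3])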
|
py | b41228ebd87bffa753ff6adc3a62b5e3533cdabd | # coding: utf-8
"""
Laserfiche API
Welcome to the Laserfiche API Swagger Playground. You can try out any of our API calls against your live Laserfiche Cloud account. Visit the developer center for more details: <a href=\"https://developer.laserfiche.com\">https://developer.laserfiche.com</a><p><strong>Build# : </strong>650780</p> # noqa: E501
OpenAPI spec version: 1-alpha
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class LFColor(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'a': 'int',
'r': 'int',
'g': 'int',
'b': 'int'
}
attribute_map = {
'a': 'a',
'r': 'r',
'g': 'g',
'b': 'b'
}
def __init__(self, a=None, r=None, g=None, b=None): # noqa: E501
"""LFColor - a model defined in Swagger""" # noqa: E501
self._a = None
self._r = None
self._g = None
self._b = None
self.discriminator = None
if a is not None:
self.a = a
if r is not None:
self.r = r
if g is not None:
self.g = g
if b is not None:
self.b = b
@property
def a(self):
"""Gets the a of this LFColor. # noqa: E501
:return: The a of this LFColor. # noqa: E501
:rtype: int
"""
return self._a
@a.setter
def a(self, a):
"""Sets the a of this LFColor.
:param a: The a of this LFColor. # noqa: E501
:type: int
"""
self._a = a
@property
def r(self):
"""Gets the r of this LFColor. # noqa: E501
:return: The r of this LFColor. # noqa: E501
:rtype: int
"""
return self._r
@r.setter
def r(self, r):
"""Sets the r of this LFColor.
:param r: The r of this LFColor. # noqa: E501
:type: int
"""
self._r = r
@property
def g(self):
"""Gets the g of this LFColor. # noqa: E501
:return: The g of this LFColor. # noqa: E501
:rtype: int
"""
return self._g
@g.setter
def g(self, g):
"""Sets the g of this LFColor.
:param g: The g of this LFColor. # noqa: E501
:type: int
"""
self._g = g
@property
def b(self):
"""Gets the b of this LFColor. # noqa: E501
:return: The b of this LFColor. # noqa: E501
:rtype: int
"""
return self._b
@b.setter
def b(self, b):
"""Sets the b of this LFColor.
:param b: The b of this LFColor. # noqa: E501
:type: int
"""
self._b = b
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(LFColor, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, LFColor):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
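# Example usage (illustrative only; the channel values are arbitrary):
if __name__ == "__main__":
    color = LFColor(a=255, r=10, g=20, b=30)
    print(color.to_dict())  # {'a': 255, 'r': 10, 'g': 20, 'b': 30}
    print(color == LFColor(a=255, r=10, g=20, b=30))  # True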
|
py | b4122aeb57a92149c292523ab95e06935022de8f | #-*- coding: utf-8 -*-
class Proxy(object):
def __init__(self):
self.index = 10000
self.ip = ''
self.port = ''
self.country = ''
self.anonymity = ''
self.https = ''
self.speed = ''
self.source = ''
def __str__(self):
data = {
'ip': self.ip,
'port': self.port,
'country': self.country,
'anonymity': self.anonymity,
'https': self.https,
'speed': self.speed,
'source': self.source
}
return str(data)
def set_value(self, ip, port, country, anonymity, https, speed, source):
self.ip = ip
self.port = port
self.country = country
self.anonymity = self.get_anonymity_type(anonymity)
self.https = https
self.speed = speed
self.source = source
def get_anonymity_type(self, anonymity):
'''There are 3 levels of proxies according to their anonymity.
Level 1 - Elite Proxy / Highly Anonymous Proxy: The web server can't detect whether you are using a proxy.
Level 2 - Anonymous Proxy: The web server can know you are using a proxy, but it can't know your real IP.
Level 3 - Transparent Proxy: The web server can know you are using a proxy and it can also know your real
IP.
'''
        # The Chinese labels below translate roughly as: 高匿代理 / 高匿名 / 超级匿名 =
        # "elite" / "highly anonymous" / "super anonymous", 匿名 / 普通匿名 =
        # "anonymous" / "ordinary anonymous", and 透明 = "transparent".
        if anonymity == u'高匿代理' or anonymity == u'高匿名' or anonymity == 'elite proxy' or \
anonymity == u'超级匿名':
return '1'
elif anonymity == u'匿名' or anonymity == 'anonymous' or anonymity == u'普通匿名':
return '2'
elif anonymity == u'透明' or anonymity == 'transparent':
return '3'
else:
return '3'
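# Example usage (illustrative only; the address below is a placeholder):
if __name__ == '__main__':
    proxy = Proxy()
    proxy.set_value(ip='127.0.0.1', port='8080', country='local',
                    anonymity='elite proxy', https='yes', speed='fast',
                    source='example')
    print(proxy) # anonymity is normalised to '1' (elite) by get_anonymity_type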
|
py | b4122c4bda796c0bde1ac5a39323223b1dcd5fcf | """Utility Class for reading and writing input output"""
import json
class IOUtil:
@staticmethod
def read_index_vector_json(file_path):
with open(file_path, 'r') as json_file:
input_file = json.load(json_file)
label_list = [entry['label'] for entry in input_file['keys']]
vector_list = [entry['embedding'] for entry in input_file['keys']]
index2label = {index: label for index, label in enumerate(label_list)}
vec_dim = len(vector_list[0])
return vec_dim, index2label, vector_list
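# Example (illustrative only): the JSON layout assumed by the reader above is
# {"keys": [{"label": ..., "embedding": [...]}, ...]}.
if __name__ == '__main__':
    import os
    import tempfile
    sample = {"keys": [{"label": "cat", "embedding": [0.1, 0.2]},
                       {"label": "dog", "embedding": [0.3, 0.4]}]}
    with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as handle:
        json.dump(sample, handle)
        path = handle.name
    try:
        dim, index2label, vectors = IOUtil.read_index_vector_json(path)
        print(dim, index2label, vectors)  # 2 {0: 'cat', 1: 'dog'} [[0.1, 0.2], [0.3, 0.4]]
    finally:
        os.remove(path)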
|
py | b4122cc3f4642a01cef685dede0cc7a7884988c7 | def area(a,b):
s = a * b
print(f"A área do terreno com lado a = {a} e b = {b} é igual a {s}")
lado1 = int(input('Digite a largura do terreno: '))
lado2 = int(input('Digite o comprimento do terreno: '))
area(lado1,lado2) |
py | b4122e3c9f1ddd45347a6c96fc4c533e00da6791 | import pytest
from reddit_clone.users.forms import UserCreationForm
from reddit_clone.users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
def test_clean_username(self):
# A user with proto_user params does not exist yet.
proto_user = UserFactory.build()
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert form.is_valid()
assert form.clean_username() == proto_user.username
# Creating a user.
form.save()
# The user with proto_user params already exists,
# hence cannot be created.
form = UserCreationForm(
{
"username": proto_user.username,
"password1": proto_user._password,
"password2": proto_user._password,
}
)
assert not form.is_valid()
assert len(form.errors) == 1
assert "username" in form.errors
|
py | b4122eea066444ecfa9b2e0f3a206f86912f996b | """Sequence and Bracketed Grammars."""
from typing import Optional, List, Tuple
from ...errors import SQLParseError
from ..segments import BaseSegment, Indent, Dedent
from ..helpers import trim_non_code_segments, check_still_complete
from ..match_result import MatchResult
from ..match_wrapper import match_wrapper
from ..context import ParseContext
from .base import BaseGrammar, Ref, cached_method_for_parse_context
class Sequence(BaseGrammar):
"""Match a specific sequence of elements."""
@cached_method_for_parse_context
def simple(self, parse_context: ParseContext) -> Optional[List[str]]:
"""Does this matcher support a uppercase hash matching route?
Sequence does provide this, as long as the *first* non-optional
element does, *AND* and optional elements which preceed it also do.
"""
simple_buff = []
for opt in self._elements:
simple = opt.simple(parse_context=parse_context)
if not simple:
return None
simple_buff += simple
if not opt.is_optional():
# We found our first non-optional element!
return simple_buff
# If *all* elements are optional AND simple, I guess it's also simple.
return simple_buff
@match_wrapper()
def match(self, segments, parse_context):
"""Match a specific sequence of elements."""
if isinstance(segments, BaseSegment):
segments = tuple(segments)
matched_segments = MatchResult.from_empty()
unmatched_segments = segments
for idx, elem in enumerate(self._elements):
while True:
# Is it an indent or dedent?
if elem.is_meta:
# Is it actually enabled?
if not elem.is_enabled(parse_context=parse_context):
break
# Work out how to find an appropriate pos_marker for
# the meta segment.
if matched_segments:
# Get from end of last
last_matched = matched_segments.matched_segments[-1]
meta_pos_marker = last_matched.get_end_pos_marker()
else:
# Get from start of next
meta_pos_marker = unmatched_segments[0].pos_marker
matched_segments += elem(pos_marker=meta_pos_marker)
break
# Consume non-code if appropriate
if self.allow_gaps:
pre_nc, mid_seg, post_nc = trim_non_code_segments(
unmatched_segments
)
else:
pre_nc = ()
mid_seg = unmatched_segments
post_nc = ()
if len(pre_nc + mid_seg + post_nc) == 0:
                    # We've run out of sequence without matching everything.
# Do only optional or meta elements remain?
if all(e.is_optional() or e.is_meta for e in self._elements[idx:]):
# then it's ok, and we can return what we've got so far.
# No need to deal with anything left over because we're at the end,
# unless it's a meta segment.
# Get hold of the last thing to be matched, so we've got an anchor.
last_matched = matched_segments.matched_segments[-1]
meta_pos_marker = last_matched.get_end_pos_marker()
# NB: This complicated expression just adds indents as appropriate.
return matched_segments + tuple(
e(pos_marker=meta_pos_marker)
for e in self._elements[idx:]
if e.is_meta and e.is_enabled(parse_context=parse_context)
)
else:
# we've got to the end of the sequence without matching all
# required elements.
return MatchResult.from_unmatched(segments)
else:
# We've already dealt with potential whitespace above, so carry on to matching
with parse_context.deeper_match() as ctx:
elem_match = elem.match(mid_seg, parse_context=ctx)
if elem_match.has_match():
# We're expecting mostly partial matches here, but complete
# matches are possible. Don't be greedy with whitespace!
matched_segments += pre_nc + elem_match.matched_segments
unmatched_segments = elem_match.unmatched_segments + post_nc
# Each time we do this, we do a sense check to make sure we haven't
# dropped anything. (Because it's happened before!).
check_still_complete(
segments,
matched_segments.matched_segments,
unmatched_segments,
)
# Break out of the while loop and move to the next element.
break
else:
                        # If we can't match an element, we should ascertain whether it's
                        # optional. If so then fine, move on, but otherwise we should crash
# out without a match. We have not matched the sequence.
if elem.is_optional():
# This will crash us out of the while loop and move us
# onto the next matching element
break
else:
return MatchResult.from_unmatched(segments)
# If we get to here, we've matched all of the elements (or skipped them)
# but still have some segments left (or perhaps have precisely zero left).
# In either case, we're golden. Return successfully, with any leftovers as
# the unmatched elements.
return MatchResult(matched_segments.matched_segments, unmatched_segments)
class Bracketed(Sequence):
"""Match if this is a bracketed sequence, with content that matches one of the elements.
Note that the contents of the Bracketed Expression are treated as an expected sequence.
Changelog:
    - Post 0.3.2: Bracketed inherits from Sequence and anything within
      the `Bracketed()` expression is treated as a sequence. For the
      content of the Brackets, we call the `match()` method of the sequence
      grammar.
    - Post 0.1.0: Bracketed was separate from Sequence, and the content
      of the expression was treated as options (like OneOf).
    - Pre 0.1.0: Bracketed inherited from Sequence and simply added
      brackets to that sequence.
"""
def __init__(self, *args, **kwargs):
self.square = kwargs.pop("square", False)
# Start and end tokens
# The details on how to match a bracket are stored in the dialect
if self.square:
self.start_bracket = Ref("StartSquareBracketSegment")
self.end_bracket = Ref("EndSquareBracketSegment")
else:
self.start_bracket = Ref("StartBracketSegment")
self.end_bracket = Ref("EndBracketSegment")
super(Bracketed, self).__init__(*args, **kwargs)
@cached_method_for_parse_context
def simple(self, parse_context: ParseContext) -> Optional[List[str]]:
"""Does this matcher support a uppercase hash matching route?
Bracketed does this easily, we just look for the bracket.
"""
return self.start_bracket.simple(parse_context=parse_context)
@match_wrapper()
def match(
self, segments: Tuple["BaseSegment", ...], parse_context: ParseContext
) -> MatchResult:
"""Match if this is a bracketed sequence, with content that matches one of the elements.
        1. Work forwards to find the first bracket.
           If we find something other than whitespace, then fail out.
        2. Once we have the first bracket, we need to bracket count forward to find its partner.
        3. Assuming we find its partner then we try and match what goes between them
           using the match method of Sequence.
           If we match, great. If not, then we return an empty match.
           If we never find its partner then we return an empty match but should probably
           log a parsing warning, or error? (A standalone bracket-counting sketch follows
           this class for illustration.)
"""
# Trim ends if allowed.
if self.allow_gaps:
pre_nc, seg_buff, post_nc = trim_non_code_segments(segments)
else:
seg_buff = segments
# Look for the first bracket
with parse_context.deeper_match() as ctx:
start_match = self.start_bracket.match(seg_buff, parse_context=ctx)
if start_match:
seg_buff = start_match.unmatched_segments
else:
# Can't find the opening bracket. No Match.
return MatchResult.from_unmatched(segments)
# Look for the closing bracket
content_segs, end_match, _ = self._bracket_sensitive_look_ahead_match(
segments=seg_buff,
matchers=[self.end_bracket],
parse_context=parse_context,
)
if not end_match:
raise SQLParseError(
"Couldn't find closing bracket for opening bracket.",
segment=start_match.matched_segments[0],
)
# Match the content now we've confirmed the brackets.
# First deal with the case of TOTALLY EMPTY BRACKETS e.g. "()"
if not content_segs:
# If it's allowed, return a match.
if not self._elements or all(e.is_optional() for e in self._elements):
return MatchResult(
start_match.matched_segments + end_match.matched_segments,
end_match.unmatched_segments,
)
# If not, don't.
else:
return MatchResult.from_unmatched(segments)
# Then trim whitespace and deal with the case of no code content e.g. "( )"
if self.allow_gaps:
pre_nc, content_segs, post_nc = trim_non_code_segments(content_segs)
else:
pre_nc = ()
post_nc = ()
# If we don't have anything left after trimming, act accordingly.
if not content_segs:
if not self._elements or (
all(e.is_optional() for e in self._elements) and self.allow_gaps
):
return MatchResult(
start_match.matched_segments
+ pre_nc
+ post_nc
+ end_match.matched_segments,
end_match.unmatched_segments,
)
else:
return MatchResult.from_unmatched(segments)
# Match using super. Sequence will interpret the content of the elements.
with parse_context.deeper_match() as ctx:
content_match = super().match(content_segs, parse_context=ctx)
# We require a complete match for the content (hopefully for obvious reasons)
if content_match.is_complete():
# Append some indent and dedent tokens at the start and the end.
pre_meta = (
Indent(
pos_marker=content_match.matched_segments[0].get_start_pos_marker()
),
)
post_meta = (
Dedent(
pos_marker=content_match.matched_segments[-1].get_end_pos_marker()
),
)
return MatchResult(
# NB: The nc segments go *outside* the indents.
start_match.matched_segments
+ pre_nc
+ pre_meta # Add a meta indent here
+ content_match.matched_segments
+ post_meta # Add a meta indent here
+ post_nc
+ end_match.matched_segments,
end_match.unmatched_segments,
)
# No complete match. Fail.
else:
return MatchResult.from_unmatched(segments)
|
py | b4123089ae34eac0004c159108d468122134c107 | import json
def hello(event, context):
body = {
"message": "Go Serverless v3.0! Your function executed successfully!",
"input": event,
}
return {"statusCode": 200, "body": json.dumps(body)}
|
py | b4123091d16e3ac8310d826e501b6eaaed93d82d | # -*- coding: utf-8 -*-
#
# PyMeasure documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 6 13:06:00 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.insert(0, os.path.abspath('..')) # Allow modules to be found
# Include Read the Docs formatting
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.doctest'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyMeasure'
copyright = u'2013-2019, PyMeasure Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.6.0'
# The full version, including alpha/beta/rc tags.
release = '0.6.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyMeasuredoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'PyMeasure.tex', u'PyMeasure Documentation',
u'PyMeasure Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pymeasure', u'PyMeasure Documentation',
[u'PyMeasure Developers'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PyMeasure', u'PyMeasure Documentation',
u'PyMeasure Developers', 'PyMeasure', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
py | b412309b8f8b9a5bcb40dd6b3fee71deac4a400d | from toph.common import exceptions
from toph.open_sources.tiktok.client import tiktok_client
def checkByUserName(username):
try:
tiktok_client.getData(username)
except ValueError:
exceptions.printException(__name__) |
py | b412312023f971479ba144e9fb26e3de88ebcdc1 | from __future__ import print_function
from __future__ import division
import freud
from benchmark import Benchmark
import numpy as np
import math
from benchmarker import run_benchmarks
class BenchmarkDensityLocalDensity(Benchmark):
def __init__(self, nu, rcut):
self.nu = nu
self.rcut = rcut
def bench_setup(self, N):
box_size = math.sqrt(N*self.nu)
seed = 0
np.random.seed(seed)
self.pos = np.random.random_sample((N, 3)).astype(np.float32) \
* box_size - box_size/2
self.pos[:, 2] = 0
self.ld = freud.density.LocalDensity(self.rcut, 1, 1)
box_size = math.sqrt(N*self.nu)
self.box = freud.box.Box.square(box_size)
def bench_run(self, N):
self.ld.compute(self.box, self.pos)
def run():
Ns = [1000, 10000]
rcut = 10
nu = 1
name = 'freud.density.LocalDensity'
classobj = BenchmarkDensityLocalDensity
number = 100
return run_benchmarks(name, Ns, number, classobj,
nu=nu, rcut=rcut)
if __name__ == '__main__':
run()
|
py | b4123139bf0c4961704e3be7af8f6ffadc541e55 | # Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for sonnet.python.modules.spatial_transformer.""" # pylint: disable=line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from nose_parameterized import parameterized
import numpy as np
import sonnet as snt
import tensorflow as tf
no_constraints = snt.AffineWarpConstraints.no_constraints
scale_2d = snt.AffineWarpConstraints.scale_2d
scale_3d = snt.AffineWarpConstraints.scale_3d
translation_2d = snt.AffineWarpConstraints.translation_2d
translation_3d = snt.AffineWarpConstraints.translation_3d
shear_2d = snt.AffineWarpConstraints.shear_2d
no_shear_2d = snt.AffineWarpConstraints.no_shear_2d
no_shear_3d = snt.AffineWarpConstraints.no_shear_3d
class AffineGridWarperTest(tf.test.TestCase):
def testShapeInferenceAndChecks(self):
output_shape2d = (2, 3)
source_shape2d = (6, 9)
constraints = scale_2d(y=1) & translation_2d(x=-2, y=7)
agw2d = snt.AffineGridWarper(source_shape=source_shape2d,
output_shape=output_shape2d,
constraints=constraints)
input_params2d = tf.placeholder(tf.float32,
[None, constraints.num_free_params])
warped_grid2d = agw2d(input_params2d)
self.assertEqual(warped_grid2d.get_shape().as_list()[1:], [2, 3, 2])
output_shape2d = (2, 3)
source_shape3d = (100, 200, 50)
agw3d = snt.AffineGridWarper(source_shape=source_shape3d,
output_shape=output_shape2d,
constraints=[[None, 0, None, None],
[0, 1, 0, None],
[0, None, 0, None]])
input_params3d = tf.placeholder(tf.float32,
[None, agw3d.constraints.num_free_params])
warped_grid3d = agw3d(input_params3d)
self.assertEqual(warped_grid3d.get_shape().as_list()[1:], [2, 3, 3])
output_shape3d = (2, 3, 4)
source_shape3d = (100, 200, 50)
agw3d = snt.AffineGridWarper(source_shape=source_shape3d,
output_shape=output_shape3d,
constraints=[[None, 0, None, None],
[0, 1, 0, None],
[0, None, 0, None]])
input_params3d = tf.placeholder(tf.float32,
[None, agw3d.constraints.num_free_params])
warped_grid3d = agw3d(input_params3d)
self.assertEqual(warped_grid3d.get_shape().as_list()[1:], [2, 3, 4, 3])
with self.assertRaisesRegexp(snt.Error,
"Incompatible set of constraints provided.*"):
snt.AffineGridWarper(source_shape=source_shape3d,
output_shape=output_shape3d,
constraints=no_constraints(2))
with self.assertRaisesRegexp(snt.Error,
"Output domain dimensionality.*"):
snt.AffineGridWarper(source_shape=source_shape2d,
output_shape=output_shape3d,
constraints=no_constraints(2))
@parameterized.expand([
("2d_a", [13, 17], [7, 11], no_constraints(2)),
("2d_b", [11, 5], [2, 8], scale_2d(x=.7)),
("2d_c", [9, 23], [3, 11], scale_2d(y=1.2)),
("2d_d", [2, 23], [9, 13], snt.AffineWarpConstraints([[1]*3, [None]*3])),
("3d_a", [13, 17, 3], [7, 11, 3], no_constraints(3)),
("3d_b", [11, 5, 6], [2, 8, 9], scale_3d(x=.7, z=2)),
("3d_c", [9, 23, 8], [3, 11, 2], scale_3d(y=1.2)),
("3d_d", [2, 23, 2], [9, 13, 33],
snt.AffineWarpConstraints([[1]*4, [None]*4, [None, 1, None, 1]])),
("2d_3d_a", [13, 17], [7, 11, 3], no_constraints(3)),
("2d_3d_b", [11, 5], [2, 8, 9], scale_3d(y=.7, z=2)),
("2d_3d_c", [9, 23], [3, 11, 2], scale_3d(x=1.2)),
("2d_3d_d", [2, 23], [9, 13, 33],
snt.AffineWarpConstraints([[None] * 4, [1] * 4, [1, None, None, 1]]))])
def testSameAsNumPyReference(self, _, output_shape, source_shape, constraints):
def chain(x):
return itertools.chain(*x)
def predict(output_shape, source_shape, inputs):
ranges = [np.linspace(-1, 1, x, dtype=np.float32)
for x in reversed(output_shape)]
n = len(source_shape)
grid = np.meshgrid(*ranges, indexing="xy")
for _ in range(len(output_shape), len(source_shape)):
grid.append(np.zeros_like(grid[0]))
grid.append(np.ones_like(grid[0]))
grid = np.array([x.reshape(1, -1) for x in grid]).squeeze()
predicted_output = []
for i in range(0, batch_size):
x = np.dot(inputs[i, :].reshape(n, n+1), grid)
for k, s in enumerate(reversed(source_shape)):
s = (s - 1) * 0.5
x[k, :] = x[k, :] * s + s
x = np.concatenate([v.reshape(v.shape + (1,)) for v in x], -1)
predicted_output.append(x.reshape(tuple(output_shape) + (n,)))
return predicted_output
batch_size = 20
agw = snt.AffineGridWarper(source_shape=source_shape,
output_shape=output_shape,
constraints=constraints)
inputs = tf.placeholder(tf.float32, [None, constraints.num_free_params])
warped_grid = agw(inputs)
full_size = constraints.num_dim * (constraints.num_dim + 1)
full_input_np = np.random.rand(batch_size, full_size)
con_i = [i for i, x in enumerate(chain(constraints.mask)) if not x]
con_val = [x for x in chain(constraints.constraints) if x is not None]
for i, v in zip(con_i, con_val):
full_input_np[:, i] = v
uncon_i = [i for i, x in enumerate(chain(constraints.mask)) if x]
with self.test_session() as sess:
output = sess.run(warped_grid,
feed_dict={inputs: full_input_np[:, uncon_i]})
self.assertAllClose(output,
predict(output_shape, source_shape, full_input_np),
rtol=1e-05,
atol=1e-05)
def testIdentity(self):
constraints = snt.AffineWarpConstraints.no_constraints()
warper = snt.AffineGridWarper([3, 3], [3, 3], constraints=constraints)
p = tf.placeholder(tf.float64, (None, constraints.num_free_params))
grid = warper(p)
with self.test_session() as sess:
warp_p = np.array([1, 0, 0,
0, 1, 0]).reshape([1, constraints.num_free_params])
output = sess.run(grid, feed_dict={p: warp_p})
# Check that output matches expected result for a known transformation.
self.assertAllClose(output,
np.array([[[[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]],
[[0.0, 1.0], [1.0, 1.0], [2.0, 1.0]],
[[0.0, 2.0], [1.0, 2.0], [2.0, 2.0]]]]))
@parameterized.expand([
("2d_a", [13, 17], [7, 11], no_constraints(2)),
("2d_b", [11, 5], [2, 8], scale_2d(x=.7)),
("2d_c", [9, 23], [3, 11], scale_2d(y=1.2)),
("2d_d", [2, 23], [9, 13], snt.AffineWarpConstraints([[1]*3, [None]*3]))])
def testInvSameAsNumPyRef(self, _, output_shape, source_shape, constraints):
def chain(x):
return itertools.chain(*x)
def predict(output_shape, source_shape, inputs):
ranges = [np.linspace(-1, 1, x, dtype=np.float32)
for x in reversed(source_shape)]
n = len(output_shape)
grid = np.meshgrid(*ranges, indexing="xy")
for _ in range(len(source_shape), len(output_shape)):
grid.append(np.zeros_like(grid[0]))
grid.append(np.ones_like(grid[0]))
grid = np.array([x.reshape(1, -1) for x in grid]).squeeze()
predicted_output = []
for i in range(0, batch_size):
affine_matrix = inputs[i, :].reshape(n, n+1)
inv_matrix = np.linalg.inv(affine_matrix[:2, :2])
inv_transform = np.concatenate(
[inv_matrix, -np.dot(inv_matrix,
affine_matrix[:, 2].reshape(2, 1))], 1)
x = np.dot(inv_transform, grid)
for k, s in enumerate(reversed(output_shape)):
s = (s - 1) * 0.5
x[k, :] = x[k, :] * s + s
x = np.concatenate([v.reshape(v.shape + (1,)) for v in x], -1)
predicted_output.append(x.reshape(tuple(source_shape) + (n,)))
return predicted_output
batch_size = 20
agw = snt.AffineGridWarper(source_shape=source_shape,
output_shape=output_shape,
constraints=constraints).inverse()
inputs = tf.placeholder(tf.float32, [None, constraints.num_free_params])
warped_grid = agw(inputs)
full_size = constraints.num_dim * (constraints.num_dim + 1)
# Adding a bit of mass to the matrix to avoid singular matrices
full_input_np = np.random.rand(batch_size, full_size) + 0.1
con_i = [i for i, x in enumerate(chain(constraints.mask)) if not x]
con_val = [x for x in chain(constraints.constraints) if x is not None]
for i, v in zip(con_i, con_val):
full_input_np[:, i] = v
uncon_i = [i for i, x in enumerate(chain(constraints.mask)) if x]
with self.test_session() as sess:
output = sess.run(warped_grid,
feed_dict={inputs: full_input_np[:, uncon_i]})
self.assertAllClose(output,
predict(output_shape, source_shape, full_input_np),
rtol=1e-05,
atol=1e-05)
class AffineWarpConstraintsTest(tf.test.TestCase):
def assertConstraintsEqual(self, warp_constraints, expected):
self.assertEqual(warp_constraints.constraints, expected)
def testCreateMasks(self):
self.assertConstraintsEqual(no_constraints(1), ((None,) * 2,) * 1)
self.assertConstraintsEqual(no_constraints(2), ((None,) * 3,) * 2)
self.assertConstraintsEqual(no_constraints(3), ((None,) * 4,) * 3)
self.assertConstraintsEqual(translation_2d(x=11, y=12), ((None, None, 11),
(None, None, 12)))
self.assertConstraintsEqual(translation_2d(x=11), ((None, None, 11),
(None, None, None)))
self.assertConstraintsEqual(translation_2d(y=12), ((None, None, None),
(None, None, 12)))
self.assertConstraintsEqual(translation_3d(x=11,
y=12,
z=13), ((None, None, None, 11),
(None, None, None, 12),
(None, None, None, 13)))
self.assertConstraintsEqual(translation_3d(x=11),
((None, None, None, 11),
(None, None, None, None),
(None, None, None, None)))
self.assertConstraintsEqual(translation_3d(y=12),
((None, None, None, None),
(None, None, None, 12),
(None, None, None, None)))
self.assertConstraintsEqual(translation_3d(z=13),
((None, None, None, None),
(None, None, None, None),
(None, None, None, 13)))
self.assertConstraintsEqual(scale_2d(x=11, y=12), ((11, None, None),
(None, 12, None)))
self.assertConstraintsEqual(scale_2d(x=11), ((11, None, None),
(None, None, None)))
self.assertConstraintsEqual(scale_2d(y=12), ((None, None, None),
(None, 12, None)))
self.assertConstraintsEqual(scale_3d(x=11,
y=12,
z=13), ((11, None, None, None),
(None, 12, None, None),
(None, None, 13, None)))
self.assertConstraintsEqual(scale_3d(x=11), ((11, None, None, None),
(None, None, None, None),
(None, None, None, None)))
self.assertConstraintsEqual(scale_3d(y=12), ((None, None, None, None),
(None, 12, None, None),
(None, None, None, None)))
self.assertConstraintsEqual(scale_3d(z=13), ((None, None, None, None),
(None, None, None, None),
(None, None, 13, None)))
self.assertConstraintsEqual(shear_2d(x=11,
y=12), ((None, 11, None),
(12, None, None)))
self.assertConstraintsEqual(shear_2d(x=11), ((None, 11, None),
(None, None, None)))
self.assertConstraintsEqual(shear_2d(y=12), ((None, None, None),
(12, None, None)))
self.assertConstraintsEqual(no_shear_2d(), ((None, 0, None),
(0, None, None)))
self.assertConstraintsEqual(no_shear_3d(), ((None, 0, 0, None),
(0, None, 0, None),
(0, 0, None, None)))
def testConstraintsOperations(self):
self.assertEqual(no_constraints(2).num_free_params, 6)
self.assertEqual(scale_2d(2, 4).num_free_params, 4)
self.assertConstraintsEqual(scale_2d(2, 4) & translation_2d(x=2),
((2, None, 2),
(None, 4, None)))
self.assertEqual(scale_2d(2, 4).mask, ((False, True, True),
(True, False, True)))
with self.assertRaisesRegexp(ValueError,
"Incompatible set of constraints provided."):
_ = scale_2d(2) & scale_2d(3)
if __name__ == "__main__":
tf.test.main()
|
py | b412321a0bad10a5a2e9baca3f26512451e2dbcc | from .opt.soga import SOGA
from .opt.elite_soga import EliteSOGA
from .opt.ssga import SSGA
|
py | b4123327c852782fe58b47cb23b4c66b03f60b08 | ##
# Copyright (c) 2008-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twisted.internet.defer import inlineCallbacks
from twisted.trial import unittest
from txdav.caldav.datastore.scheduling.imip.mailgateway import MailGatewayTokensDatabase
from txdav.caldav.datastore.scheduling.imip.mailgateway import migrateTokensToStore
from txdav.common.datastore.test.util import buildStore
import os
class MailGatewayTokenDBTests(unittest.TestCase):
@inlineCallbacks
def setUp(self):
super(MailGatewayTokenDBTests, self).setUp()
self.store = yield buildStore(self, None)
self.directory = self.store.directoryService()
@inlineCallbacks
def test_migrate(self):
self.path = self.mktemp()
os.mkdir(self.path)
oldDB = MailGatewayTokensDatabase(self.path)
oldDB.createToken(
"urn:uuid:user01", "mailto:[email protected]",
"icaluid1", token="token1")
oldDB._db_close()
yield migrateTokensToStore(self.path, self.store)
txn = self.store.newTransaction()
records = yield (txn.imipLookupByToken("token1"))
yield txn.commit()
self.assertEquals(records[0].organizer, "urn:uuid:user01")
self.assertEquals(records[0].attendee, "mailto:[email protected]")
self.assertEquals(records[0].icaluid, "icaluid1")
|
py | b4123450c2c0a72b4f010481684844e1cb66f9b5 | # from .vqvae import Model as VQVAE
# from .vqvae import Criterion as VQVAECriterion
#
# from .vae import Model as VAE
|
py | b412349d8bb408a0244b75b1e3cda55b9c898856 | import json
import sys
from .. import config
class ChannelManager:
""" Manager for handling channel tree structure and communicating to server
Attributes:
channel (Channel): channel that manager is handling
"""
def __init__(self, channel):
self.channel = channel # Channel to process
self.uploaded_files = []
self.failed_node_builds = {}
self.failed_uploads = {}
def validate(self):
""" validate: checks if tree structure is valid
Args: None
Returns: boolean indicating if tree is valid
"""
return self.channel.validate_tree()
def process_tree(self, channel_node):
"""
Returns a list of all file names associated with a tree. Profiling suggests using a global list with `extend`
is faster than using a global set or deque.
:param channel_node: Root node of the channel being processed
:return: The list of unique file names in `channel_node`.
"""
file_names = []
self.process_tree_recur(file_names, channel_node)
return [x for x in set(file_names) if x] # Remove any duplicate or None filenames
def process_tree_recur(self, file_names, node):
"""
Adds the names of all the files associated with the sub-tree rooted by `node` to `file_names` in post-order.
:param file_names: A global list containing all file names associated with a tree
:param node: The root of the current sub-tree being processed
:return: None.
"""
# Process node's children
for child_node in node.children:
self.process_tree_recur(file_names, child_node) # Call children first in case a tiled thumbnail is needed
file_names.extend(node.process_files())
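    # Illustrative trace of the post-order walk above (hypothetical tree): for a
    # topic T containing documents D1 and D2, files are gathered in the order
    # process_files(D1), process_files(D2), process_files(T), so a tiled
    # thumbnail for T can draw on its children's files.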
def check_for_files_failed(self):
""" check_for_files_failed: print any files that failed during download process
Args: None
Returns: None
"""
if len(config.FAILED_FILES) > 0:
config.LOGGER.error(" {} file(s) have failed to download".format(len(config.FAILED_FILES)))
for f in config.FAILED_FILES:
                if f.node: # files associated with a content node
info = "{0} {id}".format(f.node.kind.capitalize(), id=f.node.source_id)
elif f.assessment_item: # files associated with an assessment item
info = "{0} {id}".format("Question", id=f.assessment_item.source_id)
else: # files not associated with a node or an assessment item
info = f.__class__.__name__
file_identifier = f.__dict__
if hasattr(f, 'path') and f.path:
file_identifier = f.path
elif hasattr(f, 'youtube_url') and f.youtube_url:
file_identifier = f.youtube_url
config.LOGGER.warning("\t{0}: {id} \n\t {err}".format(info, id=file_identifier, err=f.error))
else:
config.LOGGER.info(" All files were successfully downloaded")
def get_file_diff(self, files_to_diff):
""" get_file_diff: retrieves list of files that do not exist on content curation server
Args: None
Returns: list of files that are not on server
"""
file_diff_result = []
chunks = [files_to_diff[x:x+1000] for x in range(0, len(files_to_diff), 1000)]
file_count = 0
total_count = len(files_to_diff)
for chunk in chunks:
response = config.SESSION.post(config.file_diff_url(), data=json.dumps(chunk))
response.raise_for_status()
file_diff_result += json.loads(response._content.decode("utf-8"))
file_count += len(chunk)
config.LOGGER.info("\tGot file diff for {0} out of {1} files".format(file_count, total_count))
return file_diff_result
def upload_files(self, file_list):
""" upload_files: uploads files to server
Args:
file_list (str): list of files to upload
Returns: None
"""
counter = 0
files_to_upload = list(set(file_list) - set(self.uploaded_files)) # In case restoring from previous session
try:
for f in files_to_upload:
with open(config.get_storage_path(f), 'rb') as file_obj:
response = config.SESSION.post(config.file_upload_url(), files={'file': file_obj})
if response.status_code == 200:
response.raise_for_status()
self.uploaded_files.append(f)
counter += 1
config.LOGGER.info("\tUploaded {0} ({count}/{total}) ".format(f, count=counter, total=len(files_to_upload)))
else:
self.failed_uploads[f] = response._content.decode('utf-8')
finally:
config.PROGRESS_MANAGER.set_uploading(self.uploaded_files)
def reattempt_upload_fails(self):
""" reattempt_upload_fails: uploads failed files to server
Args: None
Returns: None
"""
if len(self.failed_uploads) > 0:
config.LOGGER.info("Reattempting to upload {0} file(s)...".format(len(self.failed_uploads)))
current_fails = [k for k in self.failed_uploads]
self.failed_uploads = {}
self.upload_files(current_fails)
def upload_tree(self):
""" upload_tree: sends processed channel data to server to create tree
Args: None
Returns: link to uploadedchannel
"""
from datetime import datetime
start_time = datetime.now()
root, channel_id = self.add_channel()
self.node_count_dict = {"upload_count": 0, "total_count": self.channel.count()}
config.LOGGER.info("\tPreparing fields...")
self.truncate_fields(self.channel)
self.add_nodes(root, self.channel)
if self.check_failed(print_warning=False):
failed = self.failed_node_builds
self.failed_node_builds = {}
self.reattempt_failed(failed)
self.check_failed()
channel_id, channel_link = self.commit_channel(channel_id)
end_time = datetime.now()
config.LOGGER.info("Upload time: {time}s".format(time=(end_time - start_time).total_seconds()))
return channel_id, channel_link
def truncate_fields(self, node):
node.truncate_fields()
for child in node.children:
self.truncate_fields(child)
def reattempt_failed(self, failed):
for node_id in failed:
node = failed[node_id]
config.LOGGER.info("\tReattempting {0}s".format(str(node['node'])))
for f in node['node'].files:
# Attempt to upload file
try:
assert f.filename, "File failed to download (cannot be uploaded)"
with open(config.get_storage_path(f.filename), 'rb') as file_obj:
response = config.SESSION.post(config.file_upload_url(), files={'file': file_obj})
response.raise_for_status()
self.uploaded_files.append(f.filename)
except AssertionError as ae:
config.LOGGER.warning(ae)
# Attempt to create node
self.add_nodes(node_id, node['node'])
def check_failed(self, print_warning=True):
if len(self.failed_node_builds) > 0:
if print_warning:
config.LOGGER.warning("WARNING: The following nodes have one or more descendants that could not be created:")
for node_id in self.failed_node_builds:
node = self.failed_node_builds[node_id]
config.LOGGER.warning("\t{} ({})".format(str(node['node']), node['error']))
else:
config.LOGGER.error("Failed to create descendants for {} node(s).".format(len(self.failed_node_builds)))
return True
else:
config.LOGGER.info(" All nodes were created successfully.")
return False
def add_channel(self):
""" add_channel: sends processed channel data to server to create tree
Args: None
Returns: link to uploadedchannel
"""
config.LOGGER.info(" Creating channel {0}".format(self.channel.title))
self.channel.truncate_fields()
payload = {
"channel_data":self.channel.to_dict(),
}
response = config.SESSION.post(config.create_channel_url(), data=json.dumps(payload))
try:
response.raise_for_status()
except Exception:
config.LOGGER.error("Error connecting to API: {}".format(response.text))
raise
new_channel = json.loads(response._content.decode("utf-8"))
return new_channel['root'], new_channel['channel_id']
def add_nodes(self, root_id, current_node, indent=1):
""" add_nodes: adds processed nodes to tree
Args:
root_id (str): id of parent node on Kolibri Studio
            current_node (Node): node whose children will be added
            indent (int): level of indentation for printing
            Returns: None
"""
# if the current node has no children, no need to continue
if not current_node.children:
return
config.LOGGER.info("({count} of {total} uploaded) {indent}Processing {title} ({kind})".format(
count=self.node_count_dict['upload_count'],
total=self.node_count_dict['total_count'],
indent=" " * indent,
title=current_node.title,
kind=current_node.__class__.__name__)
)
# Send children in chunks to avoid gateway errors
try:
chunks = [current_node.children[x:x+10] for x in range(0, len(current_node.children), 10)]
for chunk in chunks:
payload_children = []
for child in chunk:
failed = [f for f in child.files if f.is_primary and (not f.filename or self.failed_uploads.get(f.filename))]
if any(failed):
if not self.failed_node_builds.get(root_id):
error_message = ""
for fail in failed:
reason = fail.filename + ": " + self.failed_uploads.get(fail.filename) if fail.filename else "File failed to download"
error_message = error_message + reason + ", "
self.failed_node_builds[root_id] = {'node': current_node, 'error': error_message[:-2]}
else:
payload_children.append(child.to_dict())
payload = {
'root_id': root_id,
'content_data': payload_children
}
# When iceqube is integrated, use this method to utilize upload file optimizations
# response = config.SESSION.post(config.add_nodes_from_file_url(), files={'file': json.dumps(payload)})
response = config.SESSION.post(config.add_nodes_url(), data=json.dumps(payload))
if response.status_code != 200:
self.failed_node_builds[root_id] = {'node': current_node, 'error': response.reason}
else:
response_json = json.loads(response._content.decode("utf-8"))
self.node_count_dict['upload_count'] += len(chunk)
if response_json['root_ids'].get(child.get_node_id().hex):
for child in chunk:
self.add_nodes(response_json['root_ids'].get(child.get_node_id().hex), child, indent + 1)
except ConnectionError as ce:
self.failed_node_builds[root_id] = {'node': current_node, 'error': ce}
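    # A hedged illustration (values made up) of the request body that add_nodes
    # builds above for each chunk of up to 10 children:
    #
    #   {
    #       "root_id": "abcd1234ef...",                # parent node id on the server
    #       "content_data": [child.to_dict(), ...]     # serialized children in this chunk
    #   }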
def commit_channel(self, channel_id):
""" commit_channel: commits channel to Kolibri Studio
Args:
channel_id (str): channel's id on Kolibri Studio
            Returns: channel id and link to the uploaded channel
"""
payload = {
"channel_id":channel_id,
"stage": config.STAGE,
}
response = config.SESSION.post(config.finish_channel_url(), data=json.dumps(payload))
if response.status_code != 200:
config.LOGGER.error("")
config.LOGGER.error("Could not activate channel: {}\n".format(response._content.decode('utf-8')))
if response.status_code == 403:
config.LOGGER.error("Channel can be viewed at {}\n\n".format(config.open_channel_url(channel_id, staging=True)))
sys.exit()
response.raise_for_status()
new_channel = json.loads(response._content.decode("utf-8"))
channel_link = config.open_channel_url(new_channel['new_channel'])
return channel_id, channel_link
def publish(self, channel_id):
""" publish: publishes tree to Kolibri
Args:
channel_id (str): channel's id on Kolibri Studio
Returns: None
"""
payload = {
"channel_id":channel_id,
}
response = config.SESSION.post(config.publish_channel_url(), data=json.dumps(payload))
response.raise_for_status()
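    # A minimal sketch (not part of this class) of how the methods above are
    # typically chained by a chef run; `manager` and `file_list` are assumed
    # names for the example:
    #
    #   diff = manager.get_file_diff(file_list)       # files the server does not have yet
    #   manager.upload_files(diff)                    # upload the missing files
    #   manager.reattempt_upload_fails()              # retry any failed uploads
    #   channel_id, link = manager.upload_tree()      # build the node tree on the server
    #   manager.publish(channel_id)                   # optionally publish to Kolibri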
|
py | b412385a5c492d681f6148db29416123b01c9c78 | # Definition of the classes for the SMALL TESTS
# Import TestFactory
import TestFactory as TF
# Import KratosUnittest
import KratosMultiphysics.KratosUnittest as KratosUnittest
class newtonian_dam_break_2D(TF.TestFactory):
file_name = "fluid_tests/newtonian/dam_break_2D"
file_parameters = None
def SetTestSuite(suites):
small_suite = suites['small']
small_suite.addTests(
KratosUnittest.TestLoader().loadTestsFromTestCases([
newtonian_dam_break_2D
])
)
return small_suite
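# Illustrative only (assumed usage, not defined in this file): a runner script
# would typically build the suites dict, pass it to SetTestSuite(suites), and
# then run the populated suite, e.g. via KratosUnittest.runTests(suites).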
|
py | b412386c43928dfef62cb9db13ef89079b09aa27 | import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import array
import platform
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8') ## test unicode string and carriage return
try:
import _thread as thread
import threading
except ImportError:
thread = None
threading = None
try:
import _socket
except ImportError:
_socket = None
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_RDS = _have_socket_rds()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.serv.listen(1)
def tearDown(self):
self.serv.close()
self.serv = None
class SocketUDPTest(unittest.TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.port = support.bind_port(self.serv)
def tearDown(self):
self.serv.close()
self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
"""Subclass of unittest.TestCase with thread-safe cleanup methods.
This subclass protects the addCleanup() and doCleanups() methods
with a recursive lock.
"""
if threading:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cleanup_lock = threading.RLock()
def addCleanup(self, *args, **kwargs):
with self._cleanup_lock:
return super().addCleanup(*args, **kwargs)
def doCleanups(self, *args, **kwargs):
with self._cleanup_lock:
return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
"""To be able to run this test, a `vcan0` CAN interface can be created with
the following commands:
# modprobe vcan
# ip link add dev vcan0 type vcan
# ifconfig vcan0 up
"""
interface = 'vcan0'
bufsize = 128
"""The CAN frame structure is defined in <linux/can.h>:
struct can_frame {
canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
__u8 can_dlc; /* data length code: 0 .. 8 */
__u8 data[8] __attribute__((aligned(8)));
};
"""
can_frame_fmt = "=IB3x8s"
can_frame_size = struct.calcsize(can_frame_fmt)
"""The Broadcast Management Command frame structure is defined
in <linux/can/bcm.h>:
struct bcm_msg_head {
__u32 opcode;
__u32 flags;
__u32 count;
struct timeval ival1, ival2;
canid_t can_id;
__u32 nframes;
struct can_frame frames[0];
}
`bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
`struct can_frame` definition). Must use native not standard types for packing.
"""
bcm_cmd_msg_fmt = "@3I4l2I"
bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)
def setUp(self):
self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
self.addCleanup(self.s.close)
try:
self.s.bind((self.interface,))
except OSError:
self.skipTest('network interface `%s` does not exist' %
self.interface)
class SocketRDSTest(unittest.TestCase):
"""To be able to run this test, the `rds` kernel module must be loaded:
# modprobe rds
"""
bufsize = 8192
def setUp(self):
self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
self.addCleanup(self.serv.close)
try:
self.port = support.bind_port(self.serv)
except OSError:
self.skipTest('unable to bind RDS socket')
class ThreadableTest:
"""Threadable Test class
The ThreadableTest class makes it easy to create a threaded
client/server pair from an existing unit test. To create a
new threaded class from an existing unit test, use multiple
inheritance:
class NewClass (OldClass, ThreadableTest):
pass
This class defines two new fixture functions with obvious
purposes for overriding:
clientSetUp ()
clientTearDown ()
Any new test functions within the class must then define
    tests in pairs, where the test name is preceded with a
'_' to indicate the client portion of the test. Ex:
def testFoo(self):
# Server portion
def _testFoo(self):
# Client portion
Any exceptions raised by the clients during their tests
are caught and transferred to the main thread to alert
the testing framework.
Note, the server setup function cannot call any blocking
functions that rely on the client thread during setup,
unless serverExplicitReady() is called just before
the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp()).
"""
def __init__(self):
# Swap the true setup function
self.__setUp = self.setUp
self.__tearDown = self.tearDown
self.setUp = self._setUp
self.tearDown = self._tearDown
def serverExplicitReady(self):
"""This method allows the server to explicitly indicate that
it wants the client thread to proceed. This is useful if the
server is about to execute a blocking routine that is
dependent upon the client thread during its setup routine."""
self.server_ready.set()
def _setUp(self):
self.server_ready = threading.Event()
self.client_ready = threading.Event()
self.done = threading.Event()
self.queue = queue.Queue(1)
self.server_crashed = False
# Do some munging to start the client test.
methodname = self.id()
i = methodname.rfind('.')
methodname = methodname[i+1:]
test_method = getattr(self, '_' + methodname)
self.client_thread = thread.start_new_thread(
self.clientRun, (test_method,))
try:
self.__setUp()
except:
self.server_crashed = True
raise
finally:
self.server_ready.set()
self.client_ready.wait()
def _tearDown(self):
self.__tearDown()
self.done.wait()
if self.queue.qsize():
exc = self.queue.get()
raise exc
def clientRun(self, test_func):
self.server_ready.wait()
self.clientSetUp()
self.client_ready.set()
if self.server_crashed:
self.clientTearDown()
return
if not hasattr(test_func, '__call__'):
raise TypeError("test_func must be a callable function")
try:
test_func()
except BaseException as e:
self.queue.put(e)
finally:
self.clientTearDown()
def clientSetUp(self):
raise NotImplementedError("clientSetUp must be implemented.")
def clientTearDown(self):
self.done.set()
thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketUDPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketCANTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
try:
self.cli.bind((self.interface,))
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketRDSTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
try:
# RDS sockets must be bound explicitly to send or receive data
self.cli.bind((HOST, 0))
self.cli_addr = self.cli.getsockname()
except OSError:
# skipTest should not be called here, and will be called in the
# server instead
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class SocketConnectedTest(ThreadedTCPSocketTest):
"""Socket tests for client-server connection.
self.cli_conn is a client socket connected to the server. The
setUp() method guarantees that it is connected to the server.
"""
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def setUp(self):
ThreadedTCPSocketTest.setUp(self)
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
ThreadedTCPSocketTest.tearDown(self)
def clientSetUp(self):
ThreadedTCPSocketTest.clientSetUp(self)
self.cli.connect((HOST, self.port))
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName='runTest'):
unittest.TestCase.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.serv, self.cli = socket.socketpair()
def tearDown(self):
self.serv.close()
self.serv = None
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
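# For example, a combined class could look roughly like this (a sketch only;
# the class and test names are made up):
#
#   class ExampleTCPStreamTest(ConnectedStreamTestMixin, TCPTestBase):
#       def testEcho(self):                       # server half
#           self.assertEqual(self.cli_conn.recv(1024), b"ping")
#       def _testEcho(self):                      # client half
#           self.serv_conn.sendall(b"ping")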
class SocketTestBase(unittest.TestCase):
"""A base class for socket tests.
Subclasses must provide methods newSocket() to return a new socket
and bindSock(sock) to bind it to an unused address.
Creates a socket self.serv and sets self.serv_addr to its address.
"""
def setUp(self):
self.serv = self.newSocket()
self.bindServer()
def bindServer(self):
"""Bind server socket and set self.serv_addr to its address."""
self.bindSock(self.serv)
self.serv_addr = self.serv.getsockname()
def tearDown(self):
self.serv.close()
self.serv = None
class SocketListeningTestMixin(SocketTestBase):
"""Mixin to listen on the server socket."""
def setUp(self):
super().setUp()
self.serv.listen(1)
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
ThreadableTest):
"""Mixin to add client socket and allow client/server tests.
Client socket is self.cli and its address is self.cli_addr. See
ThreadableTest for usage information.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.cli = self.newClientSocket()
self.bindClient()
def newClientSocket(self):
"""Return a new socket for use as client."""
return self.newSocket()
def bindClient(self):
"""Bind client socket and set self.cli_addr to its address."""
self.bindSock(self.cli)
self.cli_addr = self.cli.getsockname()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
ThreadedSocketTestMixin):
"""Mixin to allow client/server stream tests with connected client.
Server's socket representing connection to client is self.cli_conn
and client's connection to server is self.serv_conn. (Based on
SocketConnectedTest.)
"""
def setUp(self):
super().setUp()
# Indicate explicitly we're ready for the client thread to
# proceed and then perform the blocking call to accept
self.serverExplicitReady()
conn, addr = self.serv.accept()
self.cli_conn = conn
def tearDown(self):
self.cli_conn.close()
self.cli_conn = None
super().tearDown()
def clientSetUp(self):
super().clientSetUp()
self.cli.connect(self.serv_addr)
self.serv_conn = self.cli
def clientTearDown(self):
self.serv_conn.close()
self.serv_conn = None
super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
"""Base class for Unix-domain socket tests."""
# This class is used for file descriptor passing tests, so we
# create the sockets in a private directory so that other users
# can't send anything that might be problematic for a privileged
# user running the tests.
def setUp(self):
self.dir_path = tempfile.mkdtemp()
self.addCleanup(os.rmdir, self.dir_path)
super().setUp()
def bindSock(self, sock):
path = tempfile.mktemp(dir=self.dir_path)
sock.bind(path)
self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
"""Base class for Unix-domain SOCK_STREAM tests."""
def newSocket(self):
return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
"""Base class for IPv4 socket tests."""
host = HOST
def setUp(self):
super().setUp()
self.port = self.serv_addr[1]
def bindSock(self, sock):
support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
"""Base class for TCP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
"""Base class for UDP-over-IPv4 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
"""Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""
def newSocket(self):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
"""Base class for IPv6 socket tests."""
host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
"""Base class for UDP-over-IPv6 tests."""
def newSocket(self):
return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
"""Skip decorated test if condition is true, add client_skip decorator.
If the decorated object is not a class, sets its attribute
"client_skip" to a decorator which will return an empty function
if the test is to be skipped, or the original function if it is
not. This can be used to avoid running the client part of a
skipped test when using ThreadableTest.
"""
def client_pass(*args, **kwargs):
pass
def skipdec(obj):
retval = unittest.skip(reason)(obj)
if not isinstance(obj, type):
retval.client_skip = lambda f: client_pass
return retval
def noskipdec(obj):
if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
obj.client_skip = lambda f: f
return obj
return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
"""Skip decorated test if obj is missing any of the given attributes.
Sets client_skip attribute as skipWithClientIf() does.
"""
missing = [name for name in attributes if not hasattr(obj, name)]
return skipWithClientIf(
missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
"""Skip decorated test if a socket cannot be created with given arguments.
When an argument is given as a string, will use the value of that
attribute of the socket module, or skip the test if it doesn't
exist. Sets client_skip attribute as skipWithClientIf() does.
"""
err = None
missing = [obj for obj in args if
isinstance(obj, str) and not hasattr(socket, obj)]
if missing:
err = "don't have " + ", ".join(name for name in missing)
else:
callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
for obj in args]
try:
s = socket.socket(*callargs)
except OSError as e:
# XXX: check errno?
err = str(e)
else:
s.close()
return skipWithClientIf(
err is not None,
"can't create socket({0}): {1}".format(
", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s.close()
s = None
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
        # Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"'str' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"'complex' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"'str' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"'complex' does not support the buffer interface")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
self.assertIn('an integer is required', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
self.assertIn('an integer is required', str(cm.exception))
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
self.assertRaises(OSError, socket.gethostbyname, addr)
self.assertRaises(OSError, socket.gethostbyaddr, addr)
for addr in [support.HOST, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test support.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [support.HOST]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
interfaces = socket.if_nameindex()
for index, name in interfaces:
self.assertIsInstance(index, int)
self.assertIsInstance(name, str)
# interface indices are non-zero integers
self.assertGreater(index, 0)
_index = socket.if_nametoindex(name)
self.assertIsInstance(_index, int)
self.assertEqual(index, _index)
_name = socket.if_indextoname(index)
self.assertIsInstance(_name, str)
self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
'socket.if_nameindex() not available.')
def testInvalidInterfaceNameIndex(self):
# test nonexistent interface index/name
self.assertRaises(OSError, socket.if_indextoname, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
# test with invalid values
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
# Testing reference count for getnameinfo
try:
# On some versions, this loses a reference
orig = sys.getrefcount(__name__)
socket.getnameinfo(__name__,0)
except TypeError:
if sys.getrefcount(__name__) != orig:
self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
# Making sure getnameinfo doesn't crash the interpreter
try:
# On some versions, this crashes the interpreter.
socket.getnameinfo(('x', 0, 0, 0), 0)
except OSError:
pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
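        # e.g. socket.ntohs(socket.htons(0x1234)) == 0x1234 regardless of host byte order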
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
def testNtoHErrors(self):
good_values = [ 1, 2, 3, 1, 2, 3 ]
bad_values = [ -1, -2, -3, -1, -2, -3 ]
for k in good_values:
socket.ntohl(k)
socket.ntohs(k)
socket.htonl(k)
socket.htons(k)
for k in bad_values:
self.assertRaises(OverflowError, socket.ntohl, k)
self.assertRaises(OverflowError, socket.ntohs, k)
self.assertRaises(OverflowError, socket.htonl, k)
self.assertRaises(OverflowError, socket.htons, k)
def testGetServBy(self):
eq = self.assertEqual
# Find one service that exists, then check all the related interfaces.
# I've ordered this by protocols that have both a tcp and udp
# protocol, at least for modern Linuxes.
if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
or sys.platform in ('linux', 'darwin')):
# avoid the 'echo' service on this platform, as there is an
# assumption breaking non-standard port/protocol entry
services = ('daytime', 'qotd', 'domain')
else:
services = ('echo', 'daytime', 'domain')
for service in services:
try:
port = socket.getservbyname(service, 'tcp')
break
except OSError:
pass
else:
raise OSError
# Try same call with optional protocol omitted
port2 = socket.getservbyname(service)
eq(port, port2)
# Try udp, but don't barf if it doesn't exist
try:
udpport = socket.getservbyname(service, 'udp')
except OSError:
udpport = None
else:
eq(udpport, port)
# Now make sure the lookup by port returns the same service name
eq(socket.getservbyport(port2), service)
eq(socket.getservbyport(port, 'tcp'), service)
if udpport is not None:
eq(socket.getservbyport(udpport, 'udp'), service)
# Make sure getservbyport does not accept out of range ports.
self.assertRaises(OverflowError, socket.getservbyport, -1)
self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
# Testing default timeout
# The default timeout should initially be None
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Set the default timeout to 10, and see if it propagates
socket.setdefaulttimeout(10)
self.assertEqual(socket.getdefaulttimeout(), 10)
s = socket.socket()
self.assertEqual(s.gettimeout(), 10)
s.close()
# Reset the default timeout to None, and see if it propagates
socket.setdefaulttimeout(None)
self.assertEqual(socket.getdefaulttimeout(), None)
s = socket.socket()
self.assertEqual(s.gettimeout(), None)
s.close()
# Check that setting it to an invalid value raises ValueError
self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
# Check that setting it to an invalid type raises TypeError
self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv4toString(self):
from socket import inet_aton as f, inet_pton, AF_INET
g = lambda a: inet_pton(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
assertInvalid(f, '0.0.0.')
assertInvalid(f, '300.0.0.0')
assertInvalid(f, 'a.0.0.0')
assertInvalid(f, '1.2.3.4.5')
assertInvalid(f, '::1')
self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
assertInvalid(g, '0.0.0.')
assertInvalid(g, '300.0.0.0')
assertInvalid(g, 'a.0.0.0')
assertInvalid(g, '1.2.3.4.5')
assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
'test needs socket.inet_pton()')
def testIPv6toString(self):
try:
from socket import inet_pton, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_pton(AF_INET6, '::')
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_pton(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual(b'\x00' * 16, f('::'))
self.assertEqual(b'\x00' * 16, f('0::0'))
self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
self.assertEqual(
b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
)
self.assertEqual(
b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
f('ad42:abc::127:0:254:2')
)
self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
assertInvalid('0x20::')
assertInvalid(':::')
assertInvalid('::0::')
assertInvalid('1::abc::')
assertInvalid('1::abc::def')
assertInvalid('1:2:3:4:5:6:')
assertInvalid('1:2:3:4:5:6')
assertInvalid('1:2:3:4:5:6:7:8:')
assertInvalid('1:2:3:4:5:6:7:8:0')
self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
f('::254.42.23.64')
)
self.assertEqual(
b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
f('42::a29b:254.42.23.64')
)
self.assertEqual(
b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
)
assertInvalid('255.254.253.252')
assertInvalid('1::260.2.3.0')
assertInvalid('1::0.be.e.0')
assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
assertInvalid('::1.2.3.4:0')
assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv4(self):
from socket import inet_ntoa as f, inet_ntop, AF_INET
g = lambda a: inet_ntop(AF_INET, a)
assertInvalid = lambda func,a: self.assertRaises(
(OSError, ValueError), func, a
)
self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
assertInvalid(f, b'\x00' * 3)
assertInvalid(f, b'\x00' * 5)
assertInvalid(f, b'\x00' * 16)
self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
assertInvalid(g, b'\x00' * 3)
assertInvalid(g, b'\x00' * 5)
assertInvalid(g, b'\x00' * 16)
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
'test needs socket.inet_ntop()')
def testStringToIPv6(self):
try:
from socket import inet_ntop, AF_INET6, has_ipv6
if not has_ipv6:
self.skipTest('IPv6 not available')
except ImportError:
self.skipTest('could not import needed symbols from socket')
if sys.platform == "win32":
try:
inet_ntop(AF_INET6, b'\x00' * 16)
except OSError as e:
if e.winerror == 10022:
self.skipTest('IPv6 might not be supported')
f = lambda a: inet_ntop(AF_INET6, a)
assertInvalid = lambda a: self.assertRaises(
(OSError, ValueError), f, a
)
self.assertEqual('::', f(b'\x00' * 16))
self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
self.assertEqual(
'aef:b01:506:1001:ffff:9997:55:170',
f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
)
assertInvalid(b'\x12' * 15)
assertInvalid(b'\x12' * 17)
assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
# Testing getsockname()
port = support.find_unused_port()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.bind(("0.0.0.0", port))
name = sock.getsockname()
# XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
        # it is reasonable to get the host's addr in addition to 0.0.0.0.
# At least for eCos. This is required for the S/390 to pass.
try:
my_ip_addr = socket.gethostbyname(socket.gethostname())
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
        # A new socket should start with SO_REUSEADDR (reuse) == 0
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(1)
sock.close()
self.assertRaises(OSError, sock.send, b"spam")
def testNewAttributes(self):
# testing .family, .type and .protocol
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.assertEqual(sock.family, socket.AF_INET)
if hasattr(socket, 'SOCK_CLOEXEC'):
self.assertIn(sock.type,
(socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
socket.SOCK_STREAM))
else:
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
sock.close()
def test_getsockaddrarg(self):
sock = socket.socket()
self.addCleanup(sock.close)
port = support.find_unused_port()
big_port = port + 65536
neg_port = port - 65536
self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
# Since find_unused_port() is inherently subject to race conditions, we
# call it a couple times if necessary.
for i in itertools.count():
port = support.find_unused_port()
try:
sock.bind((HOST, port))
except OSError as e:
if e.errno != errno.EADDRINUSE or i == 5:
raise
else:
break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertTrue(hasattr(socket.socket, 'ioctl'))
self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
self.assertTrue(hasattr(socket, 'RCVALL_ON'))
self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
def testGetaddrinfo(self):
try:
socket.getaddrinfo('localhost', 80)
except socket.gaierror as err:
if err.errno == socket.EAI_SERVICE:
# see http://bugs.python.org/issue1282647
self.skipTest("buggy libc version")
raise
# len of every sequence is supposed to be == 5
for info in socket.getaddrinfo(HOST, None):
self.assertEqual(len(info), 5)
# host can be a domain name, a string representation of an
# IPv4/v6 address or None
socket.getaddrinfo('localhost', 80)
socket.getaddrinfo('127.0.0.1', 80)
socket.getaddrinfo(None, 80)
if support.IPV6_ENABLED:
socket.getaddrinfo('::1', 80)
# port can be a string service name such as "http", a numeric
# port number or None
socket.getaddrinfo(HOST, "http")
socket.getaddrinfo(HOST, 80)
socket.getaddrinfo(HOST, None)
# test family and socktype filters
infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
for family, type, _, _, _ in infos:
self.assertEqual(family, socket.AF_INET)
self.assertEqual(str(family), 'AddressFamily.AF_INET')
self.assertEqual(type, socket.SOCK_STREAM)
self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
for _, socktype, _, _, _ in infos:
self.assertEqual(socktype, socket.SOCK_STREAM)
# test proto and flags arguments
socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
# a server willing to support both IPv4 and IPv6 will
# usually do this
socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
# test keyword arguments
a = socket.getaddrinfo(HOST, None)
b = socket.getaddrinfo(host=HOST, port=None)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, socket.AF_INET)
b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
self.assertEqual(a, b)
a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
socket.AI_PASSIVE)
b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
type=socket.SOCK_STREAM, proto=0,
flags=socket.AI_PASSIVE)
self.assertEqual(a, b)
# Issue #6697.
self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
# Issue 17269: test workaround for OS X platform bug segfault
if hasattr(socket, 'AI_NUMERICSERV'):
try:
# The arguments here are undefined and the call may succeed
                # or fail. All we care about here is that it doesn't segfault.
socket.getaddrinfo("localhost", None, 0, 0, 0,
socket.AI_NUMERICSERV)
except socket.gaierror:
pass
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
'network is not enabled')
def test_idna(self):
# Check for internet access before running test (issue #12804).
try:
socket.gethostbyname('python.org')
except socket.gaierror as e:
if e.errno == socket.EAI_NODATA:
self.skipTest('internet access required for this test')
# these should all be successful
domain = 'испытание.pythontest.net'
socket.gethostbyname(domain)
socket.gethostbyname_ex(domain)
socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
        # this may not work if the forward lookup chooses the IPv6 address, as that doesn't
# have a reverse entry yet
# socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
        # socketpair() is not strictly required, but it makes things easier.
if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
self.skipTest("signal.alarm and socket.socketpair required for this test")
# Our signal handlers clobber the C errno by calling a math function
# with an invalid domain value.
def ok_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
def raising_handler(*args):
self.assertRaises(ValueError, math.acosh, 0)
1 // 0
c, s = socket.socketpair()
old_alarm = signal.signal(signal.SIGALRM, raising_handler)
try:
if with_timeout:
# Just above the one second minimum for signal.alarm
c.settimeout(1.5)
with self.assertRaises(ZeroDivisionError):
signal.alarm(1)
c.sendall(b"x" * support.SOCK_MAX_SIZE)
if with_timeout:
signal.signal(signal.SIGALRM, ok_handler)
signal.alarm(1)
self.assertRaises(socket.timeout, c.sendall,
b"x" * support.SOCK_MAX_SIZE)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_alarm)
c.close()
s.close()
def test_sendall_interrupted(self):
self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
r = repr(sock)
with self.assertWarns(ResourceWarning) as cm:
sock = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# An open socket file object gets dereferenced after the socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
f = sock.makefile('rb')
r = repr(sock)
sock = None
support.gc_collect()
with self.assertWarns(ResourceWarning):
f = None
support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
with socket.socket() as sock:
fp = sock.makefile("rb", buffering=0)
self.assertTrue(fp.readable())
self.assertFalse(fp.writable())
self.assertFalse(fp.seekable())
fp.close()
self.assertRaises(ValueError, fp.readable)
self.assertRaises(ValueError, fp.writable)
self.assertRaises(ValueError, fp.seekable)
def test_pickle(self):
sock = socket.socket()
with sock:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
self.assertRaises(TypeError, pickle.dumps, sock, protocol)
def test_listen_backlog(self):
for backlog in 0, -1:
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
srv.listen(backlog)
srv.close()
@support.cpython_only
def test_listen_backlog_overflow(self):
# Issue 15989
import _testcapi
srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((HOST, 0))
self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
srv.close()
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
self.assertRaises(OverflowError, socket.getnameinfo,
(support.HOSTv6, 0, 0xffffffff), 0)
with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
def test_str_for_enums(self):
# Make sure that the AF_* and SOCK_* constants have enum-like string
# reprs.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
self.assertEqual(str(s.family), 'AddressFamily.AF_INET')
self.assertEqual(str(s.type), 'SocketKind.SOCK_STREAM')
@unittest.skipIf(os.name == 'nt', 'Will not work on Windows')
def test_uknown_socket_family_repr(self):
# Test that when created with a family that's not one of the known
# AF_*/SOCK_* constants, socket.family just returns the number.
#
# To do this we fool socket.socket into believing it already has an
# open fd because on this path it doesn't actually verify the family and
# type and populates the socket object.
#
# On Windows this trick won't work, so the test is skipped.
fd, _ = tempfile.mkstemp()
with socket.socket(family=42424, type=13331, fileno=fd) as s:
self.assertEqual(s.family, 42424)
self.assertEqual(s.type, 13331)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_CAN
socket.PF_CAN
socket.CAN_RAW
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCMConstants(self):
socket.CAN_BCM
# opcodes
socket.CAN_BCM_TX_SETUP # create (cyclic) transmission task
socket.CAN_BCM_TX_DELETE # remove (cyclic) transmission task
socket.CAN_BCM_TX_READ # read properties of (cyclic) transmission task
socket.CAN_BCM_TX_SEND # send one CAN frame
socket.CAN_BCM_RX_SETUP # create RX content filter subscription
socket.CAN_BCM_RX_DELETE # remove RX content filter subscription
socket.CAN_BCM_RX_READ # read properties of RX content filter subscription
socket.CAN_BCM_TX_STATUS # reply to TX_READ request
socket.CAN_BCM_TX_EXPIRED # notification on performed transmissions (count=0)
socket.CAN_BCM_RX_STATUS # reply to RX_READ request
socket.CAN_BCM_RX_TIMEOUT # cyclic message is absent
socket.CAN_BCM_RX_CHANGED # updated CAN frame (detected content change)
def testCreateSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
pass
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testCreateBCMSocket(self):
with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
pass
def testBindAny(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.bind(('', ))
def testTooLongInterfaceName(self):
# most systems limit IFNAMSIZ to 16, take 1024 to be sure
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
self.assertRaisesRegex(OSError, 'interface name too long',
s.bind, ('x' * 1024,))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
'socket.CAN_RAW_LOOPBACK required for this test.')
def testLoopback(self):
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
for loopback in (0, 1):
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
loopback)
self.assertEqual(loopback,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))
@unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
'socket.CAN_RAW_FILTER required for this test.')
def testFilter(self):
can_id, can_mask = 0x200, 0x700
can_filter = struct.pack("=II", can_id, can_mask)
with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
self.assertEqual(can_filter,
s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class CANTest(ThreadedCANSocketTest):
def __init__(self, methodName='runTest'):
ThreadedCANSocketTest.__init__(self, methodName=methodName)
@classmethod
def build_can_frame(cls, can_id, data):
"""Build a CAN frame."""
can_dlc = len(data)
data = data.ljust(8, b'\x00')
return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)
@classmethod
def dissect_can_frame(cls, frame):
"""Dissect a CAN frame."""
can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
return (can_id, can_dlc, data[:can_dlc])
def testSendFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
self.assertEqual(addr[0], self.interface)
self.assertEqual(addr[1], socket.AF_CAN)
def _testSendFrame(self):
self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
self.cli.send(self.cf)
def testSendMaxFrame(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
def _testSendMaxFrame(self):
self.cf = self.build_can_frame(0x00, b'\x07' * 8)
self.cli.send(self.cf)
def testSendMultiFrames(self):
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf1, cf)
cf, addr = self.s.recvfrom(self.bufsize)
self.assertEqual(self.cf2, cf)
def _testSendMultiFrames(self):
self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
self.cli.send(self.cf1)
self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
self.cli.send(self.cf2)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def _testBCM(self):
cf, addr = self.cli.recvfrom(self.bufsize)
self.assertEqual(self.cf, cf)
can_id, can_dlc, data = self.dissect_can_frame(cf)
self.assertEqual(self.can_id, can_id)
self.assertEqual(self.data, data)
@unittest.skipUnless(hasattr(socket, "CAN_BCM"),
'socket.CAN_BCM required for this test.')
def testBCM(self):
bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
self.addCleanup(bcm.close)
bcm.connect((self.interface,))
self.can_id = 0x123
self.data = bytes([0xc0, 0xff, 0xee])
self.cf = self.build_can_frame(self.can_id, self.data)
opcode = socket.CAN_BCM_TX_SEND
flags = 0
count = 0
ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
bcm_can_id = 0x0222
nframes = 1
assert len(self.cf) == 16
header = struct.pack(self.bcm_cmd_msg_fmt,
opcode,
flags,
count,
ival1_seconds,
ival1_usec,
ival2_seconds,
ival2_usec,
bcm_can_id,
nframes,
)
header_plus_frame = header + self.cf
bytes_sent = bcm.send(header_plus_frame)
self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
def testCrucialConstants(self):
socket.AF_RDS
socket.PF_RDS
def testCreateSocket(self):
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
pass
def testSocketBufferSize(self):
bufsize = 16384
with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
@unittest.skipUnless(thread, 'Threading required for this test.')
class RDSTest(ThreadedRDSSocketTest):
def __init__(self, methodName='runTest'):
ThreadedRDSSocketTest.__init__(self, methodName=methodName)
def setUp(self):
super().setUp()
self.evt = threading.Event()
def testSendAndRecv(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
self.assertEqual(self.cli_addr, addr)
def _testSendAndRecv(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testPeek(self):
data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
self.assertEqual(self.data, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testPeek(self):
self.data = b'spam'
self.cli.sendto(self.data, 0, (HOST, self.port))
@requireAttrs(socket.socket, 'recvmsg')
def testSendAndRecvMsg(self):
data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
self.assertEqual(self.data, data)
@requireAttrs(socket.socket, 'sendmsg')
def _testSendAndRecvMsg(self):
self.data = b'hello ' * 10
self.cli.sendmsg([self.data], (), 0, (HOST, self.port))
def testSendAndRecvMulti(self):
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data1, data)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data2, data)
def _testSendAndRecvMulti(self):
self.data1 = b'bacon'
self.cli.sendto(self.data1, 0, (HOST, self.port))
self.data2 = b'egg'
self.cli.sendto(self.data2, 0, (HOST, self.port))
def testSelect(self):
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
data, addr = self.serv.recvfrom(self.bufsize)
self.assertEqual(self.data, data)
def _testSelect(self):
self.data = b'select'
self.cli.sendto(self.data, 0, (HOST, self.port))
def testCongestion(self):
# wait until the sender is done
self.evt.wait()
def _testCongestion(self):
# test the behavior in case of congestion
self.data = b'fill'
self.cli.setblocking(False)
try:
# try to lower the receiver's socket buffer size
self.cli.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 16384)
except OSError:
pass
with self.assertRaises(OSError) as cm:
try:
# fill the receiver's socket buffer
while True:
self.cli.sendto(self.data, 0, (HOST, self.port))
finally:
# signal the receiver we're done
self.evt.set()
# sendto() should have failed with ENOBUFS
self.assertEqual(cm.exception.errno, errno.ENOBUFS)
# and we should have received a congestion notification through poll
r, w, x = select.select([self.serv], [], [], 3.0)
self.assertIn(self.serv, r)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicTCPTest(SocketConnectedTest):
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecv(self):
# Testing large receive over TCP
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.serv_conn.send(MSG)
def testOverFlowRecv(self):
# Testing receive in chunks over TCP
seg1 = self.cli_conn.recv(len(MSG) - 3)
seg2 = self.cli_conn.recv(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecv(self):
self.serv_conn.send(MSG)
def testRecvFrom(self):
# Testing large recvfrom() over TCP
msg, addr = self.cli_conn.recvfrom(1024)
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.serv_conn.send(MSG)
def testOverFlowRecvFrom(self):
# Testing recvfrom() in chunks over TCP
seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
seg2, addr = self.cli_conn.recvfrom(1024)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testOverFlowRecvFrom(self):
self.serv_conn.send(MSG)
def testSendAll(self):
# Testing sendall() with a 2048 byte string over TCP
msg = b''
while 1:
read = self.cli_conn.recv(1024)
if not read:
break
msg += read
self.assertEqual(msg, b'f' * 2048)
def _testSendAll(self):
big_chunk = b'f' * 2048
self.serv_conn.sendall(big_chunk)
def testFromFd(self):
# Testing fromfd()
fd = self.cli_conn.fileno()
sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
self.assertIsInstance(sock, socket.socket)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testFromFd(self):
self.serv_conn.send(MSG)
def testDup(self):
# Testing dup()
sock = self.cli_conn.dup()
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDup(self):
self.serv_conn.send(MSG)
def testShutdown(self):
# Testing shutdown()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, MSG)
# wait for _testShutdown to finish: on OS X, when the server
# closes the connection the client also becomes disconnected,
# and the client's shutdown call will fail. (Issue #4397.)
self.done.wait()
def _testShutdown(self):
self.serv_conn.send(MSG)
self.serv_conn.shutdown(2)
testShutdown_overflow = support.cpython_only(testShutdown)
@support.cpython_only
def _testShutdown_overflow(self):
import _testcapi
self.serv_conn.send(MSG)
# Issue 15989
self.assertRaises(OverflowError, self.serv_conn.shutdown,
_testcapi.INT_MAX + 1)
self.assertRaises(OverflowError, self.serv_conn.shutdown,
2 + (_testcapi.UINT_MAX + 1))
self.serv_conn.shutdown(2)
def testDetach(self):
# Testing detach()
fileno = self.cli_conn.fileno()
f = self.cli_conn.detach()
self.assertEqual(f, fileno)
# cli_conn cannot be used anymore...
self.assertTrue(self.cli_conn._closed)
self.assertRaises(OSError, self.cli_conn.recv, 1024)
self.cli_conn.close()
# ...but we can create another socket using the (still open)
# file descriptor
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
self.addCleanup(sock.close)
msg = sock.recv(1024)
self.assertEqual(msg, MSG)
def _testDetach(self):
self.serv_conn.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicUDPTest(ThreadedUDPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedUDPSocketTest.__init__(self, methodName=methodName)
def testSendtoAndRecv(self):
# Testing sendto() and recv() over UDP
msg = self.serv.recv(len(MSG))
self.assertEqual(msg, MSG)
def _testSendtoAndRecv(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFrom(self):
# Testing recvfrom() over UDP
msg, addr = self.serv.recvfrom(len(MSG))
self.assertEqual(msg, MSG)
def _testRecvFrom(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
def testRecvFromNegative(self):
# Negative lengths passed to recvfrom should give ValueError.
self.assertRaises(ValueError, self.serv.recvfrom, -1)
def _testRecvFromNegative(self):
self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
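# As a concrete illustration of the scheme described above (a commented
# sketch only; the real combinations, with their skip decorators, appear
# at the end of this group of tests), a usable test case is assembled
# like this:
#
#     class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
#                                  SendrecvmsgConnectionlessBase,
#                                  ThreadedSocketTestMixin, UDPTestBase):
#         pass
#
#     class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
#         pass
#
# i.e. the generic recvmsg() tests are layered on top of a base class
# that provides UDP sockets and maps them to cli_sock and serv_sock.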
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
# Base class for sendmsg()/recvmsg() tests.
# Time in seconds to wait before considering a test failed, or
# None for no timeout. Not all tests actually set a timeout.
fail_timeout = 3.0
def setUp(self):
self.misc_event = threading.Event()
super().setUp()
def sendToServer(self, msg):
# Send msg to the server.
return self.cli_sock.send(msg)
# Tuple of alternative default arguments for sendmsg() when called
# via sendmsgToServer() (e.g. to include a destination address).
sendmsg_to_server_defaults = ()
def sendmsgToServer(self, *args):
# Call sendmsg() on self.cli_sock with the given arguments,
# filling in any arguments which are not supplied with the
# corresponding items of self.sendmsg_to_server_defaults, if
# any.
return self.cli_sock.sendmsg(
*(args + self.sendmsg_to_server_defaults[len(args):]))
def doRecvmsg(self, sock, bufsize, *args):
# Call recvmsg() on sock with given arguments and return its
# result. Should be used for tests which can use either
# recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
# this method with one which emulates it using recvmsg_into(),
# thus allowing the same test to be used for both methods.
result = sock.recvmsg(bufsize, *args)
self.registerRecvmsgResult(result)
return result
def registerRecvmsgResult(self, result):
# Called by doRecvmsg() with the return value of recvmsg() or
# recvmsg_into(). Can be overridden to arrange cleanup based
# on the returned ancillary data, for instance.
pass
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer.
self.assertEqual(addr1, addr2)
# Flags that are normally unset in msg_flags
msg_flags_common_unset = 0
for name in ("MSG_CTRUNC", "MSG_OOB"):
msg_flags_common_unset |= getattr(socket, name, 0)
# Flags that are normally set
msg_flags_common_set = 0
# Flags set when a complete record has been received (e.g. MSG_EOR
# for SCTP)
msg_flags_eor_indicator = 0
# Flags set when a complete record has not been received
# (e.g. MSG_TRUNC for datagram sockets)
msg_flags_non_eor_indicator = 0
def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
# Method to check the value of msg_flags returned by recvmsg[_into]().
#
# Checks that all bits in msg_flags_common_set attribute are
# set in "flags" and all bits in msg_flags_common_unset are
# unset.
#
# The "eor" argument specifies whether the flags should
# indicate that a full record (or datagram) has been received.
# If "eor" is None, no checks are done; otherwise, checks
# that:
#
# * if "eor" is true, all bits in msg_flags_eor_indicator are
# set and all bits in msg_flags_non_eor_indicator are unset
#
# * if "eor" is false, all bits in msg_flags_non_eor_indicator
# are set and all bits in msg_flags_eor_indicator are unset
#
# If "checkset" and/or "checkunset" are supplied, they require
# the given bits to be set or unset respectively, overriding
# what the attributes require for those bits.
#
# If any bits are set in "ignore", they will not be checked,
# regardless of the other inputs.
#
# Will raise Exception if the inputs require a bit to be both
# set and unset, and it is not ignored.
defaultset = self.msg_flags_common_set
defaultunset = self.msg_flags_common_unset
if eor:
defaultset |= self.msg_flags_eor_indicator
defaultunset |= self.msg_flags_non_eor_indicator
elif eor is not None:
defaultset |= self.msg_flags_non_eor_indicator
defaultunset |= self.msg_flags_eor_indicator
# Function arguments override defaults
defaultset &= ~checkunset
defaultunset &= ~checkset
# Merge arguments with remaining defaults, and check for conflicts
checkset |= defaultset
checkunset |= defaultunset
inboth = checkset & checkunset & ~ignore
if inboth:
raise Exception("contradictory set, unset requirements for flags "
"{0:#x}".format(inboth))
# Compare with given msg_flags value
mask = (checkset | checkunset) & ~ignore
self.assertEqual(flags & mask, checkset & mask)
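# Illustrative sketch (not part of the test suite): the essence of the
# mask comparison performed by checkFlags() above, assuming "checkset"
# and "checkunset" have already been merged with the class defaults.
def _illustrate_flag_mask_check(flags, checkset, checkunset, ignore=0):
    # Requiring a bit to be both set and unset (and not ignored) is a
    # contradiction, mirroring the Exception raised in checkFlags().
    inboth = checkset & checkunset & ~ignore
    if inboth:
        raise Exception("contradictory set, unset requirements for flags "
                        "{0:#x}".format(inboth))
    # Only the bits we care about, minus any ignored ones, are compared.
    mask = (checkset | checkunset) & ~ignore
    return (flags & mask) == (checkset & mask)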
class RecvmsgIntoMixin(SendrecvmsgBase):
# Mixin to implement doRecvmsg() using recvmsg_into().
def doRecvmsg(self, sock, bufsize, *args):
buf = bytearray(bufsize)
result = sock.recvmsg_into([buf], *args)
self.registerRecvmsgResult(result)
self.assertGreaterEqual(result[0], 0)
self.assertLessEqual(result[0], bufsize)
return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for datagram sockets.
@property
def msg_flags_non_eor_indicator(self):
return super().msg_flags_non_eor_indicator | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
# Defines flags to be checked in msg_flags for SCTP sockets.
@property
def msg_flags_eor_indicator(self):
return super().msg_flags_eor_indicator | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
# Base class for tests on connectionless-mode sockets. Users must
# supply sockets on attributes cli and serv to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.serv
@property
def cli_sock(self):
return self.cli
@property
def sendmsg_to_server_defaults(self):
return ([], [], 0, self.serv_addr)
def sendToServer(self, msg):
return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
# Base class for tests on connected sockets. Users must supply
# sockets on attributes serv_conn and cli_conn (representing the
# connections *to* the server and the client), to be mapped to
# cli_sock and serv_sock respectively.
@property
def serv_sock(self):
return self.cli_conn
@property
def cli_sock(self):
return self.serv_conn
def checkRecvmsgAddress(self, addr1, addr2):
# Address is currently "unspecified" for a connected socket,
# so we don't examine it
pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
# Base class to set a timeout on server's socket.
def setUp(self):
super().setUp()
self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
# Tests for sendmsg() which can use any socket type and do not
# involve recvmsg() or recvmsg_into().
def testSendmsg(self):
# Send a simple message with sendmsg().
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))
def testSendmsgDataGenerator(self):
# Send from a buffer obtained from a generator (not a sequence).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgDataGenerator(self):
self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
len(MSG))
def testSendmsgAncillaryGenerator(self):
# Gather (empty) ancillary data from a generator.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgAncillaryGenerator(self):
self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
len(MSG))
def testSendmsgArray(self):
# Send data from an array instead of the usual bytes object.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgArray(self):
self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
len(MSG))
def testSendmsgGather(self):
# Send message data from more than one buffer (gather write).
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgGather(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
def testSendmsgBadArgs(self):
# Check that sendmsg() rejects invalid arguments.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadArgs(self):
self.assertRaises(TypeError, self.cli_sock.sendmsg)
self.assertRaises(TypeError, self.sendmsgToServer,
b"not in an iterable")
self.assertRaises(TypeError, self.sendmsgToServer,
object())
self.assertRaises(TypeError, self.sendmsgToServer,
[object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG, object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], object())
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [], 0, object())
self.sendToServer(b"done")
def testSendmsgBadCmsg(self):
# Check that invalid ancillary data items are rejected.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgBadCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [object()])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(object(), 0, b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, object(), b"data")])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, object())])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0)])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b"data", 42)])
self.sendToServer(b"done")
@requireAttrs(socket, "CMSG_SPACE")
def testSendmsgBadMultiCmsg(self):
# Check that invalid ancillary data items are rejected when
# more than one item is present.
self.assertEqual(self.serv_sock.recv(1000), b"done")
@testSendmsgBadMultiCmsg.client_skip
def _testSendmsgBadMultiCmsg(self):
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [0, 0, b""])
self.assertRaises(TypeError, self.sendmsgToServer,
[MSG], [(0, 0, b""), object()])
self.sendToServer(b"done")
def testSendmsgExcessCmsgReject(self):
# Check that sendmsg() rejects excess ancillary data items
# when the number that can be sent is limited.
self.assertEqual(self.serv_sock.recv(1000), b"done")
def _testSendmsgExcessCmsgReject(self):
if not hasattr(socket, "CMSG_SPACE"):
# Can only send one item
with self.assertRaises(OSError) as cm:
self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
self.assertIsNone(cm.exception.errno)
self.sendToServer(b"done")
def testSendmsgAfterClose(self):
# Check that sendmsg() fails on a closed socket.
pass
def _testSendmsgAfterClose(self):
self.cli_sock.close()
self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
# Tests for sendmsg() which require a stream socket and do not
# involve recvmsg() or recvmsg_into().
def testSendmsgExplicitNoneAddr(self):
# Check that peer address can be specified as None.
self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)
def _testSendmsgExplicitNoneAddr(self):
self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))
def testSendmsgTimeout(self):
# Check that timeout works with sendmsg().
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
def _testSendmsgTimeout(self):
try:
self.cli_sock.settimeout(0.03)
with self.assertRaises(socket.timeout):
while True:
self.sendmsgToServer([b"a"*512])
finally:
self.misc_event.set()
# XXX: would be nice to have more tests for sendmsg flags argument.
# Linux supports MSG_DONTWAIT when sending, but in general, it
# only works when receiving. Could add other platforms if they
# support it too.
@skipWithClientIf(sys.platform not in {"linux"},
"MSG_DONTWAIT not known to work on this platform when "
"sending")
def testSendmsgDontWait(self):
# Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
self.assertEqual(self.serv_sock.recv(512), b"a"*512)
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@testSendmsgDontWait.client_skip
def _testSendmsgDontWait(self):
try:
with self.assertRaises(OSError) as cm:
while True:
self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
self.assertIn(cm.exception.errno,
(errno.EAGAIN, errno.EWOULDBLOCK))
finally:
self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
# Tests for sendmsg() which require a connectionless-mode
# (e.g. datagram) socket, and do not involve recvmsg() or
# recvmsg_into().
def testSendmsgNoDestAddr(self):
# Check that sendmsg() fails when no destination address is
# given for unconnected socket.
pass
def _testSendmsgNoDestAddr(self):
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG])
self.assertRaises(OSError, self.cli_sock.sendmsg,
[MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
# Tests for recvmsg() which can also be emulated using
# recvmsg_into(), and can use any socket type.
def testRecvmsg(self):
# Receive a simple message with recvmsg[_into]().
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsg(self):
self.sendToServer(MSG)
def testRecvmsgExplicitDefaults(self):
# Test recvmsg[_into]() with default arguments provided explicitly.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgExplicitDefaults(self):
self.sendToServer(MSG)
def testRecvmsgShorter(self):
# Receive a message smaller than the buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) + 42)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShorter(self):
self.sendToServer(MSG)
# FreeBSD < 8 doesn't always set the MSG_TRUNC flag when a truncated
# datagram is received (issue #13001).
@support.requires_freebsd_version(8)
def testRecvmsgTrunc(self):
# Receive part of message, check for truncation indicators.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
@support.requires_freebsd_version(8)
def _testRecvmsgTrunc(self):
self.sendToServer(MSG)
def testRecvmsgShortAncillaryBuf(self):
# Test ancillary data buffer too small to hold any ancillary data.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgShortAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgLongAncillaryBuf(self):
# Test large ancillary data buffer.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgLongAncillaryBuf(self):
self.sendToServer(MSG)
def testRecvmsgAfterClose(self):
# Check that recvmsg[_into]() fails on a closed socket.
self.serv_sock.close()
self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
# Tests which require a stream socket and can use either recvmsg()
# or recvmsg_into().
def testRecvmsgEOF(self):
# Receive end-of-stream indicator (b"", peer socket closed).
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.assertEqual(msg, b"")
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=None) # Might not have end-of-record marker
def _testRecvmsgEOF(self):
self.cli_sock.close()
def testRecvmsgOverflow(self):
# Receive a message in more than one chunk.
seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=False)
seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
msg = seg1 + seg2
self.assertEqual(msg, MSG)
def _testRecvmsgOverflow(self):
self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
# Tests for recvmsg() which can use any socket type.
def testRecvmsgBadArgs(self):
# Check that recvmsg() rejects invalid arguments.
self.assertRaises(TypeError, self.serv_sock.recvmsg)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
-1, 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg,
len(MSG), -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
[bytearray(10)], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
object(), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg,
len(MSG), 0, object())
msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgBadArgs(self):
self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
# Tests for recvmsg_into() which can use any socket type.
def testRecvmsgIntoBadArgs(self):
# Check that recvmsg_into() rejects invalid arguments.
buf = bytearray(len(MSG))
self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
len(MSG), 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
buf, 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[object()], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[b"I'm not writable"], 0, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf, object()], 0, 0)
self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
[buf], -1, 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], object(), 0)
self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
[buf], 0, object())
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoBadArgs(self):
self.sendToServer(MSG)
def testRecvmsgIntoGenerator(self):
# Receive into a buffer obtained from a generator (not a sequence).
buf = bytearray(len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
(o for o in [buf]))
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf, bytearray(MSG))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoGenerator(self):
self.sendToServer(MSG)
def testRecvmsgIntoArray(self):
# Receive into an array rather than the usual bytearray.
buf = array.array("B", [0] * len(MSG))
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
self.assertEqual(nbytes, len(MSG))
self.assertEqual(buf.tobytes(), MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoArray(self):
self.sendToServer(MSG)
def testRecvmsgIntoScatter(self):
# Receive into multiple buffers (scatter write).
b1 = bytearray(b"----")
b2 = bytearray(b"0123456789")
b3 = bytearray(b"--------------")
nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
[b1, memoryview(b2)[2:9], b3])
self.assertEqual(nbytes, len(b"Mary had a little lamb"))
self.assertEqual(b1, bytearray(b"Mary"))
self.assertEqual(b2, bytearray(b"01 had a 9"))
self.assertEqual(b3, bytearray(b"little lamb---"))
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
def _testRecvmsgIntoScatter(self):
self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
# Test the functions CMSG_LEN() and CMSG_SPACE(). Tests
# assumptions used by sendmsg() and recvmsg[_into](), which share
# code with these functions.
# Match the definition in socketmodule.c
try:
import _testcapi
except ImportError:
socklen_t_limit = 0x7fffffff
else:
socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)
@requireAttrs(socket, "CMSG_LEN")
def testCMSG_LEN(self):
# Test CMSG_LEN() with various valid and invalid values,
# checking the assumptions used by recvmsg() and sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_LEN(n)
# This is how recvmsg() calculates the data size
self.assertEqual(ret - socket.CMSG_LEN(0), n)
self.assertLessEqual(ret, self.socklen_t_limit)
self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)
@requireAttrs(socket, "CMSG_SPACE")
def testCMSG_SPACE(self):
# Test CMSG_SPACE() with various valid and invalid values,
# checking the assumptions used by sendmsg().
toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
values = list(range(257)) + list(range(toobig - 257, toobig))
last = socket.CMSG_SPACE(0)
# struct cmsghdr has at least three members, two of which are ints
self.assertGreater(last, array.array("i").itemsize * 2)
for n in values:
ret = socket.CMSG_SPACE(n)
self.assertGreaterEqual(ret, last)
self.assertGreaterEqual(ret, socket.CMSG_LEN(n))
self.assertGreaterEqual(ret, n + socket.CMSG_LEN(0))
self.assertLessEqual(ret, self.socklen_t_limit)
last = ret
self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
# sendmsg() shares code with these functions, and requires
# that it reject values over the limit.
self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
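# Illustrative sketch (not part of the test suite): the relationships the
# assertions above rely on, guarded because the CMSG_*() macros are not
# available on every platform.
def _illustrate_cmsg_macros(n=4):
    if not (hasattr(socket, "CMSG_LEN") and hasattr(socket, "CMSG_SPACE")):
        return None
    header = socket.CMSG_LEN(0)    # space taken by the cmsghdr alone
    length = socket.CMSG_LEN(n)    # header plus n bytes of cmsg_data
    space = socket.CMSG_SPACE(n)   # length rounded up for alignment padding
    assert length - header == n    # how recvmsg() computes the data size
    assert length <= space         # padding only ever adds space
    return header, length, space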
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
# Tests for file descriptor passing on Unix-domain sockets.
# Invalid file descriptor value that's unlikely to evaluate to a
# real FD even if one of its bytes is replaced with a different
# value (which shouldn't actually happen).
badfd = -0x5555
def newFDs(self, n):
# Return a list of n file descriptors for newly-created files
# containing their list indices as ASCII numbers.
fds = []
for i in range(n):
fd, path = tempfile.mkstemp()
self.addCleanup(os.unlink, path)
self.addCleanup(os.close, fd)
os.write(fd, str(i).encode())
fds.append(fd)
return fds
def checkFDs(self, fds):
# Check that the file descriptors in the given list contain
# their correct list indices as ASCII numbers.
for n, fd in enumerate(fds):
os.lseek(fd, 0, os.SEEK_SET)
self.assertEqual(os.read(fd, 1024), str(n).encode())
def registerRecvmsgResult(self, result):
self.addCleanup(self.closeRecvmsgFDs, result)
def closeRecvmsgFDs(self, recvmsg_result):
# Close all file descriptors specified in the ancillary data
# of the given return value from recvmsg() or recvmsg_into().
for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
if (cmsg_level == socket.SOL_SOCKET and
cmsg_type == socket.SCM_RIGHTS):
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
for fd in fds:
os.close(fd)
def createAndSendFDs(self, n):
# Send n new file descriptors created by newFDs() to the
# server, with the constant MSG as the non-ancillary data.
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(n)))]),
len(MSG))
def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
# Check that constant MSG was received with numfds file
# descriptors in a maximum of maxcmsgs control messages (which
# must contain only complete integers). By default, check
# that MSG_CTRUNC is unset, but ignore any flags in
# ignoreflags.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertIsInstance(ancdata, list)
self.assertLessEqual(len(ancdata), maxcmsgs)
fds = array.array("i")
for item in ancdata:
self.assertIsInstance(item, tuple)
cmsg_level, cmsg_type, cmsg_data = item
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
fds.frombytes(cmsg_data)
self.assertEqual(len(fds), numfds)
self.checkFDs(fds)
def testFDPassSimple(self):
# Pass a single FD (array read from bytes object).
self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testFDPassSimple(self):
self.assertEqual(
self.sendmsgToServer(
[MSG],
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", self.newFDs(1)).tobytes())]),
len(MSG))
def testMultipleFDPass(self):
# Pass multiple FDs in a single array.
self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
len(MSG), 10240))
def _testMultipleFDPass(self):
self.createAndSendFDs(4)
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassCMSG_SPACE(self):
# Test using CMSG_SPACE() to calculate ancillary buffer size.
self.checkRecvmsgFDs(
4, self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(4 * SIZEOF_INT)))
@testFDPassCMSG_SPACE.client_skip
def _testFDPassCMSG_SPACE(self):
self.createAndSendFDs(4)
def testFDPassCMSG_LEN(self):
# Test using CMSG_LEN() to calculate ancillary buffer size.
self.checkRecvmsgFDs(1,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(4 * SIZEOF_INT)),
# RFC 3542 says implementations may set
# MSG_CTRUNC if there isn't enough space
# for trailing padding.
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassCMSG_LEN(self):
self.createAndSendFDs(1)
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparate(self):
# Pass two FDs in two separate arrays. Arrays may be combined
# into a single control message by the OS.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG), 10240),
maxcmsgs=2)
@testFDPassSeparate.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
def _testFDPassSeparate(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassSeparateMinSpace(self):
# Pass two FDs in two separate arrays, receiving them into the
# minimum space for two arrays.
self.checkRecvmsgFDs(2,
self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(SIZEOF_INT)),
maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)
@testFDPassSeparateMinSpace.client_skip
@unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
def _testFDPassSeparateMinSpace(self):
fd0, fd1 = self.newFDs(2)
self.assertEqual(
self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0])),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))]),
len(MSG))
def sendAncillaryIfPossible(self, msg, ancdata):
# Try to send msg and ancdata to server, but if the system
# call fails, just send msg with no ancillary data.
try:
nbytes = self.sendmsgToServer([msg], ancdata)
except OSError as e:
# Check that it was the system call that failed
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer([msg])
self.assertEqual(nbytes, len(msg))
def testFDPassEmpty(self):
# Try to pass an empty FD array. Can receive either no array
# or an empty array.
self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
len(MSG), 10240),
ignoreflags=socket.MSG_CTRUNC)
def _testFDPassEmpty(self):
self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
b"")])
def testFDPassPartialInt(self):
# Try to pass a truncated FD array.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertLess(len(cmsg_data), SIZEOF_INT)
def _testFDPassPartialInt(self):
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [self.badfd]).tobytes()[:-1])])
@requireAttrs(socket, "CMSG_SPACE")
def testFDPassPartialIntInMiddle(self):
# Try to pass two FD arrays, the first of which is truncated.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 10240)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 2)
fds = array.array("i")
# Arrays may have been combined in a single control message
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.assertLessEqual(len(fds), 2)
self.checkFDs(fds)
@testFDPassPartialIntInMiddle.client_skip
def _testFDPassPartialIntInMiddle(self):
fd0, fd1 = self.newFDs(2)
self.sendAncillaryIfPossible(
MSG,
[(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
(socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", [fd1]))])
def checkTruncatedHeader(self, result, ignoreflags=0):
# Check that no ancillary data items are returned when data is
# truncated inside the cmsghdr structure.
msg, ancdata, flags, addr = result
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no buffer size
# is specified.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
# BSD seems to set MSG_CTRUNC only
# if an item has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTruncNoBufSize(self):
self.createAndSendFDs(1)
def testCmsgTrunc0(self):
# Check that no ancillary data is received when buffer size is 0.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
ignoreflags=socket.MSG_CTRUNC)
def _testCmsgTrunc0(self):
self.createAndSendFDs(1)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
def testCmsgTrunc1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))
def _testCmsgTrunc1(self):
self.createAndSendFDs(1)
def testCmsgTrunc2Int(self):
# The cmsghdr structure has at least three members, two of
# which are ints, so we still shouldn't see any ancillary
# data.
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
SIZEOF_INT * 2))
def _testCmsgTrunc2Int(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Minus1(self):
self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
socket.CMSG_LEN(0) - 1))
def _testCmsgTruncLen0Minus1(self):
self.createAndSendFDs(1)
# The following tests try to truncate the control message in the
# middle of the FD array.
def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
# Check that file descriptor data is truncated to between
# mindata and maxdata bytes when received with buffer size
# ancbuf, and that any complete file descriptor numbers are
# valid.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbuf)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
if mindata == 0 and ancdata == []:
return
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.SOL_SOCKET)
self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
self.assertGreaterEqual(len(cmsg_data), mindata)
self.assertLessEqual(len(cmsg_data), maxdata)
fds = array.array("i")
fds.frombytes(cmsg_data[:
len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
self.checkFDs(fds)
def testCmsgTruncLen0(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)
def _testCmsgTruncLen0(self):
self.createAndSendFDs(1)
def testCmsgTruncLen0Plus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)
def _testCmsgTruncLen0Plus1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
maxdata=SIZEOF_INT)
def _testCmsgTruncLen1(self):
self.createAndSendFDs(2)
def testCmsgTruncLen2Minus1(self):
self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
maxdata=(2 * SIZEOF_INT) - 1)
def _testCmsgTruncLen2Minus1(self):
self.createAndSendFDs(2)
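# Illustrative sketch (not part of the test suite): minimal file
# descriptor passing over an AF_UNIX socket pair, the mechanism that
# SCMRightsTest exercises above through the client/server harness.
# Guarded because AF_UNIX, SCM_RIGHTS and sendmsg()/recvmsg() are not
# available on every platform.
def _illustrate_fd_passing():
    needed = ("AF_UNIX", "SCM_RIGHTS", "CMSG_SPACE")
    if (not all(hasattr(socket, name) for name in needed)
            or not hasattr(socket.socket, "sendmsg")):
        return None
    left, right = socket.socketpair(socket.AF_UNIX, socket.SOCK_STREAM)
    with left, right:
        fd = os.open(os.devnull, os.O_RDONLY)
        try:
            # One ancillary item: (level, type, packed array of C ints).
            left.sendmsg([b"x"], [(socket.SOL_SOCKET, socket.SCM_RIGHTS,
                                   array.array("i", [fd]))])
            msg, ancdata, flags, addr = right.recvmsg(
                1, socket.CMSG_SPACE(SIZEOF_INT))
            received = array.array("i")
            for cmsg_level, cmsg_type, cmsg_data in ancdata:
                if (cmsg_level == socket.SOL_SOCKET and
                        cmsg_type == socket.SCM_RIGHTS):
                    received.frombytes(cmsg_data)
            for newfd in received:
                os.close(newfd)    # the receiver owns the duplicated FDs
        finally:
            os.close(fd)
    return len(received)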
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
# Given ancillary data list ancdata, return a mapping from
# pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
# Check that no (level, type) pair appears more than once.
d = {}
for cmsg_level, cmsg_type, cmsg_data in ancdata:
self.assertNotIn((cmsg_level, cmsg_type), d)
d[(cmsg_level, cmsg_type)] = cmsg_data
return d
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space. Check that data is MSG, ancillary data is not
# truncated (but ignore any flags in ignoreflags), and hop
# limit is between 0 and maxhop inclusive.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
self.assertIsInstance(ancdata[0], tuple)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertIsInstance(cmsg_data, bytes)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
# Test receiving the packet hop limit as ancillary data.
self.checkHopLimit(ancbufsize=10240)
@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
# Need to wait until server has asked to receive ancillary
# data, as implementations are not required to buffer it
# otherwise.
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
# Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))
@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
# Test setting hop limit on outgoing packet and receiving it
# at the other end.
self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)
@testSetHopLimit.client_skip
def _testSetHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space. Check that data is MSG, ancillary
# data is not truncated (but ignore any flags in ignoreflags),
# and traffic class and hop limit are in range (hop limit no
# more than maxhop).
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 2)
ancmap = self.ancillaryMapping(ancdata)
tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
self.assertEqual(len(tcdata), SIZEOF_INT)
a = array.array("i")
a.frombytes(tcdata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
self.assertEqual(len(hldata), SIZEOF_INT)
a = array.array("i")
a.frombytes(hldata)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
# Test receiving traffic class and hop limit as ancillary data.
self.checkTrafficClassAndHopLimit(ancbufsize=10240)
@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
# Test receiving traffic class and hop limit, using
# CMSG_SPACE() to calculate buffer size.
self.checkTrafficClassAndHopLimit(
ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)
@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
# Test setting traffic class and hop limit on outgoing packet,
# and receiving them at the other end.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.assertEqual(
self.sendmsgToServer([MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))]),
len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
# Try to send ancillary data with first item one byte too
# long. Fall back to sending with correct size if this fails,
# and check that second item was handled correctly.
self.checkTrafficClassAndHopLimit(ancbufsize=10240,
maxhop=self.hop_limit)
@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
try:
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
except OSError as e:
self.assertIsInstance(e.errno, int)
nbytes = self.sendmsgToServer(
[MSG],
[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
array.array("i", [self.traffic_class])),
(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
array.array("i", [self.hop_limit]))])
self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
# Receive hop limit into ancbufsize bytes of ancillary data
# space, which should be too small to contain the ancillary
# data header (if ancbufsize is None, pass no second argument
# to recvmsg()). Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and no ancillary data is
# returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
args = () if ancbufsize is None else (ancbufsize,)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), *args)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
# Check that no ancillary data is received when no ancillary
# buffer size is provided.
self.checkHopLimitTruncatedHeader(ancbufsize=None,
# BSD seems to set
# MSG_CTRUNC only if an item
# has been partially
# received.
ignoreflags=socket.MSG_CTRUNC)
@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
# Check that no ancillary data is received when ancillary
# buffer size is zero.
self.checkHopLimitTruncatedHeader(ancbufsize=0,
ignoreflags=socket.MSG_CTRUNC)
@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=1)
@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)
@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)
@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
# Test truncation of a control message inside its associated
# data. The message may be returned with its data truncated,
# or not returned at all.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
self.assertLessEqual(len(ancdata), 1)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
self.assertLess(len(cmsg_data), SIZEOF_INT)
@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
# Receive traffic class and hop limit into ancbufsize bytes of
# ancillary data space, which should be large enough to
# contain the first item, but too small to contain the header
# of the second. Check that data is MSG, MSG_CTRUNC is set
# (unless included in ignoreflags), and only one ancillary
# data item is returned.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), ancbufsize)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
ignore=ignoreflags)
self.assertEqual(len(ancdata), 1)
cmsg_level, cmsg_type, cmsg_data = ancdata[0]
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
ignoreflags=socket.MSG_CTRUNC)
@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)
@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
2 * SIZEOF_INT)
@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
socket.CMSG_LEN(0) - 1)
@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
"IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncInData(self):
# Test truncation of the second of two control messages inside
# its associated data.
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVHOPLIMIT, 1)
self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
socket.IPV6_RECVTCLASS, 1)
self.misc_event.set()
msg, ancdata, flags, addr = self.doRecvmsg(
self.serv_sock, len(MSG),
socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)
cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertEqual(len(cmsg_data), SIZEOF_INT)
a = array.array("i")
a.frombytes(cmsg_data)
self.assertGreaterEqual(a[0], 0)
self.assertLessEqual(a[0], 255)
if ancdata:
cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
cmsg_types.remove(cmsg_type)
self.assertLess(len(cmsg_data), SIZEOF_INT)
self.assertEqual(ancdata, [])
@testSecondCmsgTruncInData.client_skip
def _testSecondCmsgTruncInData(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
self.sendToServer(MSG)
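# Illustrative sketch (not part of the test suite): how a receiver asks
# for the hop limit as RFC 3542 ancillary data, which is the behaviour
# RFC3542AncillaryTest checks above.  Guarded because the IPv6 constants
# involved are platform-dependent; the socket is merely prepared here,
# no datagram is actually received.
def _illustrate_recv_hop_limit():
    needed = ("IPPROTO_IPV6", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    if not support.IPV6_ENABLED or not all(hasattr(socket, n) for n in needed):
        return None
    sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    with sock:
        sock.bind(("::1", 0))
        # Ask the kernel to attach the hop limit to each received datagram.
        sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_RECVHOPLIMIT, 1)
        # A subsequent recvmsg() call would then return an ancillary item
        # of the form (IPPROTO_IPV6, IPV6_HOPLIMIT, 4-byte int), e.g.:
        #     msg, ancdata, flags, addr = sock.recvmsg(
        #         1024, socket.CMSG_SPACE(SIZEOF_INT))
    return True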
# Derive concrete test classes for different socket types.
class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
SendrecvmsgConnectionlessBase,
ThreadedSocketTestMixin, UDP6TestBase):
def checkRecvmsgAddress(self, addr1, addr2):
# Called to compare the received address with the address of
# the peer, ignoring scope ID
self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
RFC3542AncillaryTest,
SendrecvmsgUDP6TestBase):
pass
class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, TCPTestBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgTCPTestBase):
pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, SCTPStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgSCTPStreamTestBase):
def testRecvmsgEOF(self):
try:
super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
except OSError as e:
if e.errno != errno.ENOTCONN:
raise
self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
ConnectedStreamTestMixin, UnixStreamBase):
pass
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
pass
@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
@unittest.skipUnless(thread, 'Threading required for this test.')
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
SendrecvmsgUnixStreamTestBase):
pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
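# Illustrative sketch (not part of the original tests): the pattern the
# classes below rely on.  A SIGALRM fires while a timed-out socket call is
# blocked, and the call is expected to fail with EINTR instead of being
# silently restarted (this matches Python versions without PEP 475 retry
# semantics).  It assumes signal.setitimer() is available; the timing values
# are arbitrary.
def _example_eintr_on_recv(alarm_after=0.05):
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sock.bind((HOST, 0))
        sock.settimeout(4.0)
        old_handler = signal.signal(signal.SIGALRM,
                                    lambda signum, frame: None)
        try:
            signal.setitimer(signal.ITIMER_REAL, alarm_after)
            sock.recv(1024)       # blocks until the alarm interrupts it
        except OSError as exc:
            return exc.errno == errno.EINTR
        finally:
            signal.setitimer(signal.ITIMER_REAL, 0)
            signal.signal(signal.SIGALRM, old_handler)
        return False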
class InterruptedTimeoutBase(unittest.TestCase):
# Base class for interrupted send/receive tests. Installs an
# empty handler for SIGALRM and removes it on teardown, along with
# any scheduled alarms.
def setUp(self):
super().setUp()
orig_alrm_handler = signal.signal(signal.SIGALRM,
lambda signum, frame: None)
self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)
self.addCleanup(self.setAlarm, 0)
# Timeout for socket operations
timeout = 4.0
# Provide setAlarm() method to schedule delivery of SIGALRM after
# given number of seconds, or cancel it if zero, and an
# appropriate time value to use. Use setitimer() if available.
if hasattr(signal, "setitimer"):
alarm_time = 0.05
def setAlarm(self, seconds):
signal.setitimer(signal.ITIMER_REAL, seconds)
else:
# Old systems may deliver the alarm up to one second early
alarm_time = 2
def setAlarm(self, seconds):
signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
# Test interrupting the recv*() methods with signals when a
# timeout is set.
def setUp(self):
super().setUp()
self.serv.settimeout(self.timeout)
def checkInterruptedRecv(self, func, *args, **kwargs):
# Check that func(*args, **kwargs) raises OSError with an
# errno of EINTR when interrupted by a signal.
self.setAlarm(self.alarm_time)
with self.assertRaises(OSError) as cm:
func(*args, **kwargs)
self.assertNotIsInstance(cm.exception, socket.timeout)
self.assertEqual(cm.exception.errno, errno.EINTR)
def testInterruptedRecvTimeout(self):
self.checkInterruptedRecv(self.serv.recv, 1024)
def testInterruptedRecvIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))
def testInterruptedRecvfromTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom, 1024)
def testInterruptedRecvfromIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))
@requireAttrs(socket.socket, "recvmsg")
def testInterruptedRecvmsgTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg, 1024)
@requireAttrs(socket.socket, "recvmsg_into")
def testInterruptedRecvmsgIntoTimeout(self):
self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
"Don't have signal.alarm or signal.setitimer")
@unittest.skipUnless(thread, 'Threading required for this test.')
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
ThreadSafeCleanupTestCase,
SocketListeningTestMixin, TCPTestBase):
# Test interrupting the interruptible send*() methods with signals
# when a timeout is set.
def setUp(self):
super().setUp()
self.serv_conn = self.newSocket()
self.addCleanup(self.serv_conn.close)
# Use a thread to complete the connection, but wait for it to
# terminate before running the test, so that there is only one
# thread to accept the signal.
cli_thread = threading.Thread(target=self.doConnect)
cli_thread.start()
self.cli_conn, addr = self.serv.accept()
self.addCleanup(self.cli_conn.close)
cli_thread.join()
self.serv_conn.settimeout(self.timeout)
def doConnect(self):
self.serv_conn.connect(self.serv_addr)
def checkInterruptedSend(self, func, *args, **kwargs):
# Check that func(*args, **kwargs), run in a loop, raises
# OSError with an errno of EINTR when interrupted by a
# signal.
with self.assertRaises(OSError) as cm:
while True:
self.setAlarm(self.alarm_time)
func(*args, **kwargs)
self.assertNotIsInstance(cm.exception, socket.timeout)
self.assertEqual(cm.exception.errno, errno.EINTR)
# Issue #12958: The following tests have problems on OS X prior to 10.7
@support.requires_mac_ver(10, 7)
def testInterruptedSendTimeout(self):
self.checkInterruptedSend(self.serv_conn.send, b"a"*512)
@support.requires_mac_ver(10, 7)
def testInterruptedSendtoTimeout(self):
        # Pass an actual address here, since Python's sendto() wrapper
        # doesn't allow a zero-length one; POSIX requires the address to be
        # ignored anyway because the socket is connection-mode.
self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
self.serv_addr)
@support.requires_mac_ver(10, 7)
@requireAttrs(socket.socket, "sendmsg")
def testInterruptedSendmsgTimeout(self):
self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
@unittest.skipUnless(thread, 'Threading required for this test.')
class TCPCloserTest(ThreadedTCPSocketTest):
def testClose(self):
conn, addr = self.serv.accept()
conn.close()
sd = self.cli
read, write, err = select.select([sd], [], [], 1.0)
self.assertEqual(read, [sd])
self.assertEqual(sd.recv(1), b'')
# Calling close() many times should be safe.
conn.close()
conn.close()
def _testClose(self):
self.cli.connect((HOST, self.port))
time.sleep(1.0)
@unittest.skipUnless(hasattr(socket, 'socketpair'),
'test needs socket.socketpair()')
@unittest.skipUnless(thread, 'Threading required for this test.')
class BasicSocketPairTest(SocketPairTest):
def __init__(self, methodName='runTest'):
SocketPairTest.__init__(self, methodName=methodName)
def _check_defaults(self, sock):
self.assertIsInstance(sock, socket.socket)
if hasattr(socket, 'AF_UNIX'):
self.assertEqual(sock.family, socket.AF_UNIX)
else:
self.assertEqual(sock.family, socket.AF_INET)
self.assertEqual(sock.type, socket.SOCK_STREAM)
self.assertEqual(sock.proto, 0)
def _testDefaults(self):
self._check_defaults(self.cli)
def testDefaults(self):
self._check_defaults(self.serv)
def testRecv(self):
msg = self.serv.recv(1024)
self.assertEqual(msg, MSG)
def _testRecv(self):
self.cli.send(MSG)
def testSend(self):
self.serv.send(MSG)
def _testSend(self):
msg = self.cli.recv(1024)
self.assertEqual(msg, MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NonBlockingTCPTests(ThreadedTCPSocketTest):
def __init__(self, methodName='runTest'):
ThreadedTCPSocketTest.__init__(self, methodName=methodName)
def testSetBlocking(self):
# Testing whether set blocking works
self.serv.setblocking(True)
self.assertIsNone(self.serv.gettimeout())
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error setting non-blocking mode.")
def _testSetBlocking(self):
pass
@support.cpython_only
def testSetBlocking_overflow(self):
# Issue 15989
import _testcapi
if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
self.skipTest('needs UINT_MAX < ULONG_MAX')
self.serv.setblocking(False)
self.assertEqual(self.serv.gettimeout(), 0.0)
self.serv.setblocking(_testcapi.UINT_MAX + 1)
self.assertIsNone(self.serv.gettimeout())
_testSetBlocking_overflow = support.cpython_only(_testSetBlocking)
@unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
'test needs socket.SOCK_NONBLOCK')
@support.requires_linux_version(2, 6, 28)
def testInitNonBlocking(self):
# reinit server socket
self.serv.close()
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM |
socket.SOCK_NONBLOCK)
self.port = support.bind_port(self.serv)
self.serv.listen(1)
# actual testing
start = time.time()
try:
self.serv.accept()
except OSError:
pass
end = time.time()
self.assertTrue((end - start) < 1.0, "Error creating with non-blocking mode.")
def _testInitNonBlocking(self):
pass
def testInheritFlags(self):
# Issue #7995: when calling accept() on a listening socket with a
# timeout, the resulting socket should not be non-blocking.
self.serv.settimeout(10)
try:
conn, addr = self.serv.accept()
message = conn.recv(len(MSG))
finally:
conn.close()
self.serv.settimeout(None)
def _testInheritFlags(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
time.sleep(0.5)
self.cli.send(MSG)
def testAccept(self):
# Testing non-blocking accept
self.serv.setblocking(0)
try:
conn, addr = self.serv.accept()
except OSError:
pass
else:
self.fail("Error trying to do non-blocking accept.")
read, write, err = select.select([self.serv], [], [])
if self.serv in read:
conn, addr = self.serv.accept()
conn.close()
else:
self.fail("Error trying to do accept after select.")
def _testAccept(self):
time.sleep(0.1)
self.cli.connect((HOST, self.port))
def testConnect(self):
# Testing non-blocking connect
conn, addr = self.serv.accept()
conn.close()
def _testConnect(self):
self.cli.settimeout(10)
self.cli.connect((HOST, self.port))
def testRecv(self):
# Testing non-blocking recv
conn, addr = self.serv.accept()
conn.setblocking(0)
try:
msg = conn.recv(len(MSG))
except OSError:
pass
else:
self.fail("Error trying to do non-blocking recv.")
read, write, err = select.select([conn], [], [])
if conn in read:
msg = conn.recv(len(MSG))
conn.close()
self.assertEqual(msg, MSG)
else:
self.fail("Error during select call to non-blocking socket.")
def _testRecv(self):
self.cli.connect((HOST, self.port))
time.sleep(0.1)
self.cli.send(MSG)
@unittest.skipUnless(thread, 'Threading required for this test.')
class FileObjectClassTestCase(SocketConnectedTest):
"""Unit tests for the object returned by socket.makefile()
self.read_file is the io object returned by makefile() on
the client connection. You can read from this file to
get output from the server.
self.write_file is the io object returned by makefile() on the
server connection. You can write to this file to send output
to the client.
"""
bufsize = -1 # Use default buffer size
encoding = 'utf-8'
errors = 'strict'
newline = None
read_mode = 'rb'
read_msg = MSG
write_mode = 'wb'
write_msg = MSG
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def setUp(self):
self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
threading.Event() for i in range(4)]
SocketConnectedTest.setUp(self)
self.read_file = self.cli_conn.makefile(
self.read_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def tearDown(self):
self.serv_finished.set()
self.read_file.close()
self.assertTrue(self.read_file.closed)
self.read_file = None
SocketConnectedTest.tearDown(self)
def clientSetUp(self):
SocketConnectedTest.clientSetUp(self)
self.write_file = self.serv_conn.makefile(
self.write_mode, self.bufsize,
encoding = self.encoding,
errors = self.errors,
newline = self.newline)
def clientTearDown(self):
self.cli_finished.set()
self.write_file.close()
self.assertTrue(self.write_file.closed)
self.write_file = None
SocketConnectedTest.clientTearDown(self)
def testReadAfterTimeout(self):
# Issue #7322: A file object must disallow further reads
# after a timeout has occurred.
self.cli_conn.settimeout(1)
self.read_file.read(3)
# First read raises a timeout
self.assertRaises(socket.timeout, self.read_file.read, 1)
# Second read is disallowed
with self.assertRaises(OSError) as ctx:
self.read_file.read(1)
self.assertIn("cannot read from timed out object", str(ctx.exception))
def _testReadAfterTimeout(self):
self.write_file.write(self.write_msg[0:3])
self.write_file.flush()
self.serv_finished.wait()
def testSmallRead(self):
# Performing small file read test
first_seg = self.read_file.read(len(self.read_msg)-3)
second_seg = self.read_file.read(3)
msg = first_seg + second_seg
self.assertEqual(msg, self.read_msg)
def _testSmallRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testFullRead(self):
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testFullRead(self):
self.write_file.write(self.write_msg)
self.write_file.close()
def testUnbufferedRead(self):
# Performing unbuffered file read test
buf = type(self.read_msg)()
while 1:
char = self.read_file.read(1)
if not char:
break
buf += char
self.assertEqual(buf, self.read_msg)
def _testUnbufferedRead(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testReadline(self):
# Performing file readline test
line = self.read_file.readline()
self.assertEqual(line, self.read_msg)
def _testReadline(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testCloseAfterMakefile(self):
# The file returned by makefile should keep the socket open.
self.cli_conn.close()
# read until EOF
msg = self.read_file.read()
self.assertEqual(msg, self.read_msg)
def _testCloseAfterMakefile(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileAfterMakefileClose(self):
self.read_file.close()
msg = self.cli_conn.recv(len(MSG))
if isinstance(self.read_msg, str):
msg = msg.decode()
self.assertEqual(msg, self.read_msg)
def _testMakefileAfterMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testClosedAttr(self):
self.assertTrue(not self.read_file.closed)
def _testClosedAttr(self):
self.assertTrue(not self.write_file.closed)
def testAttributes(self):
self.assertEqual(self.read_file.mode, self.read_mode)
self.assertEqual(self.read_file.name, self.cli_conn.fileno())
def _testAttributes(self):
self.assertEqual(self.write_file.mode, self.write_mode)
self.assertEqual(self.write_file.name, self.serv_conn.fileno())
def testRealClose(self):
self.read_file.close()
self.assertRaises(ValueError, self.read_file.fileno)
self.cli_conn.close()
self.assertRaises(OSError, self.cli_conn.getsockname)
def _testRealClose(self):
pass
class FileObjectInterruptedTestCase(unittest.TestCase):
"""Test that the file object correctly handles EINTR internally."""
class MockSocket(object):
def __init__(self, recv_funcs=()):
# A generator that returns callables that we'll call for each
# call to recv().
self._recv_step = iter(recv_funcs)
def recv_into(self, buffer):
data = next(self._recv_step)()
assert len(buffer) >= len(data)
buffer[:len(data)] = data
return len(data)
def _decref_socketios(self):
pass
def _textiowrap_for_test(self, buffering=-1):
raw = socket.SocketIO(self, "r")
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
return raw
buffer = io.BufferedReader(raw, buffering)
text = io.TextIOWrapper(buffer, None, None)
text.mode = "rb"
return text
@staticmethod
def _raise_eintr():
raise OSError(errno.EINTR, "interrupted")
def _textiowrap_mock_socket(self, mock, buffering=-1):
raw = socket.SocketIO(mock, "r")
if buffering < 0:
buffering = io.DEFAULT_BUFFER_SIZE
if buffering == 0:
return raw
buffer = io.BufferedReader(raw, buffering)
text = io.TextIOWrapper(buffer, None, None)
text.mode = "rb"
return text
def _test_readline(self, size=-1, buffering=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"This is the first line\nAnd the sec",
self._raise_eintr,
lambda : b"ond line is here\n",
lambda : b"",
lambda : b"", # XXX(gps): io library does an extra EOF read
])
fo = mock_sock._textiowrap_for_test(buffering=buffering)
self.assertEqual(fo.readline(size), "This is the first line\n")
self.assertEqual(fo.readline(size), "And the second line is here\n")
def _test_read(self, size=-1, buffering=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"This is the first line\nAnd the sec",
self._raise_eintr,
lambda : b"ond line is here\n",
lambda : b"",
lambda : b"", # XXX(gps): io library does an extra EOF read
])
expecting = (b"This is the first line\n"
b"And the second line is here\n")
fo = mock_sock._textiowrap_for_test(buffering=buffering)
if buffering == 0:
data = b''
else:
data = ''
expecting = expecting.decode('utf-8')
while len(data) != len(expecting):
part = fo.read(size)
if not part:
break
data += part
self.assertEqual(data, expecting)
def test_default(self):
self._test_readline()
self._test_readline(size=100)
self._test_read()
self._test_read(size=100)
def test_with_1k_buffer(self):
self._test_readline(buffering=1024)
self._test_readline(size=100, buffering=1024)
self._test_read(buffering=1024)
self._test_read(size=100, buffering=1024)
def _test_readline_no_buffer(self, size=-1):
mock_sock = self.MockSocket(recv_funcs=[
lambda : b"a",
lambda : b"\n",
lambda : b"B",
self._raise_eintr,
lambda : b"b",
lambda : b"",
])
fo = mock_sock._textiowrap_for_test(buffering=0)
self.assertEqual(fo.readline(size), b"a\n")
self.assertEqual(fo.readline(size), b"Bb")
def test_no_buffer(self):
self._test_readline_no_buffer()
self._test_readline_no_buffer(size=4)
self._test_read(buffering=0)
self._test_read(size=100, buffering=0)
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):
"""Repeat the tests from FileObjectClassTestCase with bufsize==0.
In this case (and in this case only), it should be possible to
create a file object, read a line from it, create another file
object, read another line from it, without loss of data in the
first file object's buffer. Note that http.client relies on this
when reading multiple requests from the same socket."""
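    # Illustrative note (not part of the original tests): because bufsize == 0
    # turns off read-ahead, a sequence such as
    #     f1 = conn.makefile('rb', 0); line1 = f1.readline()
    #     f2 = conn.makefile('rb', 0); line2 = f2.readline()
    # yields consecutive lines with no data stranded in f1's buffer;
    # testUnbufferedReadline below exercises exactly this pattern.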
bufsize = 0 # Use unbuffered mode
def testUnbufferedReadline(self):
# Read a line, create a new file object, read another line with it
line = self.read_file.readline() # first line
self.assertEqual(line, b"A. " + self.write_msg) # first line
self.read_file = self.cli_conn.makefile('rb', 0)
line = self.read_file.readline() # second line
self.assertEqual(line, b"B. " + self.write_msg) # second line
def _testUnbufferedReadline(self):
self.write_file.write(b"A. " + self.write_msg)
self.write_file.write(b"B. " + self.write_msg)
self.write_file.flush()
def testMakefileClose(self):
# The file returned by makefile should keep the socket open...
self.cli_conn.close()
msg = self.cli_conn.recv(1024)
self.assertEqual(msg, self.read_msg)
# ...until the file is itself closed
self.read_file.close()
self.assertRaises(OSError, self.cli_conn.recv, 1024)
def _testMakefileClose(self):
self.write_file.write(self.write_msg)
self.write_file.flush()
def testMakefileCloseSocketDestroy(self):
refcount_before = sys.getrefcount(self.cli_conn)
self.read_file.close()
refcount_after = sys.getrefcount(self.cli_conn)
self.assertEqual(refcount_before - 1, refcount_after)
def _testMakefileCloseSocketDestroy(self):
pass
# Non-blocking ops
# NOTE: to set `read_file` as non-blocking, we must call
# `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).
def testSmallReadNonBlocking(self):
self.cli_conn.setblocking(False)
self.assertEqual(self.read_file.readinto(bytearray(10)), None)
self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
self.evt1.set()
self.evt2.wait(1.0)
first_seg = self.read_file.read(len(self.read_msg) - 3)
if first_seg is None:
# Data not arrived (can happen under Windows), wait a bit
time.sleep(0.5)
first_seg = self.read_file.read(len(self.read_msg) - 3)
buf = bytearray(10)
n = self.read_file.readinto(buf)
self.assertEqual(n, 3)
msg = first_seg + buf[:n]
self.assertEqual(msg, self.read_msg)
self.assertEqual(self.read_file.readinto(bytearray(16)), None)
self.assertEqual(self.read_file.read(1), None)
def _testSmallReadNonBlocking(self):
self.evt1.wait(1.0)
self.write_file.write(self.write_msg)
self.write_file.flush()
self.evt2.set()
        # Avoid closing the socket before the server test has finished,
        # otherwise the system recv() will return 0 instead of EWOULDBLOCK.
self.serv_finished.wait(5.0)
def testWriteNonBlocking(self):
self.cli_finished.wait(5.0)
# The client thread can't skip directly - the SkipTest exception
# would appear as a failure.
if self.serv_skipped:
self.skipTest(self.serv_skipped)
def _testWriteNonBlocking(self):
self.serv_skipped = None
self.serv_conn.setblocking(False)
# Try to saturate the socket buffer pipe with repeated large writes.
BIG = b"x" * support.SOCK_MAX_SIZE
LIMIT = 10
# The first write() succeeds since a chunk of data can be buffered
n = self.write_file.write(BIG)
self.assertGreater(n, 0)
for i in range(LIMIT):
n = self.write_file.write(BIG)
if n is None:
# Succeeded
break
self.assertGreater(n, 0)
else:
# Let us know that this test didn't manage to establish
# the expected conditions. This is not a failure in itself but,
# if it happens repeatedly, the test should be fixed.
self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'wb'
write_msg = MSG
newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'rb'
read_msg = MSG
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
"""Tests for socket.makefile() in text mode (rather than binary)"""
read_mode = 'r'
read_msg = MSG.decode('utf-8')
write_mode = 'w'
write_msg = MSG.decode('utf-8')
newline = ''
class NetworkConnectionTest(object):
"""Prove network connection."""
def clientSetUp(self):
# We're inherited below by BasicTCPTest2, which also inherits
# BasicTCPTest, which defines self.port referenced below.
self.cli = socket.create_connection((HOST, self.port))
self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
"""Tests that NetworkConnection does not break existing TCP functionality.
"""
class NetworkConnectionNoServer(unittest.TestCase):
class MockSocket(socket.socket):
def connect(self, *args):
raise socket.timeout('timed out')
@contextlib.contextmanager
def mocked_socket_module(self):
"""Return a socket which times out on connect"""
old_socket = socket.socket
socket.socket = self.MockSocket
try:
yield
finally:
socket.socket = old_socket
def test_connect(self):
port = support.find_unused_port()
cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(cli.close)
with self.assertRaises(OSError) as cm:
cli.connect((HOST, port))
self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)
def test_create_connection(self):
# Issue #9792: errors raised by create_connection() should have
# a proper errno attribute.
port = support.find_unused_port()
with self.assertRaises(OSError) as cm:
socket.create_connection((HOST, port))
# Issue #16257: create_connection() calls getaddrinfo() against
# 'localhost'. This may result in an IPV6 addr being returned
# as well as an IPV4 one:
# >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
# >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
# (26, 2, 0, '', ('::1', 41230, 0, 0))]
#
# create_connection() enumerates through all the addresses returned
# and if it doesn't successfully bind to any of them, it propagates
# the last exception it encountered.
#
# On Solaris, ENETUNREACH is returned in this circumstance instead
# of ECONNREFUSED. So, if that errno exists, add it to our list of
# expected errnos.
expected_errnos = [ errno.ECONNREFUSED, ]
if hasattr(errno, 'ENETUNREACH'):
expected_errnos.append(errno.ENETUNREACH)
self.assertIn(cm.exception.errno, expected_errnos)
def test_create_connection_timeout(self):
# Issue #9792: create_connection() should not recast timeout errors
# as generic socket errors.
with self.mocked_socket_module():
with self.assertRaises(socket.timeout):
socket.create_connection((HOST, 1234))
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
self.source_port = support.find_unused_port()
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def _justAccept(self):
conn, addr = self.serv.accept()
conn.close()
testFamily = _justAccept
def _testFamily(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.family, 2)
testSourceAddress = _justAccept
def _testSourceAddress(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30,
source_address=('', self.source_port))
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.getsockname()[1], self.source_port)
# The port number being used is sufficient to show that the bind()
# call happened.
testTimeoutDefault = _justAccept
def _testTimeoutDefault(self):
# passing no explicit timeout uses socket's global default
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(42)
try:
self.cli = socket.create_connection((HOST, self.port))
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), 42)
testTimeoutNone = _justAccept
def _testTimeoutNone(self):
# None timeout means the same as sock.settimeout(None)
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
self.cli = socket.create_connection((HOST, self.port), timeout=None)
self.addCleanup(self.cli.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(self.cli.gettimeout(), None)
testTimeoutValueNamed = _justAccept
def _testTimeoutValueNamed(self):
self.cli = socket.create_connection((HOST, self.port), timeout=30)
self.assertEqual(self.cli.gettimeout(), 30)
testTimeoutValueNonamed = _justAccept
def _testTimeoutValueNonamed(self):
self.cli = socket.create_connection((HOST, self.port), 30)
self.addCleanup(self.cli.close)
self.assertEqual(self.cli.gettimeout(), 30)
@unittest.skipUnless(thread, 'Threading required for this test.')
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
def __init__(self, methodName='runTest'):
SocketTCPTest.__init__(self, methodName=methodName)
ThreadableTest.__init__(self)
def clientSetUp(self):
pass
def clientTearDown(self):
self.cli.close()
self.cli = None
ThreadableTest.clientTearDown(self)
def testInsideTimeout(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
time.sleep(3)
conn.send(b"done!")
testOutsideTimeout = testInsideTimeout
def _testInsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port))
data = sock.recv(5)
self.assertEqual(data, b"done!")
def _testOutsideTimeout(self):
self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
def testTCPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.accept()
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (TCP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of error (TCP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (TCP)")
if not ok:
self.fail("accept() returned success when we did not expect it")
@unittest.skipUnless(hasattr(signal, 'alarm'),
'test needs signal.alarm()')
def testInterruptedTimeout(self):
# XXX I don't know how to do this test on MSWindows or any other
        # platform that doesn't support signal.alarm() or os.kill(), though
# the bug should have existed on all platforms.
self.serv.settimeout(5.0) # must be longer than alarm
class Alarm(Exception):
pass
def alarm_handler(signal, frame):
raise Alarm
old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
try:
signal.alarm(2) # POSIX allows alarm to be up to 1 second early
try:
foo = self.serv.accept()
except socket.timeout:
self.fail("caught timeout instead of Alarm")
except Alarm:
pass
except:
self.fail("caught other exception instead of Alarm:"
" %s(%s):\n%s" %
(sys.exc_info()[:2] + (traceback.format_exc(),)))
else:
self.fail("nothing caught")
finally:
signal.alarm(0) # shut off alarm
except Alarm:
self.fail("got Alarm in wrong place")
finally:
# no alarm can be pending. Safe to restore old handler.
signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
def testUDPTimeout(self):
def raise_timeout(*args, **kwargs):
self.serv.settimeout(1.0)
self.serv.recv(1024)
self.assertRaises(socket.timeout, raise_timeout,
"Error generating a timeout exception (UDP)")
def testTimeoutZero(self):
ok = False
try:
self.serv.settimeout(0.0)
foo = self.serv.recv(1024)
except socket.timeout:
self.fail("caught timeout instead of error (UDP)")
except OSError:
ok = True
except:
self.fail("caught unexpected exception (UDP)")
if not ok:
self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
def testExceptionTree(self):
self.assertTrue(issubclass(OSError, Exception))
self.assertTrue(issubclass(socket.herror, OSError))
self.assertTrue(issubclass(socket.gaierror, OSError))
self.assertTrue(issubclass(socket.timeout, OSError))
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
UNIX_PATH_MAX = 108
def testLinuxAbstractNamespace(self):
address = b"\x00python-test-hello\x00\xff"
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
s1.bind(address)
s1.listen(1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
s2.connect(s1.getsockname())
with s1.accept()[0] as s3:
self.assertEqual(s1.getsockname(), address)
self.assertEqual(s2.getpeername(), address)
def testMaxName(self):
address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
s.bind(address)
self.assertEqual(s.getsockname(), address)
def testNameOverflow(self):
address = "\x00" + "h" * self.UNIX_PATH_MAX
with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
self.assertRaises(OSError, s.bind, address)
def testStrName(self):
# Check that an abstract name can be passed as a string.
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
s.bind("\x00python\x00test\x00")
self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
finally:
s.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
def tearDown(self):
self.sock.close()
def encoded(self, path):
# Return the given path encoded in the file system encoding,
# or skip the test if this is not possible.
try:
return os.fsencode(path)
except UnicodeEncodeError:
self.skipTest(
"Pathname {0!a} cannot be represented in file "
"system encoding {1!r}".format(
path, sys.getfilesystemencoding()))
def bind(self, sock, path):
# Bind the socket
try:
sock.bind(path)
except OSError as e:
if str(e) == "AF_UNIX path too long":
self.skipTest(
"Pathname {0!a} is too long to serve as a AF_UNIX path"
.format(path))
else:
raise
def testStrAddr(self):
# Test binding to and retrieving a normal string pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testBytesAddr(self):
# Test binding to a bytes pathname.
path = os.path.abspath(support.TESTFN)
self.bind(self.sock, self.encoded(path))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testSurrogateescapeBind(self):
# Test binding to a valid non-ASCII pathname, with the
# non-ASCII bytes supplied using surrogateescape encoding.
path = os.path.abspath(support.TESTFN_UNICODE)
b = self.encoded(path)
self.bind(self.sock, b.decode("ascii", "surrogateescape"))
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
def testUnencodableAddr(self):
# Test binding to a pathname that cannot be encoded in the
# file system encoding.
if support.TESTFN_UNENCODABLE is None:
self.skipTest("No unencodable filename available")
path = os.path.abspath(support.TESTFN_UNENCODABLE)
self.bind(self.sock, path)
self.addCleanup(support.unlink, path)
self.assertEqual(self.sock.getsockname(), path)
@unittest.skipUnless(thread, 'Threading required for this test.')
class BufferIOTest(SocketConnectedTest):
"""
Test the buffer versions of socket.recv() and socket.send().
"""
def __init__(self, methodName='runTest'):
SocketConnectedTest.__init__(self, methodName=methodName)
def testRecvIntoArray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvIntoBytearray(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoBytearray = _testRecvIntoArray
def testRecvIntoMemoryview(self):
buf = bytearray(1024)
nbytes = self.cli_conn.recv_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvIntoMemoryview = _testRecvIntoArray
def testRecvFromIntoArray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
def _testRecvFromIntoArray(self):
buf = bytes(MSG)
self.serv_conn.send(buf)
def testRecvFromIntoBytearray(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(buf)
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoBytearray = _testRecvFromIntoArray
def testRecvFromIntoMemoryview(self):
buf = bytearray(1024)
nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
self.assertEqual(nbytes, len(MSG))
msg = buf[:len(MSG)]
self.assertEqual(msg, MSG)
_testRecvFromIntoMemoryview = _testRecvFromIntoArray
def testRecvFromIntoSmallBuffer(self):
# See issue #20246.
buf = bytearray(8)
self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)
def _testRecvFromIntoSmallBuffer(self):
self.serv_conn.send(MSG)
def testRecvFromIntoEmptyBuffer(self):
buf = bytearray()
self.cli_conn.recvfrom_into(buf)
self.cli_conn.recvfrom_into(buf, 0)
_testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
"""Check if the TIPC module is loaded
The TIPC module is not loaded automatically on Ubuntu and probably
other Linux distros.
"""
if not hasattr(socket, "AF_TIPC"):
return False
if not os.path.isfile("/proc/modules"):
return False
with open("/proc/modules") as f:
for line in f:
if line.startswith("tipc "):
return True
return False
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
def testRDM(self):
srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
self.addCleanup(srv.close)
self.addCleanup(cli.close)
srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
srv.bind(srvaddr)
sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
cli.sendto(MSG, sendaddr)
msg, recvaddr = srv.recvfrom(1024)
self.assertEqual(cli.getsockname(), recvaddr)
self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
"TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
def __init__(self, methodName = 'runTest'):
unittest.TestCase.__init__(self, methodName = methodName)
ThreadableTest.__init__(self)
def setUp(self):
self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.srv.close)
self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
TIPC_LOWER, TIPC_UPPER)
self.srv.bind(srvaddr)
self.srv.listen(5)
self.serverExplicitReady()
self.conn, self.connaddr = self.srv.accept()
self.addCleanup(self.conn.close)
def clientSetUp(self):
        # There is a hittable race between serverExplicitReady() and the
# accept() call; sleep a little while to avoid it, otherwise
# we could get an exception
time.sleep(0.1)
self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
self.addCleanup(self.cli.close)
addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
self.cli.connect(addr)
self.cliaddr = self.cli.getsockname()
def testStream(self):
msg = self.conn.recv(1024)
self.assertEqual(msg, MSG)
self.assertEqual(self.cliaddr, self.connaddr)
def _testStream(self):
self.cli.send(MSG)
self.cli.close()
@unittest.skipUnless(thread, 'Threading required for this test.')
class ContextManagersTest(ThreadedTCPSocketTest):
def _testSocketClass(self):
# base test
with socket.socket() as sock:
self.assertFalse(sock._closed)
self.assertTrue(sock._closed)
# close inside with block
with socket.socket() as sock:
sock.close()
self.assertTrue(sock._closed)
# exception inside with block
with socket.socket() as sock:
self.assertRaises(OSError, sock.sendall, b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionBase(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionBase(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
self.assertFalse(sock._closed)
sock.sendall(b'foo')
self.assertEqual(sock.recv(1024), b'foo')
self.assertTrue(sock._closed)
def testCreateConnectionClose(self):
conn, addr = self.serv.accept()
self.addCleanup(conn.close)
data = conn.recv(1024)
conn.sendall(data)
def _testCreateConnectionClose(self):
address = self.serv.getsockname()
with socket.create_connection(address) as sock:
sock.close()
self.assertTrue(sock._closed)
self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
@unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
"SOCK_CLOEXEC not defined")
@support.requires_linux_version(2, 6, 28)
def test_SOCK_CLOEXEC(self):
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
self.assertTrue(s.type & socket.SOCK_CLOEXEC)
self.assertFalse(s.get_inheritable())
def test_default_inheritable(self):
sock = socket.socket()
with sock:
self.assertEqual(sock.get_inheritable(), False)
def test_dup(self):
sock = socket.socket()
with sock:
newsock = sock.dup()
sock.close()
with newsock:
self.assertEqual(newsock.get_inheritable(), False)
def test_set_inheritable(self):
sock = socket.socket()
with sock:
sock.set_inheritable(True)
self.assertEqual(sock.get_inheritable(), True)
sock.set_inheritable(False)
self.assertEqual(sock.get_inheritable(), False)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_get_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(sock.get_inheritable(), False)
# clear FD_CLOEXEC flag
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
flags &= ~fcntl.FD_CLOEXEC
fcntl.fcntl(fd, fcntl.F_SETFD, flags)
self.assertEqual(sock.get_inheritable(), True)
@unittest.skipIf(fcntl is None, "need fcntl")
def test_set_inheritable_cloexec(self):
sock = socket.socket()
with sock:
fd = sock.fileno()
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
fcntl.FD_CLOEXEC)
sock.set_inheritable(True)
self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
0)
@unittest.skipUnless(hasattr(socket, "socketpair"),
"need socket.socketpair()")
def test_socketpair(self):
s1, s2 = socket.socketpair()
self.addCleanup(s1.close)
self.addCleanup(s2.close)
self.assertEqual(s1.get_inheritable(), False)
self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
"SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
def checkNonblock(self, s, nonblock=True, timeout=0.0):
if nonblock:
self.assertTrue(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), timeout)
else:
self.assertFalse(s.type & socket.SOCK_NONBLOCK)
self.assertEqual(s.gettimeout(), None)
@support.requires_linux_version(2, 6, 28)
def test_SOCK_NONBLOCK(self):
# a lot of it seems silly and redundant, but I wanted to test that
# changing back and forth worked ok
with socket.socket(socket.AF_INET,
socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
self.checkNonblock(s)
s.setblocking(1)
self.checkNonblock(s, False)
s.setblocking(0)
self.checkNonblock(s)
s.settimeout(None)
self.checkNonblock(s, False)
s.settimeout(2.0)
self.checkNonblock(s, timeout=2.0)
s.setblocking(1)
self.checkNonblock(s, False)
# defaulttimeout
t = socket.getdefaulttimeout()
socket.setdefaulttimeout(0.0)
with socket.socket() as s:
self.checkNonblock(s)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(2.0)
with socket.socket() as s:
self.checkNonblock(s, timeout=2.0)
socket.setdefaulttimeout(None)
with socket.socket() as s:
self.checkNonblock(s, False)
socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
# This must be classmethod and not staticmethod or multiprocessing
# won't be able to bootstrap it.
@classmethod
def remoteProcessServer(cls, q):
# Recreate socket from shared data
sdata = q.get()
message = q.get()
s = socket.fromshare(sdata)
s2, c = s.accept()
# Send the message
s2.sendall(message)
s2.close()
s.close()
def testShare(self):
# Transfer the listening server socket to another process
# and service it from there.
# Create process:
q = multiprocessing.Queue()
p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
p.start()
# Get the shared socket data
data = self.serv.share(p.pid)
# Pass the shared socket to the other process
addr = self.serv.getsockname()
self.serv.close()
q.put(data)
# The data that the server will send us
message = b"slapmahfro"
q.put(message)
# Connect
s = socket.create_connection(addr)
# listen for the data
m = []
while True:
data = s.recv(100)
if not data:
break
m.append(data)
s.close()
received = b"".join(m)
self.assertEqual(received, message)
p.join()
def testShareLength(self):
data = self.serv.share(os.getpid())
self.assertRaises(ValueError, socket.fromshare, data[:-1])
self.assertRaises(ValueError, socket.fromshare, data+b"foo")
def compareSockets(self, org, other):
        # Socket sharing is expected to work only for blocking sockets,
        # since the internal Python timeout value isn't transferred.
self.assertEqual(org.gettimeout(), None)
self.assertEqual(org.gettimeout(), other.gettimeout())
self.assertEqual(org.family, other.family)
self.assertEqual(org.type, other.type)
# If the user specified "0" for proto, then
        # internally Windows will have picked the correct value.
        # Python introspection on the socket, however, will still return
        # 0. For the shared socket, the Python value is recreated
# from the actual value, so it may not compare correctly.
if org.proto != 0:
self.assertEqual(org.proto, other.proto)
def testShareLocal(self):
data = self.serv.share(os.getpid())
s = socket.fromshare(data)
try:
self.compareSockets(self.serv, s)
finally:
s.close()
def testTypes(self):
families = [socket.AF_INET, socket.AF_INET6]
types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
for f in families:
for t in types:
try:
source = socket.socket(f, t)
except OSError:
continue # This combination is not supported
try:
data = source.share(os.getpid())
shared = socket.fromshare(data)
try:
self.compareSockets(source, shared)
finally:
shared.close()
finally:
source.close()
def test_main():
tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest, UDPTimeoutTest ]
tests.extend([
NonBlockingTCPTests,
FileObjectClassTestCase,
FileObjectInterruptedTestCase,
UnbufferedFileObjectClassTestCase,
LineBufferedFileObjectClassTestCase,
SmallBufferedFileObjectClassTestCase,
UnicodeReadFileObjectClassTestCase,
UnicodeWriteFileObjectClassTestCase,
UnicodeReadWriteFileObjectClassTestCase,
NetworkConnectionNoServer,
NetworkConnectionAttributesTest,
NetworkConnectionBehaviourTest,
ContextManagersTest,
InheritanceTest,
NonblockConstantTest
])
tests.append(BasicSocketPairTest)
tests.append(TestUnixDomain)
tests.append(TestLinuxAbstractNamespace)
tests.extend([TIPCTest, TIPCThreadableTest])
tests.extend([BasicCANTest, CANTest])
tests.extend([BasicRDSTest, RDSTest])
tests.extend([
CmsgMacroTests,
SendmsgUDPTest,
RecvmsgUDPTest,
RecvmsgIntoUDPTest,
SendmsgUDP6Test,
RecvmsgUDP6Test,
RecvmsgRFC3542AncillaryUDP6Test,
RecvmsgIntoRFC3542AncillaryUDP6Test,
RecvmsgIntoUDP6Test,
SendmsgTCPTest,
RecvmsgTCPTest,
RecvmsgIntoTCPTest,
SendmsgSCTPStreamTest,
RecvmsgSCTPStreamTest,
RecvmsgIntoSCTPStreamTest,
SendmsgUnixStreamTest,
RecvmsgUnixStreamTest,
RecvmsgIntoUnixStreamTest,
RecvmsgSCMRightsStreamTest,
RecvmsgIntoSCMRightsStreamTest,
# These are slow when setitimer() is not available
InterruptedRecvTimeoutTest,
InterruptedSendTimeoutTest,
TestSocketSharing,
])
thread_info = support.threading_setup()
support.run_unittest(*tests)
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
py | b412393eb261088f971d3b3f6692090b7c94dbd1 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Library for creating sequence-to-sequence models in TensorFlow.
Sequence-to-sequence recurrent neural networks can learn complex functions
that map input sequences to output sequences. These models yield very good
results on a number of tasks, such as speech recognition, parsing, machine
translation, or even constructing automated replies to emails.
Before using this module, it is recommended to read the TensorFlow tutorial
on sequence-to-sequence models. It explains the basic concepts of this module
and shows an end-to-end example of how to build a translation model.
https://www.tensorflow.org/versions/master/tutorials/seq2seq/index.html
Here is an overview of functions available in this module. They all use
a very similar interface, so after reading the above tutorial and using
one of them, others should be easy to substitute.
* Full sequence-to-sequence models.
- basic_rnn_seq2seq: The most basic RNN-RNN model.
- tied_rnn_seq2seq: The basic model with tied encoder and decoder weights.
- embedding_rnn_seq2seq: The basic model with input embedding.
- embedding_tied_rnn_seq2seq: The tied model with input embedding.
- embedding_attention_seq2seq: Advanced model with input embedding and
the neural attention mechanism; recommended for complex tasks.
* Multi-task sequence-to-sequence models.
- one2many_rnn_seq2seq: The embedding model with multiple decoders.
* Decoders (when you write your own encoder, you can use these to decode;
e.g., if you want to write a model that generates captions for images).
- rnn_decoder: The basic decoder based on a pure RNN.
- attention_decoder: A decoder that uses the attention mechanism.
* Losses.
- sequence_loss: Loss for a sequence model returning average log-perplexity.
- sequence_loss_by_example: As above, but not averaging over all examples.
* model_with_buckets: A convenience function to create models with bucketing
(see the tutorial above for an explanation of why and how to use it).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# We disable pylint because we need python3 compatibility.
from six.moves import xrange # pylint: disable=redefined-builtin
from six.moves import zip # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.contrib.rnn.python.ops import rnn, rnn_cell
from tensorflow.python.ops import variable_scope
linear = rnn_cell._linear # pylint: disable=protected-access
def _extract_argmax_and_embed(embedding, output_projection=None,
update_embedding=True):
"""Get a loop_function that extracts the previous symbol and embeds it.
Args:
embedding: embedding tensor for symbols.
output_projection: None or a pair (W, B). If provided, each fed previous
output will first be multiplied by W and added B.
update_embedding: Boolean; if False, the gradients will not propagate
through the embeddings.
Returns:
A loop function.
"""
def loop_function(prev, _):
if output_projection is not None:
prev = nn_ops.xw_plus_b(
prev, output_projection[0], output_projection[1])
prev_symbol = math_ops.argmax(prev, 1)
# Note that gradients will not propagate through the second parameter of
# embedding_lookup.
emb_prev = embedding_ops.embedding_lookup(embedding, prev_symbol)
if not update_embedding:
emb_prev = array_ops.stop_gradient(emb_prev)
return emb_prev
return loop_function
def rnn_decoder(decoder_inputs, initial_state, cell, loop_function=None,
scope=None):
"""RNN decoder for the sequence-to-sequence model.
Args:
decoder_inputs: A list of 2D Tensors [batch_size x input_size].
initial_state: 2D Tensor with shape [batch_size x cell.state_size].
cell: rnn_cell.RNNCell defining the cell function and size.
loop_function: If not None, this function will be applied to the i-th output
in order to generate the i+1-st input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/abs/1506.03099.
Signature -- loop_function(prev, i) = next
* prev is a 2D Tensor of shape [batch_size x output_size],
* i is an integer, the step number (when advanced control is needed),
* next is a 2D Tensor of shape [batch_size x input_size].
scope: VariableScope for the created subgraph; defaults to "rnn_decoder".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing generated outputs.
state: The state of each cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
(Note that in some cases, like basic RNN cell or GRU cell, outputs and
states can be the same. They are different for LSTM cells though.)
"""
with variable_scope.variable_scope(scope or "rnn_decoder"):
state = initial_state
outputs = []
prev = None
for i, inp in enumerate(decoder_inputs):
if loop_function is not None and prev is not None:
with variable_scope.variable_scope("loop_function", reuse=True):
inp = loop_function(prev, i)
if i > 0:
variable_scope.get_variable_scope().reuse_variables()
output, state = cell(inp, state)
outputs.append(output)
if loop_function is not None:
prev = output
return outputs, state
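# Illustrative sketch (not part of the original library): a minimal greedy
# decoder wired together from _extract_argmax_and_embed and rnn_decoder.
# It assumes the imported rnn_cell module provides GRUCell; the vocabulary
# size, embedding size, batch size and step count are arbitrary assumptions
# made only for this example.
def _example_greedy_rnn_decoder(num_symbols=20, embedding_size=8,
                                batch_size=4, num_steps=5):
  """Builds a tiny greedy decoding graph and returns (outputs, state)."""
  with variable_scope.variable_scope("example_greedy_rnn_decoder"):
    embedding = variable_scope.get_variable(
        "embedding", [num_symbols, embedding_size])
    # The cell output has num_symbols units, so argmax over it picks the next
    # symbol directly and no separate output projection is needed.
    cell = rnn_cell.GRUCell(num_symbols)
    loop_function = _extract_argmax_and_embed(embedding)
    # Only the first ("GO") input is consumed; later inputs come from the
    # loop_function, which embeds the previously emitted symbol.
    decoder_inputs = [array_ops.zeros([batch_size, embedding_size])] * num_steps
    initial_state = cell.zero_state(batch_size, dtypes.float32)
    return rnn_decoder(decoder_inputs, initial_state, cell,
                       loop_function=loop_function)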
def basic_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
dtype=dtypes.float32, scope=None):
"""Basic RNN sequence-to-sequence model.
This model first runs an RNN to encode encoder_inputs into a state vector,
then runs decoder, initialized with the last encoder state, on decoder_inputs.
Encoder and decoder use the same RNN cell type, but don't share parameters.
Args:
encoder_inputs: A list of 2D Tensors [batch_size x input_size].
decoder_inputs: A list of 2D Tensors [batch_size x input_size].
cell: rnn_cell.RNNCell defining the cell function and size.
dtype: The dtype of the initial state of the RNN cell (default: tf.float32).
scope: VariableScope for the created subgraph; default: "basic_rnn_seq2seq".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing the generated outputs.
state: The state of each decoder cell in the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with variable_scope.variable_scope(scope or "basic_rnn_seq2seq"):
_, enc_state = rnn.rnn(cell, encoder_inputs, dtype=dtype)
return rnn_decoder(decoder_inputs, enc_state, cell)
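# Illustrative sketch (not part of the original library): building a toy
# basic_rnn_seq2seq graph.  It assumes the imported rnn_cell module provides
# GRUCell; the cell size, batch size and sequence length are assumptions made
# only for this example.
def _example_basic_rnn_seq2seq(batch_size=4, input_size=8, seq_len=6):
  """Returns (outputs, state) for a toy encoder/decoder pair."""
  with variable_scope.variable_scope("example_basic_rnn_seq2seq"):
    cell = rnn_cell.GRUCell(input_size)
    encoder_inputs = [array_ops.zeros([batch_size, input_size])
                      for _ in xrange(seq_len)]
    decoder_inputs = [array_ops.zeros([batch_size, input_size])
                      for _ in xrange(seq_len)]
    return basic_rnn_seq2seq(encoder_inputs, decoder_inputs, cell)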
def tied_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
loop_function=None, dtype=dtypes.float32, scope=None):
"""RNN sequence-to-sequence model with tied encoder and decoder parameters.
This model first runs an RNN to encode encoder_inputs into a state vector, and
then runs decoder, initialized with the last encoder state, on decoder_inputs.
Encoder and decoder use the same RNN cell and share parameters.
Args:
encoder_inputs: A list of 2D Tensors [batch_size x input_size].
decoder_inputs: A list of 2D Tensors [batch_size x input_size].
cell: rnn_cell.RNNCell defining the cell function and size.
loop_function: If not None, this function will be applied to i-th output
in order to generate i+1-th input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol), see rnn_decoder for details.
dtype: The dtype of the initial state of the rnn cell (default: tf.float32).
scope: VariableScope for the created subgraph; default: "tied_rnn_seq2seq".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing the generated outputs.
      state: The state of each decoder cell at the final time-step.
        It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with variable_scope.variable_scope("combined_tied_rnn_seq2seq"):
scope = scope or "tied_rnn_seq2seq"
_, enc_state = rnn.rnn(
cell, encoder_inputs, dtype=dtype, scope=scope)
variable_scope.get_variable_scope().reuse_variables()
return rnn_decoder(decoder_inputs, enc_state, cell,
loop_function=loop_function, scope=scope)
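# Illustrative sketch only (not part of the original library): because the encoder and
# the decoder above run under one shared scope, a single cell's weights serve both
# passes. Sizes below are arbitrary assumptions; `tf` is the module imported at the
# top of this file.
def _example_tied_rnn_seq2seq():
  cell = rnn_cell.GRUCell(16)
  # Encoder and decoder inputs must have the same size, since parameters are shared.
  encoder_inputs = [tf.placeholder(tf.float32, [None, 16]) for _ in range(3)]
  decoder_inputs = [tf.placeholder(tf.float32, [None, 16]) for _ in range(3)]
  return tied_rnn_seq2seq(encoder_inputs, decoder_inputs, cell)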
def embedding_rnn_decoder(decoder_inputs, initial_state, cell, num_symbols,
embedding_size, output_projection=None,
feed_previous=False,
update_embedding_for_previous=True, scope=None):
"""RNN decoder with embedding and a pure-decoding option.
Args:
decoder_inputs: A list of 1D batch-sized int32 Tensors (decoder inputs).
initial_state: 2D Tensor [batch_size x cell.state_size].
cell: rnn_cell.RNNCell defining the cell function.
num_symbols: Integer, how many symbols come into the embedding.
embedding_size: Integer, the length of the embedding vector for each symbol.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_symbols] and B has
shape [num_symbols]; if provided and feed_previous=True, each fed
previous output will first be multiplied by W and added B.
feed_previous: Boolean; if True, only the first of decoder_inputs will be
used (the "GO" symbol), and all other decoder inputs will be generated by:
next = embedding_lookup(embedding, argmax(previous_output)),
In effect, this implements a greedy decoder. It can also be used
during training to emulate http://arxiv.org/abs/1506.03099.
If False, decoder_inputs are used as given (the standard decoder case).
update_embedding_for_previous: Boolean; if False and feed_previous=True,
only the embedding for the first symbol of decoder_inputs (the "GO"
symbol) will be updated by back propagation. Embeddings for the symbols
generated from the decoder itself remain unchanged. This parameter has
no effect if feed_previous=False.
scope: VariableScope for the created subgraph; defaults to
"embedding_rnn_decoder".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing the generated outputs.
      state: The state of each decoder cell at the final time-step.
        It is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: When output_projection has the wrong shape.
"""
if output_projection is not None:
proj_weights = ops.convert_to_tensor(output_projection[0],
dtype=dtypes.float32)
proj_weights.get_shape().assert_is_compatible_with([None, num_symbols])
proj_biases = ops.convert_to_tensor(
output_projection[1], dtype=dtypes.float32)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
with variable_scope.variable_scope(scope or "embedding_rnn_decoder"):
with ops.device("/cpu:0"):
embedding = variable_scope.get_variable("embedding",
[num_symbols, embedding_size])
loop_function = _extract_argmax_and_embed(
embedding, output_projection,
update_embedding_for_previous) if feed_previous else None
emb_inp = (
embedding_ops.embedding_lookup(embedding, i) for i in decoder_inputs)
return rnn_decoder(emb_inp, initial_state, cell,
loop_function=loop_function)
def embedding_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
num_encoder_symbols, num_decoder_symbols,
embedding_size, output_projection=None,
feed_previous=False, dtype=dtypes.float32,
scope=None):
"""Embedding RNN sequence-to-sequence model.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_encoder_symbols x input_size]). Then it runs an RNN to encode
embedded encoder_inputs into a state vector. Next, it embeds decoder_inputs
by another newly created embedding (of shape [num_decoder_symbols x
input_size]). Then it runs RNN decoder, initialized with the last
encoder state, on embedded decoder_inputs.
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
cell: rnn_cell.RNNCell defining the cell function and size.
num_encoder_symbols: Integer; number of symbols on the encoder side.
num_decoder_symbols: Integer; number of symbols on the decoder side.
embedding_size: Integer, the length of the embedding vector for each symbol.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_decoder_symbols] and B has
shape [num_decoder_symbols]; if provided and feed_previous=True, each
fed previous output will first be multiplied by W and added B.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
of decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
    dtype: The dtype of the initial state for both the encoder and decoder
      rnn cells (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_rnn_seq2seq"
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x num_decoder_symbols] containing the generated
outputs.
      state: The state of each decoder cell at the final time-step.
        It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with variable_scope.variable_scope(scope or "embedding_rnn_seq2seq"):
# Encoder.
encoder_cell = rnn_cell.EmbeddingWrapper(
cell, embedding_classes=num_encoder_symbols,
embedding_size=embedding_size)
_, encoder_state = rnn.rnn(encoder_cell, encoder_inputs, dtype=dtype)
# Decoder.
if output_projection is None:
cell = rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)
if isinstance(feed_previous, bool):
return embedding_rnn_decoder(
decoder_inputs, encoder_state, cell, num_decoder_symbols,
embedding_size, output_projection=output_projection,
feed_previous=feed_previous)
# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def decoder(feed_previous_bool):
reuse = None if feed_previous_bool else True
with variable_scope.variable_scope(variable_scope.get_variable_scope(),
reuse=reuse):
outputs, state = embedding_rnn_decoder(
decoder_inputs, encoder_state, cell, num_decoder_symbols,
embedding_size, output_projection=output_projection,
feed_previous=feed_previous_bool,
update_embedding_for_previous=False)
return outputs + [state]
outputs_and_state = control_flow_ops.cond(feed_previous,
lambda: decoder(True),
lambda: decoder(False))
return outputs_and_state[:-1], outputs_and_state[-1]
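# Illustrative sketch only (not part of the original library): unlike basic_rnn_seq2seq,
# the embedding variant consumes integer token ids and projects decoder outputs to the
# decoder vocabulary. Vocabulary sizes, sequence lengths, and the cell size below are
# arbitrary assumptions.
def _example_embedding_rnn_seq2seq():
  cell = rnn_cell.GRUCell(32)
  encoder_inputs = [tf.placeholder(tf.int32, [None]) for _ in range(4)]
  decoder_inputs = [tf.placeholder(tf.int32, [None]) for _ in range(4)]
  outputs, state = embedding_rnn_seq2seq(
      encoder_inputs, decoder_inputs, cell,
      num_encoder_symbols=100, num_decoder_symbols=120,
      embedding_size=32, feed_previous=False)
  # outputs: list of 4 Tensors [batch_size x 120] (decoder vocabulary logits).
  return outputs, state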
def embedding_tied_rnn_seq2seq(encoder_inputs, decoder_inputs, cell,
num_symbols, embedding_size,
output_projection=None, feed_previous=False,
dtype=dtypes.float32, scope=None):
"""Embedding RNN sequence-to-sequence model with tied (shared) parameters.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_symbols x input_size]). Then it runs an RNN to encode embedded
encoder_inputs into a state vector. Next, it embeds decoder_inputs using
the same embedding. Then it runs RNN decoder, initialized with the last
encoder state, on embedded decoder_inputs.
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
cell: rnn_cell.RNNCell defining the cell function and size.
num_symbols: Integer; number of symbols for both encoder and decoder.
embedding_size: Integer, the length of the embedding vector for each symbol.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_symbols] and B has
shape [num_symbols]; if provided and feed_previous=True, each
fed previous output will first be multiplied by W and added B.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
of decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
dtype: The dtype to use for the initial RNN states (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_tied_rnn_seq2seq".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x num_decoder_symbols] containing the generated
outputs.
state: The state of each decoder cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
Raises:
ValueError: When output_projection has the wrong shape.
"""
if output_projection is not None:
proj_weights = ops.convert_to_tensor(output_projection[0], dtype=dtype)
proj_weights.get_shape().assert_is_compatible_with([None, num_symbols])
proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
with variable_scope.variable_scope(scope or "embedding_tied_rnn_seq2seq"):
with ops.device("/cpu:0"):
embedding = variable_scope.get_variable("embedding",
[num_symbols, embedding_size])
emb_encoder_inputs = [embedding_ops.embedding_lookup(embedding, x)
for x in encoder_inputs]
emb_decoder_inputs = [embedding_ops.embedding_lookup(embedding, x)
for x in decoder_inputs]
if output_projection is None:
cell = rnn_cell.OutputProjectionWrapper(cell, num_symbols)
if isinstance(feed_previous, bool):
loop_function = _extract_argmax_and_embed(
embedding, output_projection, True) if feed_previous else None
return tied_rnn_seq2seq(emb_encoder_inputs, emb_decoder_inputs, cell,
loop_function=loop_function, dtype=dtype)
# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def decoder(feed_previous_bool):
loop_function = _extract_argmax_and_embed(
embedding, output_projection, False) if feed_previous_bool else None
reuse = None if feed_previous_bool else True
with variable_scope.variable_scope(variable_scope.get_variable_scope(),
reuse=reuse):
outputs, state = tied_rnn_seq2seq(
emb_encoder_inputs, emb_decoder_inputs, cell,
loop_function=loop_function, dtype=dtype)
return outputs + [state]
outputs_and_state = control_flow_ops.cond(feed_previous,
lambda: decoder(True),
lambda: decoder(False))
return outputs_and_state[:-1], outputs_and_state[-1]
def attention_decoder(decoder_inputs, initial_state, attention_states, cell,
output_size=None, num_heads=1, loop_function=None,
dtype=dtypes.float32, scope=None,
initial_state_attention=False, attn_num_hidden=128):
"""RNN decoder with attention for the sequence-to-sequence model.
In this context "attention" means that, during decoding, the RNN can look up
information in the additional tensor attention_states, and it does this by
focusing on a few entries from the tensor. This model has proven to yield
especially good results in a number of sequence-to-sequence tasks. This
implementation is based on http://arxiv.org/abs/1412.7449 (see below for
details). It is recommended for complex sequence-to-sequence tasks.
Args:
decoder_inputs: A list of 2D Tensors [batch_size x input_size].
initial_state: 2D Tensor [batch_size x cell.state_size].
attention_states: 3D Tensor [batch_size x attn_length x attn_size].
cell: rnn_cell.RNNCell defining the cell function and size.
output_size: Size of the output vectors; if None, we use cell.output_size.
num_heads: Number of attention heads that read from attention_states.
loop_function: If not None, this function will be applied to i-th output
in order to generate i+1-th input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/abs/1506.03099.
Signature -- loop_function(prev, i) = next
* prev is a 2D Tensor of shape [batch_size x output_size],
* i is an integer, the step number (when advanced control is needed),
* next is a 2D Tensor of shape [batch_size x input_size].
dtype: The dtype to use for the RNN initial state (default: tf.float32).
scope: VariableScope for the created subgraph; default: "attention_decoder".
initial_state_attention: If False (default), initial attentions are zero.
If True, initialize the attentions from the initial state and attention
states -- useful when we wish to resume decoding from a previously
stored decoder state and attention states.
Returns:
    A tuple of the form (outputs, state, attention_weights_history), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors of
shape [batch_size x output_size]. These represent the generated outputs.
Output i is computed from input i (which is either the i-th element
of decoder_inputs or loop_function(output {i-1}, i)) as follows.
First, we run the cell on a combination of the input and previous
attention masks:
cell_output, new_state = cell(linear(input, prev_attn), prev_state).
Then, we calculate new attention masks:
new_attn = softmax(V^T * tanh(W * attention_states + U * new_state))
and then we calculate the output:
output = linear(cell_output, new_attn).
      state: The state of each decoder cell at the final time-step.
        It is a 2D Tensor of shape [batch_size x cell.state_size].
      attention_weights_history: A list with one attention-weight Tensor of
        shape [batch_size x attn_length] per decoding step (returned by the
        MODIFIED code below).
Raises:
ValueError: when num_heads is not positive, there are no inputs, or shapes
of attention_states are not set.
"""
# MODIFIED ADD START
assert num_heads == 1, 'We only consider the case where num_heads=1!'
# MODIFIED ADD END
if not decoder_inputs:
raise ValueError("Must provide at least 1 input to attention decoder.")
if num_heads < 1:
raise ValueError("With less than 1 heads, use a non-attention decoder.")
if not attention_states.get_shape()[1:2].is_fully_defined():
raise ValueError("Shape[1] and [2] of attention_states must be known: %s"
% attention_states.get_shape())
if output_size is None:
output_size = cell.output_size
with variable_scope.variable_scope(scope or "attention_decoder"):
batch_size = array_ops.shape(decoder_inputs[0])[0] # Needed for reshaping.
attn_length = attention_states.get_shape()[1].value
attn_size = attention_states.get_shape()[2].value
# To calculate W1 * h_t we use a 1-by-1 convolution, need to reshape before.
hidden = array_ops.reshape(
attention_states, [-1, attn_length, 1, attn_size])
hidden_features = []
v = []
attention_vec_size = attn_size # Size of query vectors for attention.
for a in xrange(num_heads):
k = variable_scope.get_variable("AttnW_%d" % a,
[1, 1, attn_size, attention_vec_size])
hidden_features.append(nn_ops.conv2d(hidden, k, [1, 1, 1, 1], "SAME"))
v.append(variable_scope.get_variable("AttnV_%d" % a,
[attention_vec_size]))
state = initial_state
# MODIFIED: return both context vector and attention weights
def attention(query):
"""Put attention masks on hidden using hidden_features and query."""
# MODIFIED ADD START
ss = None # record attention weights
# MODIFIED ADD END
ds = [] # Results of attention reads will be stored here.
for a in xrange(num_heads):
with variable_scope.variable_scope("Attention_%d" % a):
y = linear(query, attention_vec_size, True)
y = array_ops.reshape(y, [-1, 1, 1, attention_vec_size])
# Attention mask is a softmax of v^T * tanh(...).
s = math_ops.reduce_sum(
v[a] * math_ops.tanh(hidden_features[a] + y), [2, 3])
a = nn_ops.softmax(s)
ss = a
#a = tf.Print(a, [a], message="a: ",summarize=30)
# Now calculate the attention-weighted vector d.
d = math_ops.reduce_sum(
array_ops.reshape(a, [-1, attn_length, 1, 1]) * hidden,
[1, 2])
ds.append(array_ops.reshape(d, [-1, attn_size]))
# MODIFIED DELETED return ds
# MODIFIED ADD START
return ds, ss
# MODIFIED ADD END
outputs = []
# MODIFIED ADD START
attention_weights_history = []
# MODIFIED ADD END
prev = None
batch_attn_size = array_ops.stack([batch_size, attn_size])
attns = [array_ops.zeros(batch_attn_size, dtype=dtype)
for _ in xrange(num_heads)]
for a in attns: # Ensure the second shape of attention vectors is set.
a.set_shape([None, attn_size])
if initial_state_attention:
# MODIFIED DELETED attns = attention(initial_state)
# MODIFIED ADD START
attns, attn_weights = attention(initial_state)
attention_weights_history.append(attn_weights)
# MODIFIED ADD END
for i, inp in enumerate(decoder_inputs):
if i > 0:
variable_scope.get_variable_scope().reuse_variables()
# If loop_function is set, we use it instead of decoder_inputs.
if loop_function is not None and prev is not None:
with variable_scope.variable_scope("loop_function", reuse=True):
inp = loop_function(prev, i)
# Merge input and previous attentions into one vector of the right size.
# input_size = inp.get_shape().with_rank(2)[1]
# TODO: use input_size
input_size = attn_num_hidden
x = linear([inp] + attns, input_size, True)
# Run the RNN.
cell_output, state = cell(x, state)
# Run the attention mechanism.
if i == 0 and initial_state_attention:
with variable_scope.variable_scope(variable_scope.get_variable_scope(),
reuse=True):
# MODIFIED DELETED attns = attention(state)
# MODIFIED ADD START
attns, attn_weights = attention(state)
# MODIFIED ADD END
else:
# MODIFIED DELETED attns = attention(state)
# MODIFIED ADD START
attns, attn_weights = attention(state)
attention_weights_history.append(attn_weights)
# MODIFIED ADD END
with variable_scope.variable_scope("AttnOutputProjection"):
output = linear([cell_output] + attns, output_size, True)
if loop_function is not None:
prev = output
outputs.append(output)
# MODIFIED DELETED return outputs, state
# MODIFIED ADD START
return outputs, state, attention_weights_history
# MODIFIED ADD END
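# The sketch below is NOT part of the original library. It restates the single-head
# attention score from the docstring above (softmax(v^T * tanh(W * h + U * s))) in
# plain NumPy so the shapes are easy to check; every size is an arbitrary assumption.
def _example_attention_scores_numpy():
  import numpy as np
  batch, attn_length, attn_size = 2, 5, 4
  hidden = np.random.randn(batch, attn_length, attn_size)   # attention_states
  query = np.random.randn(batch, attn_size)                 # decoder cell state
  W = np.random.randn(attn_size, attn_size)
  U = np.random.randn(attn_size, attn_size)
  v = np.random.randn(attn_size)
  # scores[b, t] = v . tanh(W h_{b,t} + U s_b)
  transformed = np.tanh(np.matmul(hidden, W) + np.expand_dims(np.matmul(query, U), 1))
  scores = np.sum(v * transformed, axis=2)                  # [batch, attn_length]
  scores -= scores.max(axis=1, keepdims=True)               # numerical stability
  weights = np.exp(scores) / np.exp(scores).sum(axis=1, keepdims=True)
  context = np.sum(weights[:, :, None] * hidden, axis=1)    # [batch, attn_size]
  return weights, context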
def embedding_attention_decoder(decoder_inputs, initial_state, attention_states,
cell, num_symbols, embedding_size, num_heads=1,
output_size=None, output_projection=None,
feed_previous=False,
update_embedding_for_previous=True,
dtype=dtypes.float32, scope=None,
initial_state_attention=False,
attn_num_hidden=128):
"""RNN decoder with embedding and attention and a pure-decoding option.
Args:
decoder_inputs: A list of 1D batch-sized int32 Tensors (decoder inputs).
initial_state: 2D Tensor [batch_size x cell.state_size].
attention_states: 3D Tensor [batch_size x attn_length x attn_size].
cell: rnn_cell.RNNCell defining the cell function.
num_symbols: Integer, how many symbols come into the embedding.
embedding_size: Integer, the length of the embedding vector for each symbol.
num_heads: Number of attention heads that read from attention_states.
    output_size: Size of the output vectors; if None, we use cell.output_size.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_symbols] and B has shape
[num_symbols]; if provided and feed_previous=True, each fed previous
output will first be multiplied by W and added B.
feed_previous: Boolean; if True, only the first of decoder_inputs will be
used (the "GO" symbol), and all other decoder inputs will be generated by:
next = embedding_lookup(embedding, argmax(previous_output)),
In effect, this implements a greedy decoder. It can also be used
during training to emulate http://arxiv.org/abs/1506.03099.
If False, decoder_inputs are used as given (the standard decoder case).
update_embedding_for_previous: Boolean; if False and feed_previous=True,
only the embedding for the first symbol of decoder_inputs (the "GO"
symbol) will be updated by back propagation. Embeddings for the symbols
generated from the decoder itself remain unchanged. This parameter has
no effect if feed_previous=False.
dtype: The dtype to use for the RNN initial states (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_attention_decoder".
initial_state_attention: If False (default), initial attentions are zero.
If True, initialize the attentions from the initial state and attention
states -- useful when we wish to resume decoding from a previously
stored decoder state and attention states.
Returns:
    A tuple of the form (outputs, state, attention_weights_history), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing the generated outputs.
      state: The state of each decoder cell at the final time-step.
        It is a 2D Tensor of shape [batch_size x cell.state_size].
      attention_weights_history: A list with one attention-weight Tensor per
        decoding step, passed through from attention_decoder.
Raises:
ValueError: When output_projection has the wrong shape.
"""
if output_size is None:
output_size = cell.output_size
if output_projection is not None:
proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)
proj_biases.get_shape().assert_is_compatible_with([num_symbols])
with variable_scope.variable_scope(scope or "embedding_attention_decoder"):
with ops.device("/cpu:0"):
embedding = variable_scope.get_variable("embedding",
[num_symbols, embedding_size])
loop_function = _extract_argmax_and_embed(
embedding, output_projection,
update_embedding_for_previous) if feed_previous else None
emb_inp = [
embedding_ops.embedding_lookup(embedding, i) for i in decoder_inputs]
return attention_decoder(
emb_inp, initial_state, attention_states, cell, output_size=output_size,
num_heads=num_heads, loop_function=loop_function,
initial_state_attention=initial_state_attention, attn_num_hidden=attn_num_hidden)
def embedding_attention_seq2seq(encoder_inputs, decoder_inputs, cell,
num_encoder_symbols, num_decoder_symbols,
embedding_size,
num_heads=1, output_projection=None,
feed_previous=False, dtype=dtypes.float32,
scope=None, initial_state_attention=False):
"""Embedding sequence-to-sequence model with attention.
This model first embeds encoder_inputs by a newly created embedding (of shape
[num_encoder_symbols x input_size]). Then it runs an RNN to encode
embedded encoder_inputs into a state vector. It keeps the outputs of this
RNN at every step to use for attention later. Next, it embeds decoder_inputs
by another newly created embedding (of shape [num_decoder_symbols x
input_size]). Then it runs attention decoder, initialized with the last
encoder state, on embedded decoder_inputs and attending to encoder outputs.
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
decoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
cell: rnn_cell.RNNCell defining the cell function and size.
num_encoder_symbols: Integer; number of symbols on the encoder side.
num_decoder_symbols: Integer; number of symbols on the decoder side.
embedding_size: Integer, the length of the embedding vector for each symbol.
num_heads: Number of attention heads that read from attention_states.
output_projection: None or a pair (W, B) of output projection weights and
biases; W has shape [output_size x num_decoder_symbols] and B has
shape [num_decoder_symbols]; if provided and feed_previous=True, each
fed previous output will first be multiplied by W and added B.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first
of decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
dtype: The dtype of the initial RNN state (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"embedding_attention_seq2seq".
initial_state_attention: If False (default), initial attentions are zero.
If True, initialize the attentions from the initial state and attention
states.
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x num_decoder_symbols] containing the generated
outputs.
state: The state of each decoder cell at the final time-step.
It is a 2D Tensor of shape [batch_size x cell.state_size].
"""
with variable_scope.variable_scope(scope or "embedding_attention_seq2seq"):
# Encoder.
encoder_cell = rnn_cell.EmbeddingWrapper(
cell, embedding_classes=num_encoder_symbols,
embedding_size=embedding_size)
encoder_outputs, encoder_state = rnn.rnn(
encoder_cell, encoder_inputs, dtype=dtype)
# First calculate a concatenation of encoder outputs to put attention on.
top_states = [array_ops.reshape(e, [-1, 1, cell.output_size])
for e in encoder_outputs]
attention_states = array_ops.concat(1, top_states)
# Decoder.
output_size = None
if output_projection is None:
cell = rnn_cell.OutputProjectionWrapper(cell, num_decoder_symbols)
output_size = num_decoder_symbols
if isinstance(feed_previous, bool):
return embedding_attention_decoder(
decoder_inputs, encoder_state, attention_states, cell,
num_decoder_symbols, embedding_size, num_heads=num_heads,
output_size=output_size, output_projection=output_projection,
feed_previous=feed_previous,
initial_state_attention=initial_state_attention)
# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def decoder(feed_previous_bool):
reuse = None if feed_previous_bool else True
with variable_scope.variable_scope(variable_scope.get_variable_scope(),
reuse=reuse):
outputs, state = embedding_attention_decoder(
decoder_inputs, encoder_state, attention_states, cell,
num_decoder_symbols, embedding_size, num_heads=num_heads,
output_size=output_size, output_projection=output_projection,
feed_previous=feed_previous_bool,
update_embedding_for_previous=False,
initial_state_attention=initial_state_attention)
return outputs + [state]
outputs_and_state = control_flow_ops.cond(feed_previous,
lambda: decoder(True),
lambda: decoder(False))
return outputs_and_state[:-1], outputs_and_state[-1]
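# Illustrative sketch only (not part of the original library): typical wiring for the
# attention model with greedy decoding. Note that in this modified copy the attention
# decoder also returns the per-step attention weights, so three values come back when
# feed_previous is a Python bool. All vocabulary sizes and lengths are assumptions.
def _example_embedding_attention_seq2seq():
  cell = rnn_cell.GRUCell(64)
  encoder_inputs = [tf.placeholder(tf.int32, [None]) for _ in range(10)]
  decoder_inputs = [tf.placeholder(tf.int32, [None]) for _ in range(8)]
  outputs, state, attention_weights = embedding_attention_seq2seq(
      encoder_inputs, decoder_inputs, cell,
      num_encoder_symbols=500, num_decoder_symbols=500,
      embedding_size=64, num_heads=1, feed_previous=True)
  # outputs: 8 Tensors [batch_size x 500]; attention_weights: one
  # [batch_size x 10] Tensor per decoding step.
  return outputs, state, attention_weights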
def one2many_rnn_seq2seq(encoder_inputs, decoder_inputs_dict, cell,
num_encoder_symbols, num_decoder_symbols_dict,
embedding_size, feed_previous=False,
dtype=dtypes.float32, scope=None):
"""One-to-many RNN sequence-to-sequence model (multi-task).
This is a multi-task sequence-to-sequence model with one encoder and multiple
decoders. Reference to multi-task sequence-to-sequence learning can be found
here: http://arxiv.org/abs/1511.06114
Args:
encoder_inputs: A list of 1D int32 Tensors of shape [batch_size].
    decoder_inputs_dict: A dictionary mapping decoder name (string) to
the corresponding decoder_inputs; each decoder_inputs is a list of 1D
Tensors of shape [batch_size]; num_decoders is defined as
len(decoder_inputs_dict).
cell: rnn_cell.RNNCell defining the cell function and size.
num_encoder_symbols: Integer; number of symbols on the encoder side.
num_decoder_symbols_dict: A dictionary mapping decoder name (string) to an
integer specifying number of symbols for the corresponding decoder;
len(num_decoder_symbols_dict) must be equal to num_decoders.
embedding_size: Integer, the length of the embedding vector for each symbol.
feed_previous: Boolean or scalar Boolean Tensor; if True, only the first of
decoder_inputs will be used (the "GO" symbol), and all other decoder
inputs will be taken from previous outputs (as in embedding_rnn_decoder).
If False, decoder_inputs are used as given (the standard decoder case).
    dtype: The dtype of the initial state for both the encoder and decoder
      rnn cells (default: tf.float32).
scope: VariableScope for the created subgraph; defaults to
"one2many_rnn_seq2seq"
Returns:
A tuple of the form (outputs_dict, state_dict), where:
outputs_dict: A mapping from decoder name (string) to a list of the same
length as decoder_inputs_dict[name]; each element in the list is a 2D
Tensors with shape [batch_size x num_decoder_symbol_list[name]]
containing the generated outputs.
state_dict: A mapping from decoder name (string) to the final state of the
corresponding decoder RNN; it is a 2D Tensor of shape
[batch_size x cell.state_size].
"""
outputs_dict = {}
state_dict = {}
with variable_scope.variable_scope(scope or "one2many_rnn_seq2seq"):
# Encoder.
encoder_cell = rnn_cell.EmbeddingWrapper(
cell, embedding_classes=num_encoder_symbols,
embedding_size=embedding_size)
_, encoder_state = rnn.rnn(encoder_cell, encoder_inputs, dtype=dtype)
# Decoder.
for name, decoder_inputs in decoder_inputs_dict.items():
num_decoder_symbols = num_decoder_symbols_dict[name]
with variable_scope.variable_scope("one2many_decoder_" + str(name)):
decoder_cell = rnn_cell.OutputProjectionWrapper(cell,
num_decoder_symbols)
if isinstance(feed_previous, bool):
outputs, state = embedding_rnn_decoder(
decoder_inputs, encoder_state, decoder_cell, num_decoder_symbols,
embedding_size, feed_previous=feed_previous)
else:
# If feed_previous is a Tensor, we construct 2 graphs and use cond.
def filled_embedding_rnn_decoder(feed_previous):
# pylint: disable=cell-var-from-loop
reuse = None if feed_previous else True
vs = variable_scope.get_variable_scope()
with variable_scope.variable_scope(vs, reuse=reuse):
outputs, state = embedding_rnn_decoder(
decoder_inputs, encoder_state, decoder_cell,
num_decoder_symbols, embedding_size,
feed_previous=feed_previous)
# pylint: enable=cell-var-from-loop
return outputs + [state]
outputs_and_state = control_flow_ops.cond(
feed_previous,
lambda: filled_embedding_rnn_decoder(True),
lambda: filled_embedding_rnn_decoder(False))
outputs = outputs_and_state[:-1]
state = outputs_and_state[-1]
outputs_dict[name] = outputs
state_dict[name] = state
return outputs_dict, state_dict
def sequence_loss_by_example(logits, targets, weights,
average_across_timesteps=True,
softmax_loss_function=None, name=None):
"""Weighted cross-entropy loss for a sequence of logits (per example).
Args:
logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
targets: List of 1D batch-sized int32 Tensors of the same length as logits.
weights: List of 1D batch-sized float-Tensors of the same length as logits.
average_across_timesteps: If set, divide the returned cost by the total
label weight.
softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
name: Optional name for this operation, default: "sequence_loss_by_example".
Returns:
1D batch-sized float Tensor: The log-perplexity for each sequence.
Raises:
ValueError: If len(logits) is different from len(targets) or len(weights).
"""
if len(targets) != len(logits) or len(weights) != len(logits):
raise ValueError("Lengths of logits, weights, and targets must be the same "
"%d, %d, %d." % (len(logits), len(weights), len(targets)))
with ops.name_scope(name, "sequence_loss_by_example",
logits + targets + weights):
log_perp_list = []
for logit, target, weight in zip(logits, targets, weights):
if softmax_loss_function is None:
# TODO(irving,ebrevdo): This reshape is needed because
# sequence_loss_by_example is called with scalars sometimes, which
# violates our general scalar strictness policy.
target = array_ops.reshape(target, [-1])
crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
logits=logit, labels=target)
else:
crossent = softmax_loss_function(logits=logit, labels=target)
log_perp_list.append(crossent * weight)
log_perps = math_ops.add_n(log_perp_list)
if average_across_timesteps:
total_size = math_ops.add_n(weights)
total_size += 1e-12 # Just to avoid division by 0 for all-0 weights.
log_perps /= total_size
return log_perps
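# Illustrative sketch only (not part of the original library): the shapes expected by
# sequence_loss_by_example for a 3-step decoder over a 10-symbol vocabulary. All
# sizes are arbitrary assumptions.
def _example_sequence_loss_by_example():
  logits = [tf.placeholder(tf.float32, [None, 10]) for _ in range(3)]
  targets = [tf.placeholder(tf.int32, [None]) for _ in range(3)]
  weights = [tf.placeholder(tf.float32, [None]) for _ in range(3)]
  # One log-perplexity value per sequence in the batch.
  return sequence_loss_by_example(logits, targets, weights)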
def sequence_loss(logits, targets, weights,
average_across_timesteps=True, average_across_batch=True,
softmax_loss_function=None, name=None):
"""Weighted cross-entropy loss for a sequence of logits, batch-collapsed.
Args:
logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
targets: List of 1D batch-sized int32 Tensors of the same length as logits.
weights: List of 1D batch-sized float-Tensors of the same length as logits.
average_across_timesteps: If set, divide the returned cost by the total
label weight.
average_across_batch: If set, divide the returned cost by the batch size.
softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
name: Optional name for this operation, defaults to "sequence_loss".
Returns:
A scalar float Tensor: The average log-perplexity per symbol (weighted).
Raises:
ValueError: If len(logits) is different from len(targets) or len(weights).
"""
with ops.name_scope(name, "sequence_loss", logits + targets + weights):
cost = math_ops.reduce_sum(sequence_loss_by_example(
logits, targets, weights,
average_across_timesteps=average_across_timesteps,
softmax_loss_function=softmax_loss_function))
if average_across_batch:
batch_size = array_ops.shape(targets[0])[0]
return cost / math_ops.cast(batch_size, dtypes.float32)
return cost
def model_with_buckets(encoder_inputs_tensor, decoder_inputs, targets, weights,
buckets, seq2seq, softmax_loss_function=None,
per_example_loss=False, name=None):
"""Create a sequence-to-sequence model with support for bucketing.
  The seq2seq argument is a function that defines a sequence-to-sequence model;
  in this modified copy it is called as seq2seq(encoder_inputs, decoder_inputs, encoder_length).
Args:
    encoder_inputs_tensor: A single Tensor of stacked encoder inputs; it is
      split along dimension 0 into the per-step encoder inputs (first seq2seq input).
decoder_inputs: A list of Tensors to feed the decoder; second seq2seq input.
targets: A list of 1D batch-sized int32 Tensors (desired output sequence).
weights: List of 1D batch-sized float-Tensors to weight the targets.
buckets: A list of pairs of (input size, output size) for each bucket.
    seq2seq: A sequence-to-sequence model function; it takes the encoder inputs,
      the decoder inputs, and the encoder length, and returns a pair consisting
      of the outputs and the attention weights history.
softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
to be used instead of the standard softmax (the default if this is None).
per_example_loss: Boolean. If set, the returned loss will be a batch-sized
tensor of losses for each sequence in the batch. If unset, it will be
a scalar with the averaged loss from all examples.
name: Optional name for this operation, defaults to "model_with_buckets".
  Returns:
    A tuple of the form (outputs, loss, attention_weights_history), where:
      outputs: The outputs for the first (only) bucket: a list of 2D Tensors
        of shape [batch_size x num_decoder_symbols].
      loss: A scalar Tensor with the averaged loss, or, if per_example_loss is
        set, a 1D batch-sized float Tensor of per-sequence losses.
      attention_weights_history: The attention weights returned by the seq2seq
        function, one entry per decoding step.
  Raises:
    ValueError: If the length of targets or weights is smaller than the
      largest (last) bucket.
"""
if len(targets) < buckets[-1][1]:
raise ValueError("Length of targets (%d) must be at least that of last"
"bucket (%d)." % (len(targets), buckets[-1][1]))
if len(weights) < buckets[-1][1]:
raise ValueError("Length of weights (%d) must be at least that of last"
"bucket (%d)." % (len(weights), buckets[-1][1]))
all_inputs = [encoder_inputs_tensor] + decoder_inputs + targets + weights
with ops.name_scope(name, "model_with_buckets", all_inputs):
with variable_scope.variable_scope(variable_scope.get_variable_scope(), reuse=None):
bucket = buckets[0]
encoder_inputs = tf.split(encoder_inputs_tensor, bucket[0], 0)
encoder_inputs = [tf.squeeze(encoder_input, squeeze_dims=[0]) for encoder_input in encoder_inputs]
bucket_outputs, attention_weights_history = seq2seq(encoder_inputs[:int(bucket[0])],
decoder_inputs[:int(bucket[1])], int(bucket[0]))
if per_example_loss:
loss = sequence_loss_by_example(
bucket_outputs, targets[:int(bucket[1])], weights[:int(bucket[1])],
average_across_timesteps=True,
softmax_loss_function=softmax_loss_function)
else:
loss = sequence_loss(
bucket_outputs, targets[:int(bucket[1])], weights[:int(bucket[1])],
average_across_timesteps=True,
softmax_loss_function=softmax_loss_function)
return bucket_outputs, loss, attention_weights_history
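# Illustrative sketch only (not part of the original library): in this modified copy,
# model_with_buckets takes a single stacked encoder tensor, a seq2seq function with
# signature (encoder_inputs, decoder_inputs, encoder_length) -> (outputs,
# attention_weights_history), and uses only the first bucket. `my_seq2seq` and all
# sizes below are hypothetical placeholders.
def _example_model_with_buckets(my_seq2seq):
  bucket = (10, 8)  # (encoder length, decoder length)
  encoder_inputs_tensor = tf.placeholder(tf.float32, [bucket[0], None, 64])
  decoder_inputs = [tf.placeholder(tf.int32, [None]) for _ in range(bucket[1])]
  targets = [tf.placeholder(tf.int32, [None]) for _ in range(bucket[1])]
  weights = [tf.placeholder(tf.float32, [None]) for _ in range(bucket[1])]
  return model_with_buckets(encoder_inputs_tensor, decoder_inputs, targets,
                            weights, [bucket], my_seq2seq)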
|
py | b41239548d53fe8b7d1afc9c9edf90d438a6845f | from sense_hat import SenseHat
import time
import subprocess
# Initialize the Sense Hat
sense = SenseHat()
sense.clear()
sense.low_light = True
# plus 0.5 second for status per wake and plus time to run loop
MINUTES_BETWEEN_WAKES = 0.1  # roughly every 6 seconds
# Define Colors
# (generated from ColorBrewer)
# Credit: Cynthia Brewer, Mark Harrower and The Pennsylvania State University
cb_orange = (252, 141, 89)
cb_yellow = (255, 255, 191)
cb_blue = (145, 191, 219)
# Raw colors
red = (255, 0, 0)
yellow = (255, 255, 0)
blue = (0, 0, 255)
white = (255, 255, 255)
off = (0, 0, 0)
def get_temperature():
# rounded to one decimal place
return round(sense.get_temperature(), 1)
def get_humidity():
# rounded to one decimal place
return round(sense.get_humidity(), 1)
def get_pressure():
# rounded to one decimal place
return round(sense.get_pressure(), 1)
def get_acceleration():
acceleration = sense.get_accelerometer_raw()
# rounded to two decimal places
x = round(acceleration['x'], 2)
y = round(acceleration['y'], 2)
z = round(acceleration['z'], 2)
return x, y, z
def get_gyro():
gyro = sense.get_gyroscope_raw()
# rounded to two decimal places
x = round(gyro['x'], 2)
y = round(gyro['y'], 2)
z = round(gyro['z'], 2)
return x, y, z
def get_compass():
compass = sense.get_compass_raw()
# rounded to two decimal places
x = round(compass['x'], 2)
y = round(compass['y'], 2)
z = round(compass['z'], 2)
return x, y, z
def display(x, y, color):
sense.set_pixel(x, y, color)
def check_internet():
    output = subprocess.check_output(["bash", "/opt/AISonoBuoy/PiBuoyV2/scripts/internet_check.sh"]).decode()
if "Online" in output:
return True
return False
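# Hypothetical helper, not part of the original script: one way the readings gathered
# in main() below could be bundled for the "TODO store data" step. The field names
# are assumptions, not an established schema.
def build_reading(t, p, h, acceleration, gyro, compass):
    return {
        "timestamp": time.time(),
        "temperature_c": t,
        "pressure_mbar": p,
        "humidity_pct": h,
        "acceleration": acceleration,
        "gyro": gyro,
        "compass": compass,
    }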
def main():
# TODO initial status states
ais_file = None
recording_file = None
uploads = None
# Throwaway readings to calibrate
for i in range(5):
# Light up top left to indicate running calibration
display(0, 0, white)
t = get_temperature()
p = get_pressure()
h = get_humidity()
ax, ay, az = get_acceleration()
gx, gy, gz = get_gyro()
cx, cy, cz = get_compass()
# Turn off top left to indicate calibration is done
display(0, 0, off)
# Cycle through getting readings forever
while True:
# Light up bottom right pixel for status
display(7, 7, blue)
# TODO check other items for updates (ais, hydrophone recordings, battery, uploads, patching, internet connection)
# internet update
inet = check_internet()
if inet:
display(7, 6, blue)
else:
display(7, 6, red)
# patching: run update file which should do everything including restarting services or rebooting
# ais: see if new detection since last cycle
# recordings: see if new recording file since last session, or see if process to record is running
# battery: check current battery level from pijuice hopefully, change color based on level
# uploads: see if files are gone ?
# Take readings from sensors
t = get_temperature()
p = get_pressure()
h = get_humidity()
ax, ay, az = get_acceleration()
gx, gy, gz = get_gyro()
cx, cy, cz = get_compass()
# TODO store data
# Keep lights for 0.5 second
time.sleep(0.5)
# Turn off pixels for status
display(7, 6, off)
display(7, 7, off)
# sleep between cycles
time.sleep(60*MINUTES_BETWEEN_WAKES)
main()
|
py | b412396439af4e8dd4029fb0a533aeda785630a0 | """Function Introspection
Functions are first class objects
They have attributes __doc__ __annotations__
We can attach our own attributes
def my_func(a, b):
return a + b
my_func.category = 'math'
my_func.sub_category = 'arithmetic'
print(my_func.category) # math
print(my_func.sub_category) # arithmetic
print(dir(my_func)) # ['__annotations__', '__call__', '__class__', '__closure__', '__code__', '__defaults__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__get__', '__getattribute__', '__globals__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__kwdefaults__', '__le__', '__lt__', '__module__', '__name__', '__ne__', '__new__', '__qualname__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', 'category', 'sub_category']
Function Attributes:__name__, __defaults__, __kwdefaults__
__name__ -> name of function
__defaults__ -> tuple containing positional parameter defaults
__kwdefaults__ -> dictionary containing keyword-only parameter defaults
"""
def my_func(a, b=2, c=3, *, kw1, kw2=2):
pass
my_func.__name__ # my_func
my_func.__defaults__ # (2, 3)
my_func.__kwdefaults__ # {'kw2': 2}
# Function Attribute:__code__
def my_func(a, b=1, *args, **kwargs):
i = 10
b = min(i, b)
return a * b # my_func.__code__
# <code object my_func at 0x00020EEF ..
"""
This __code__ object itself has various properties, which include:
co_varnames # parameter and local variables
my_func.__code__.co_varnames -> ('a', 'b', 'args', 'kwargs', 'i')
parameter names first, followed by local variable names
co_argcount number of parameters
my_func.__code__.co_argcount -> 2
does not count *args and **kwargs!
# The inspect module: import inspect
ismethod(obj) isfunction(obj) isroutine(obj) and many others...
# What's the difference between a function and a method?
Classes and objects have attributes - an attribute is an object that is bound (to the class or to the instance)
An attribute that is callable is called a method
"""
def my_func():
    pass
class MyClass:
    def func(self):  # func is bound to my_obj, an instance of MyClass
        pass
my_obj = MyClass()
# isfunction(my_func) -> True
# ismethod(my_func) -> False
# isfunction(my_obj.func) -> False
# ismethod(my_obj.func) -> True
# isroutine(my_func) -> True
# isroutine(my_obj.func) -> True
'''Code Introspection
I can recover the source code of our functions/methods
inspect.getsource(my_func) -> a string containing our entire def statement, including annotations, docstrings, etc.
I can find out in which module our function was created
inspect.getmodule(my_func) -> <module '__main__'>
inspect.getmodule(print) -> <module 'builtins' (built-in)>
inspect.getmodule(math.sin) -> <module 'math' (built-in)>
Function Comments
# setting up variable
i = 10
'''
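# A short, added illustration (hedged): tying the notes above together with the
# inspect module on a small annotated function. Names below are examples only.
import inspect
def sample_func(a: int, b: int = 2) -> int:
    """Add two numbers."""
    # setting up result
    return a + b
inspect.signature(sample_func)    # <Signature (a: int, b: int = 2) -> int>
inspect.getsource(sample_func)    # the full def statement as a string
inspect.getmodule(sample_func)    # <module '__main__' ...>
inspect.isfunction(sample_func)   # True
inspect.ismethod(sample_func)     # False
sample_func.__doc__               # 'Add two numbers.'
sample_func.__annotations__       # {'a': <class 'int'>, 'b': <class 'int'>, 'return': <class 'int'>}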
|
py | b41239c31d74437117c21f025593ee2e881bc1a9 | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'searchengine.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
py | b4123a72511e1767da640bf69f885031fb875e4c | """
init.py
Starting script to run NetPyNE-based RxD model.
Usage:
python init.py # Run simulation, optionally plot a raster
MPI usage:
mpiexec -n 4 nrniv -python -mpi init.py
"""
from netpyne import sim
from netParams import netParams
from cfg import cfg
# --------------------------------
# Instantiate network
# --------------------------------
sim.initialize(netParams, cfg) # create network object and set cfg and net params
sim.net.createPops() # instantiate network populations
sim.net.createCells() # instantiate network cells based on defined populations
sim.net.connectCells() # create connections between cells based on params
sim.net.addStims() # add external stimulation to cells (IClamps etc)
sim.net.addRxD() # add reaction-diffusion (RxD)
sim.setupRecording() # setup variables to record for each cell (spikes, V traces, etc)
sim.simulate()
sim.analyze() |