ext | sha | content
---|---|---|
py | b41356072ee374c996eb89650a138070d8f0db1c | #!/usr/bin/env python3
from functools import wraps
from time import monotonic, sleep
# In real life, these metrics would be saved to a metrics database such as InfluxDB.
# There are many systems where we could store these metrics, such as Prometheus,
# InfluxDB, the Elastic Stack, Nagios, etc.
# Ensure that performance is part of the monitoring and alerting in production.
# Be careful not to let sending the metrics become a performance issue in itself.
# For example, instead of sending metrics synchronously over a slow network,
# save them to disk or send them in a non-blocking manner (see the sketch at
# the end of this file).
def timed(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
start = monotonic()
try:
return fn(*args, **kwargs)
finally:
duration = monotonic() - start
# Save to database
print('{} took {:.3f}sec'.format(fn.__name__, duration))
return wrapper
if __name__ == '__main__':
@timed
def add(a, b):
sleep(a / 10) # Simulate work
return a + b
# In [2]: add(3, 4)
# add took 0.304sec
# Out[2]: 7
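# --- Added sketch (not part of the original file) ---
# The comments above recommend keeping metric delivery off the hot path. One
# minimal way to do that, assuming an in-process queue is acceptable, is a
# daemon worker thread; the names _metric_queue, _metric_worker and
# report_duration below are illustrative only.
from queue import Queue
from threading import Thread

_metric_queue = Queue()

def _metric_worker():
    while True:
        name, duration = _metric_queue.get()
        # The potentially slow I/O (metrics DB, network, disk) happens here,
        # off the caller's thread.
        print('{} took {:.3f}sec'.format(name, duration))
        _metric_queue.task_done()

def report_duration(name, duration):
    # Enqueueing is cheap and effectively non-blocking for the caller; inside
    # timed(), the print() could be replaced with this call.
    _metric_queue.put((name, duration))

Thread(target=_metric_worker, daemon=True).start()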
|
py | b41356e58b70825fd25f1e881456b0e0dbc7023e | # -*- coding: utf-8 -*-
"""Module to compute flux statistics from spectra:
the power spectrum, the pdf and to normalise to a mean tau.
Useful for lyman alpha forest work."""
import math
import numpy as np
from scipy.optimize import brentq
from ._spectra_priv import _rescale_mean_flux
def obs_mean_tau(redshift):
"""The mean flux from 0711.1862: is (0.0023±0.0007) (1+z)^(3.65±0.21)
Todo: check for updated values."""
return 0.0023*(1.0+redshift)**3.65
def mean_flux(tau, mean_flux_desired, tol = 1e-5, thresh=1e30):
"""Scale the optical depths by a constant value until we get the observed mean flux.
ie, we want F_obs = bar{F} = < e^-tau >
Solves iteratively using Newton-Raphson.
This is safe because the exponential function is so well-behaved.
Arguments:
tau - optical depths to scale
mean_flux_desired - mean flux desired
tol - tolerance within which to hit mean flux
returns:
scaling factor for tau"""
return _rescale_mean_flux(tau, mean_flux_desired, np.size(tau), tol, thresh)
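# Pure-Python reference sketch (not the _rescale_mean_flux extension and not
# part of the original module): the same rescaling can be expressed with the
# brentq root finder imported above. The bracket [1e-6, 1e4] is an
# illustrative assumption, not a value taken from this code base.
def _mean_flux_python(tau, mean_flux_desired):
    """Find scale such that np.mean(np.exp(-scale * tau)) equals mean_flux_desired."""
    def objective(scale):
        return np.mean(np.exp(-scale * tau)) - mean_flux_desired
    return brentq(objective, 1e-6, 1e4)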
def flux_pdf(tau, nbins=20, mean_flux_desired=None):
"""Compute the flux pdf, a normalised histogram of the flux, exp(-tau)"""
scale = 1.
if mean_flux_desired is not None:
scale = mean_flux(tau, mean_flux_desired)
flux = np.exp(-scale * tau)
bins = np.arange(nbins+1)/(1.*nbins)
(fpdf, _) = np.histogram(flux, bins=bins,density=True)
cbins = (bins[1:] + bins[:-1])/2.
return cbins, fpdf
def _powerspectrum(inarray, axis=-1):
"""Compute the power spectrum of the input using np.fft"""
rfftd = np.fft.rfft(inarray, axis=axis)
# Want P(k)= F(k).re*F(k).re+F(k).im*F(k).im
power = np.abs(rfftd)**2
#Normalise the FFT so it is independent of input size.
power /= np.shape(inarray)[axis]**2
return power
def _window_function(k, *, R, dv):
"""The window function corresponding to the spectra response of the spectrograph.
R is the spectrograph resolution.
dv is the pixel width of the spectrograph.
Default values for BOSS are:
dv = 69, R = 60 at 5000 A and R = 80 at 4300 A."""
#FWHM of a Gaussian is 2 \sqrt(2 ln 2) sigma
sigma = R/(2*np.sqrt(2*np.log(2)))
return np.exp(-0.5 * (k * sigma)**2) * np.sinc(k * dv/2/math.pi)
def flux_power(tau, vmax, spec_res = 8, mean_flux_desired=None, window=True):
"""Get the power spectrum of (variations in) the flux along the line of sight.
This is: P_F(k_F) = <d_F d_F>
d_F = e^-tau / mean(e^-tau) - 1
If mean_flux_desired is set, the spectral optical depths will be rescaled
to match the desired mean flux.
We compute the power spectrum along each sightline and then average the result.
Arguments:
tau - optical depths. Shape is (NumLos, npix)
mean_flux_desired - Mean flux to rescale to.
vmax - velocity scale corresponding to maximal length of the sightline.
Returns:
flux_power - flux power spectrum in km/s. Shape is (npix)
bins - the frequency space bins of the power spectrum, in s/km.
"""
scale = 1.
if mean_flux_desired is not None:
scale = mean_flux(tau, mean_flux_desired)
#print("rescaled: ",scale,"frac: ",np.sum(tau>1)/np.sum(tau>0))
else:
mean_flux_desired = np.mean(np.exp(-tau))
(nspec, npix) = np.shape(tau)
mean_flux_power = np.zeros(npix//2+1, dtype=tau.dtype)
for i in range(10):
end = min((i+1)*nspec//10, nspec)
dflux=np.exp(-scale*tau[i*nspec//10:end])/mean_flux_desired - 1.
# Calculate flux power for each spectrum in turn
flux_power_perspectra = _powerspectrum(dflux, axis=1)
#Take the mean and convert units.
mean_flux_power += vmax*np.sum(flux_power_perspectra, axis=0)
mean_flux_power/= nspec
assert np.shape(mean_flux_power) == (npix//2+1,)
kf = _flux_power_bins(vmax, npix)
#Divide out the window function
if window:
mean_flux_power /= _window_function(kf, R=spec_res, dv=vmax/npix)**2
return kf,mean_flux_power
def _flux_power_bins(vmax, npix):
"""
Generate k bins for the flux power spectrum by converting the natural
(ie, fractions of the total spectrum) units output by the flux power spectrum
routine into physical km/s, accounting for Fourier convention.
Arguments:
vmax - the length of a spectrum in km/s and the conversion factor from comoving kpc is:
H(z) * a / h / 1000
defined in spectra.py:115
npix - number of pixels of the *input spectrum* - not the Fourier output!
Returns: bin center in s/km
"""
#Get the frequency component
kf = np.fft.rfftfreq(npix)
#Units:
#The largest frequency scale is the velocity scale of the box,
#not 1/nbins as rfftfreq gives.
#Adjust Fourier convention.
kf *= 2.0*math.pi * npix/vmax
return kf
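# Illustrative usage (not part of the original module): exercise flux_power()
# on synthetic sightlines. The lognormal optical depths, the vmax of 7500 km/s
# and the redshift of 3 are arbitrary assumptions for demonstration only.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    fake_tau = rng.lognormal(mean=0.0, sigma=1.0, size=(50, 1024))
    target_flux = np.exp(-obs_mean_tau(3.0))  # mean flux implied by the mean tau fit
    kf_demo, pk_demo = flux_power(fake_tau, vmax=7500.0, mean_flux_desired=target_flux)
    print(kf_demo.shape, pk_demo.shape)  # (513,) (513,)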
|
py | b413576787f4e791ac147090273fb3f79d44f1a2 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Connector components of Google AI Platform (Unified) services."""
from typing import Any, Dict, List, Optional, Type, Union
from absl import logging
import collections
from kfp import dsl
from kfp.components import _structures
from kfp.dsl import artifact
from kfp.pipeline_spec import pipeline_spec_pb2
from kfp.v2.dsl import dsl_utils
from kfp.v2.dsl import type_utils
_AIPlatformCustomJobSpec = pipeline_spec_pb2.PipelineDeploymentConfig.AIPlatformCustomJobSpec
_DUMMY_CONTAINER_OP_IMAGE = 'dummy/image'
_DUMMY_PATH = 'dummy/path'
_MAX_PACKAGE_URIS = 100
_DEFAULT_CUSTOM_JOB_MACHINE_TYPE = 'n1-standard-2'
_ValueOrPipelineParam = Union[dsl.PipelineParam, str, float, int]
# TODO: Support all declared types in
# components._structures.CommandlineArgumentType
_CommandlineArgumentType = Union[
str, int, float,
_structures.InputValuePlaceholder,
_structures.InputPathPlaceholder,
_structures.OutputPathPlaceholder,
_structures.InputUriPlaceholder,
_structures.OutputUriPlaceholder,
]
# TODO: extract this to a utils module, and share with dsl.component_bridge
def _input_artifact_uri_placeholder(input_key: str) -> str:
return "{{{{$.inputs.artifacts['{}'].uri}}}}".format(input_key)
def _input_artifact_path_placeholder(input_key: str) -> str:
return "{{{{$.inputs.artifacts['{}'].path}}}}".format(input_key)
def _input_parameter_placeholder(input_key: str) -> str:
return "{{{{$.inputs.parameters['{}']}}}}".format(input_key)
def _output_artifact_uri_placeholder(output_key: str) -> str:
return "{{{{$.outputs.artifacts['{}'].uri}}}}".format(output_key)
def _output_artifact_path_placeholder(output_key: str) -> str:
return "{{{{$.outputs.artifacts['{}'].path}}}}".format(output_key)
def _output_parameter_path_placeholder(output_key: str) -> str:
return "{{{{$.outputs.parameters['{}'].output_file}}}}".format(output_key)
class AiPlatformCustomJobOp(dsl.ContainerOp):
"""V2 AiPlatformCustomJobOp class.
This class inherits V1 ContainerOp class so that it can be correctly picked
by compiler. The implementation of the task is an AiPlatformCustomJobSpec
proto message.
"""
def __init__(self,
name: str,
custom_job_spec: Dict[str, Any],
component_spec: pipeline_spec_pb2.ComponentSpec,
task_spec: pipeline_spec_pb2.PipelineTaskSpec,
task_inputs: Optional[List[dsl.InputArgumentPath]] = None,
task_outputs: Optional[Dict[str, str]] = None):
"""Instantiates the AiPlatformCustomJobOp object.
Args:
name: Name of the task.
custom_job_spec: JSON struct of the CustomJob spec, representing the job
that will be submitted to AI Platform (Unified) service. See
https://cloud.google.com/ai-platform-unified/docs/reference/rest/v1beta1/CustomJobSpec
for detailed reference.
task_inputs: Optional. List of InputArgumentPath of this task. Each
InputArgumentPath object has 3 attributes: input, path and argument.
We actually only care about the input, which will be translated to the
input name of the component spec.
Path and argument are tied to the artifact argument in Argo, which is not
used in this case.
task_outputs: Optional. Mapping of task outputs to their URLs.
"""
old_warn_value = dsl.ContainerOp._DISABLE_REUSABLE_COMPONENT_WARNING
dsl.ContainerOp._DISABLE_REUSABLE_COMPONENT_WARNING = True
super().__init__(
name=name,
image=_DUMMY_CONTAINER_OP_IMAGE,
artifact_argument_paths=task_inputs,
file_outputs=task_outputs
)
self.component_spec = component_spec
self.task_spec = task_spec
self.custom_job_spec = custom_job_spec
dsl.ContainerOp._DISABLE_REUSABLE_COMPONENT_WARNING = old_warn_value
def _get_custom_job_op(
task_name: str,
job_spec: Dict[str, Any],
input_artifacts: Optional[Dict[str, dsl.PipelineParam]] = None,
input_parameters: Optional[Dict[str, _ValueOrPipelineParam]] = None,
output_artifacts: Optional[Dict[str, Type[artifact.Artifact]]] = None,
output_parameters: Optional[Dict[str, Any]] = None,
) -> AiPlatformCustomJobOp:
"""Gets an AiPlatformCustomJobOp from job spec and I/O definition."""
pipeline_task_spec = pipeline_spec_pb2.PipelineTaskSpec()
pipeline_component_spec = pipeline_spec_pb2.ComponentSpec()
pipeline_task_spec.task_info.CopyFrom(
pipeline_spec_pb2.PipelineTaskInfo(name=task_name))
# Iterate through the inputs/outputs declaration to get pipeline component
# spec.
for input_name, param in input_parameters.items():
if isinstance(param, dsl.PipelineParam):
pipeline_component_spec.input_definitions.parameters[
input_name].type = type_utils.get_parameter_type(param.param_type)
else:
pipeline_component_spec.input_definitions.parameters[
input_name].type = type_utils.get_parameter_type(type(param))
for input_name, art in input_artifacts.items():
if not isinstance(art, dsl.PipelineParam):
raise RuntimeError(
'Get unresolved input artifact for input %s. Input '
'artifacts must be connected to a producer task.' % input_name)
pipeline_component_spec.input_definitions.artifacts[
input_name].artifact_type.CopyFrom(
type_utils.get_artifact_type_schema_message(art.param_type))
for output_name, param_type in output_parameters.items():
pipeline_component_spec.output_definitions.parameters[
output_name].type = type_utils.get_parameter_type(param_type)
for output_name, artifact_type in output_artifacts.items():
pipeline_component_spec.output_definitions.artifacts[
output_name].artifact_type.CopyFrom(artifact_type.get_ir_type())
pipeline_component_spec.executor_label = dsl_utils.sanitize_executor_label(
task_name)
# Iterate through the inputs/outputs specs to get pipeline task spec.
for input_name, param in input_parameters.items():
if isinstance(param, dsl.PipelineParam) and param.op_name:
# If the param has a valid op_name, this should be a pipeline parameter
# produced by an upstream task.
pipeline_task_spec.inputs.parameters[input_name].CopyFrom(
pipeline_spec_pb2.TaskInputsSpec.InputParameterSpec(
task_output_parameter=pipeline_spec_pb2.TaskInputsSpec.InputParameterSpec.TaskOutputParameterSpec(
producer_task='task-{}'.format(param.op_name),
output_parameter_key=param.name
)))
elif isinstance(param, dsl.PipelineParam) and not param.op_name:
# If a valid op_name is missing, this should be a pipeline parameter.
pipeline_task_spec.inputs.parameters[input_name].CopyFrom(
pipeline_spec_pb2.TaskInputsSpec.InputParameterSpec(
component_input_parameter=param.name))
else:
# If this is not a pipeline param, then it should be a value.
pipeline_task_spec.inputs.parameters[input_name].CopyFrom(
pipeline_spec_pb2.TaskInputsSpec.InputParameterSpec(
runtime_value=pipeline_spec_pb2.ValueOrRuntimeParameter(
constant_value=dsl_utils.get_value(param))))
for input_name, art in input_artifacts.items():
if art.op_name:
# If the param has a valid op_name, this should be an artifact produced
# by an upstream task.
pipeline_task_spec.inputs.artifacts[input_name].CopyFrom(
pipeline_spec_pb2.TaskInputsSpec.InputArtifactSpec(
task_output_artifact=pipeline_spec_pb2.TaskInputsSpec.InputArtifactSpec.TaskOutputArtifactSpec(
producer_task='task-{}'.format(art.op_name),
output_artifact_key=art.name)))
else:
# Otherwise, this should be from the input of the subdag.
pipeline_task_spec.inputs.artifacts[input_name].CopyFrom(
pipeline_spec_pb2.TaskInputsSpec.InputArtifactSpec(
component_input_artifact=art.name
))
# TODO: Add task dependencies/trigger policies/caching/iterator
pipeline_task_spec.component_ref.name = dsl_utils.sanitize_component_name(
task_name)
# Construct dummy I/O declaration for the op.
# TODO: resolve name conflict instead of raising errors.
dummy_outputs = collections.OrderedDict()
for output_name, _ in output_artifacts.items():
dummy_outputs[output_name] = _DUMMY_PATH
for output_name, _ in output_parameters.items():
if output_name in dummy_outputs:
raise KeyError('Got name collision for output key %s. Consider renaming '
'either output parameters or output '
'artifacts.' % output_name)
dummy_outputs[output_name] = _DUMMY_PATH
dummy_inputs = collections.OrderedDict()
for input_name, art in input_artifacts.items():
dummy_inputs[input_name] = _DUMMY_PATH
for input_name, param in input_parameters.items():
if input_name in dummy_inputs:
raise KeyError('Got name collision for input key %s. Consider renaming '
'either input parameters or input '
'artifacts.' % input_name)
dummy_inputs[input_name] = _DUMMY_PATH
# Construct the AIP (Unified) custom job op.
return AiPlatformCustomJobOp(
name=task_name,
custom_job_spec=job_spec,
component_spec=pipeline_component_spec,
task_spec=pipeline_task_spec,
task_inputs=[
dsl.InputArgumentPath(
argument=dummy_inputs[input_name],
input=input_name,
path=path,
) for input_name, path in dummy_inputs.items()
],
task_outputs=dummy_outputs
)
def custom_job(
name: str,
input_artifacts: Optional[Dict[str, dsl.PipelineParam]] = None,
input_parameters: Optional[Dict[str, _ValueOrPipelineParam]] = None,
output_artifacts: Optional[Dict[str, Type[artifact.Artifact]]] = None,
output_parameters: Optional[Dict[str, Type[Union[str, float, int]]]] = None,
# Custom container training specs.
image_uri: Optional[str] = None,
commands: Optional[List[str]] = None,
# Custom Python training spec.
executor_image_uri: Optional[str] = None,
package_uris: Optional[List[str]] = None,
python_module: Optional[str] = None,
# Command line args of the user program.
args: Optional[List[Any]] = None,
machine_type: Optional[str] = None,
# Full-fledged custom job API spec. For details please see:
# https://cloud.google.com/ai-platform-unified/docs/reference/rest/v1beta1/CustomJobSpec
additional_job_spec: Optional[Dict[str, Any]] = None
) -> AiPlatformCustomJobOp:
"""DSL representation of a AI Platform (Unified) custom training job.
For detailed doc of the service, please refer to
https://cloud.google.com/ai-platform-unified/docs/training/create-custom-job
Args:
name: The name of this task.
input_artifacts: The input artifact specification. Should be a mapping from
input name to output from upstream tasks.
input_parameters: The input parameter specification. Should be a mapping
from input name to one of the following three:
- output from upstream tasks, or
- pipeline parameter, or
- constant value
output_artifacts: The output artifact declaration. Should be a mapping from
output name to a type subclassing artifact.Artifact.
output_parameters: The output parameter declaration. Should be a mapping
from output name to one of 1) str, 2) float, or 3) int.
image_uri: The URI of the container image containing the user training
program. Applicable for custom container training.
commands: The container command/entrypoint. Applicable for custom container
training.
executor_image_uri: The URI of the container image containing the
dependencies of user training program. Applicable for custom Python
training.
package_uris: The Python packages that are expected to be running on the
executor container. Applicable for custom Python training.
python_module: The entrypoint of user training program. Applicable for
custom Python training.
args: The command line arguments of user training program. This is expected
to be a list of either 1) constant string, or 2) KFP DSL placeholders, to
connect the user program with the declared component I/O.
machine_type: The machine type used to run the training program. The value
of this field will be propagated to all worker pools if not specified
otherwise in additional_job_spec.
additional_job_spec: Full-fledged custom job API spec. The value specified
in this field will override the defaults provided through other function
parameters.
For details please see:
https://cloud.google.com/ai-platform-unified/docs/reference/rest/v1beta1/CustomJobSpec
Returns:
A KFP ContainerOp object representing the launcher container job, from which
the user training program will be submitted to AI Platform (Unified) Custom
Job service.
Raises:
KeyError on name collision between parameter and artifact I/O declaration.
ValueError when:
1. neither or both image_uri and executor_image_uri are provided; or
2. no valid package_uris and python_module is provided for custom Python
training.
"""
# Check the sanity of the provided parameters.
input_artifacts = input_artifacts or {}
input_parameters = input_parameters or {}
output_artifacts = output_artifacts or {}
output_parameters = output_parameters or {}
if bool(set(input_artifacts.keys()) & set(input_parameters.keys())):
raise KeyError('Input key conflict between input parameters and artifacts.')
if bool(set(output_artifacts.keys()) & set(output_parameters.keys())):
raise KeyError('Output key conflict between output parameters and '
'artifacts.')
if not additional_job_spec and bool(image_uri) == bool(executor_image_uri):
raise ValueError('The user program needs to be either a custom container '
'training job, or a custom Python training job')
# For Python custom training job, package URIs and modules are also required.
if executor_image_uri:
if not package_uris or not python_module or len(
package_uris) > _MAX_PACKAGE_URIS:
raise ValueError('For custom Python training, package_uris with length < '
'100 and python_module are expected.')
# Check and scaffold the parameters to form the custom job request spec.
custom_job_spec = additional_job_spec or {}
if not custom_job_spec.get('workerPoolSpecs'):
# Single node training, deriving job spec from top-level parameters.
if image_uri:
# Single node custom container training
worker_pool_spec = {
"machineSpec": {
"machineType": machine_type or _DEFAULT_CUSTOM_JOB_MACHINE_TYPE
},
"replicaCount": "1",
"containerSpec": {
"imageUri": image_uri,
}
}
if commands:
worker_pool_spec['containerSpec']['command'] = commands
if args:
worker_pool_spec['containerSpec']['args'] = args
custom_job_spec['workerPoolSpecs'] = [worker_pool_spec]
if executor_image_uri:
worker_pool_spec = {
"machineSpec": {
"machineType": machine_type or _DEFAULT_CUSTOM_JOB_MACHINE_TYPE
},
"replicaCount": "1",
"pythonPackageSpec": {
"executorImageUri": executor_image_uri,
"packageUris": package_uris,
"pythonModule": python_module,
"args": args
}
}
custom_job_spec['workerPoolSpecs'] = [worker_pool_spec]
else:
# If the full-fledged job spec is provided, we'll use it as much as
# possible and patch some top-level parameters.
for spec in custom_job_spec['workerPoolSpecs']:
if image_uri:
if (not spec.get('pythonPackageSpec')
and not spec.get('containerSpec', {}).get('imageUri')):
spec['containerSpec'] = spec.get('containerSpec', {})
spec['containerSpec']['imageUri'] = image_uri
if commands:
if (not spec.get('pythonPackageSpec')
and not spec.get('containerSpec', {}).get('command')):
spec['containerSpec'] = spec.get('containerSpec', {})
spec['containerSpec']['command'] = commands
if executor_image_uri:
if (not spec.get('containerSpec')
and not spec.get('pythonPackageSpec', {}).get('executorImageUri')):
spec['pythonPackageSpec'] = spec.get('pythonPackageSpec', {})
spec['pythonPackageSpec']['executorImageUri'] = executor_image_uri
if package_uris:
if (not spec.get('containerSpec')
and not spec.get('pythonPackageSpec', {}).get('packageUris')):
spec['pythonPackageSpec'] = spec.get('pythonPackageSpec', {})
spec['pythonPackageSpec']['packageUris'] = package_uris
if python_module:
if (not spec.get('containerSpec')
and not spec.get('pythonPackageSpec', {}).get('pythonModule')):
spec['pythonPackageSpec'] = spec.get('pythonPackageSpec', {})
spec['pythonPackageSpec']['pythonModule'] = python_module
if args:
if spec.get('containerSpec') and not spec['containerSpec'].get('args'):
spec['containerSpec']['args'] = args
if (spec.get('pythonPackageSpec')
and not spec['pythonPackageSpec'].get('args')):
spec['pythonPackageSpec']['args'] = args
# Resolve the custom job spec by wiring it with the I/O spec.
def _resolve_output_path_placeholder(output_key: str) -> str:
if output_key in output_parameters:
return _output_parameter_path_placeholder(output_key)
else:
return _output_artifact_path_placeholder(output_key)
def _resolve_cmd(cmd: Optional[_CommandlineArgumentType]) -> Optional[str]:
"""Resolves a single command line cmd/arg."""
if cmd is None:
return None
elif isinstance(cmd, (str, float, int)):
return str(cmd)
elif isinstance(cmd, _structures.InputValuePlaceholder):
return _input_parameter_placeholder(cmd.input_name)
elif isinstance(cmd, _structures.InputPathPlaceholder):
return _input_artifact_path_placeholder(cmd.input_name)
elif isinstance(cmd, _structures.InputUriPlaceholder):
return _input_artifact_uri_placeholder(cmd.input_name)
elif isinstance(cmd, _structures.OutputPathPlaceholder):
return _resolve_output_path_placeholder(cmd.output_name)
elif isinstance(cmd, _structures.OutputUriPlaceholder):
return _output_artifact_uri_placeholder(cmd.output_name)
else:
raise TypeError('Got unexpected placeholder type for %s' % cmd)
def _resolve_cmd_lines(cmds: Optional[List[_CommandlineArgumentType]]) -> None:
"""Resolves a list of commands/args."""
if not cmds:
return
for idx, cmd in enumerate(cmds):
cmds[idx] = _resolve_cmd(cmd)
for wp_spec in custom_job_spec['workerPoolSpecs']:
if 'containerSpec' in wp_spec:
# For custom container training, resolve placeholders in commands and
# program args.
container_spec = wp_spec['containerSpec']
if 'command' in container_spec:
_resolve_cmd_lines(container_spec['command'])
if 'args' in container_spec:
_resolve_cmd_lines(container_spec['args'])
else:
assert 'pythonPackageSpec' in wp_spec
# For custom Python training, resolve placeholders in args only.
python_spec = wp_spec['pythonPackageSpec']
if 'args' in python_spec:
_resolve_cmd_lines(python_spec['args'])
job_spec = {
'name': name,
'jobSpec': custom_job_spec
}
return _get_custom_job_op(
task_name=name,
job_spec=job_spec,
input_artifacts=input_artifacts,
input_parameters=input_parameters,
output_artifacts=output_artifacts,
output_parameters=output_parameters
)
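# Illustrative usage sketch (not part of the original module): declaring a
# single-node custom-container training task. The image URI, command and
# machine type below are placeholder assumptions, not values shipped with
# this code.
def _example_custom_container_task():
    return custom_job(
        name='example-training',
        input_parameters={'learning_rate': 0.01},
        output_parameters={'accuracy': float},
        image_uri='gcr.io/example-project/trainer:latest',
        commands=['python', '-m', 'trainer.task'],
        machine_type='n1-standard-4',
    )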
|
py | b413579f3a81ab1b50a8ad002c96bd970440218e | import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from firmament import Renderer
from firmament.planets import earth
from firmament.sun import sun_direction
fish_eye = False
config = {
'size_x': 1000 if fish_eye else 1920//4,
'size_y': 1000 if fish_eye else 1080//4,
'ray_samples' : 16,
'light_samples': 8,
'exposure': 2.0,
'zoom': 1.0, # only for pinhole view
'eye_pos': np.array([0, 0, 1.0001]),
'eye_dir': np.array([0, 1, 0]), # only for pinhole view
'date': (2020, 1, 20),
'timezone': 1, # GMT+1
'summertime': False,
'latitude': 49.01356,
'longitude': 8.40444
}
time_range = (6, 20, 0.5)
renderer = Renderer(config, earth)
for time in np.arange(*time_range):
pit = datetime(*config['date'], int(np.floor(time)), int((time-np.floor(time))*60), 0)
sun_dir = sun_direction(config['latitude'], config['longitude'], pit, config['timezone'], 1.0 if config['summertime'] else 0.0)
sun = (
np.cos(sun_dir[0])*np.sin(sun_dir[1]),
np.cos(sun_dir[0])*np.cos(sun_dir[1]),
np.sin(sun_dir[0])
)
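# Interpretation (not stated in the original script): sun_dir appears to be
# (elevation, azimuth), and the tuple above converts it into a unit direction
# vector with z pointing up.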
print(sun_dir)
np_picture = (renderer.render_fisheye if fish_eye else renderer.render_pinhole)(sun)
plt.imsave("sky_%05.1f.png" % time, np_picture, origin='lower')
|
py | b41357c9af0ba4d93bf44c740f9b16e8573b8fc9 | import sys
import logging
def graceful_exit():
logging.error("Invalid input, now exiting program.")
sys.exit()
|
py | b4135aa749a3e7b80755b15c2e5688a7f3fe29e0 | """The TitleXtract model."""
import string
from collections import defaultdict
import torch
import torch.nn as nn
from citextract.utils.model import load_model_params
class TitleTagging(nn.Module):
"""TitleTagging model."""
def __init__(self, input_size, hidden_size, n_layers, n_classes, device):
"""Initialize the model.
Parameters
----------
input_size : int
The number of input neurons.
hidden_size : int
The number of hidden neurons.
n_layers : int
The number of layers.
n_classes : int
The number of output classes.
device : torch.device
The device to run the computations on.
"""
super(TitleTagging, self).__init__()
self.device = device
self.hidden_size = hidden_size
self.n_layers = n_layers
self.lstm = nn.LSTM(input_size, hidden_size, n_layers, batch_first=True, bidirectional=True, dropout=0.5)
self.fc = nn.Linear(hidden_size * 2, n_classes)
def forward(self, x):
"""Forward-propagate the input data.
Parameters
----------
x : torch.Tensor
The input tensor of size (batch_size, sequence_length, input_size).
Returns
-------
torch.Tensor
The output tensor of size (batch_size, sequence_length, n_classes).
"""
# Initialize the hidden and cell states for the first step
h_0 = torch.zeros(2 * self.n_layers, x.size(0), self.hidden_size).to(self.device)
c_0 = torch.zeros(2 * self.n_layers, x.size(0), self.hidden_size).to(self.device)
# Return the output and parameters for the n-th step (n=sequence_len)
lstm_output, _ = self.lstm(x, (h_0, c_0))
# Fully connected layer (hidden_size*2 --> n_classes)
fc_output = self.fc(lstm_output)
# Softmax
softmax_output = nn.Softmax(dim=2)(fc_output)
return softmax_output
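# Illustrative shape check (not part of the original module), using arbitrary
# sizes on the CPU: a random batch of already-embedded characters should come
# out as (batch_size, sequence_length, n_classes) class probabilities.
def _titletagging_shape_check():
    device = torch.device('cpu')
    model = TitleTagging(input_size=32, hidden_size=64, n_layers=2,
                         n_classes=2, device=device).to(device)
    x = torch.randn(4, 100, 32)  # (batch_size, sequence_length, input_size)
    out = model(x)
    assert out.shape == (4, 100, 2)
    return out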
def build_titlextract_model(preprocessor, embed_size=32, hidden_size=64, device=None):
"""Build an instance of the TitleXtract model.
Parameters
----------
preprocessor : TitleXtractPreprocessor
The preprocessor to use.
embed_size : int
The number of embedding neurons to use.
hidden_size : int
The number of hidden neurons to use.
device : torch.device
The device to compute on.
Returns
-------
torch.nn.modules.container.Sequential
A TitleXtract model instance.
"""
vocab_size = len(preprocessor.chars)
n_classes = 2
return nn.Sequential(
torch.nn.Embedding(vocab_size, embed_size),
TitleTagging(input_size=embed_size, hidden_size=hidden_size, n_layers=2, n_classes=n_classes, device=device).to(
device)
).to(device)
class TitleXtractPreprocessor:
"""TitleXtract preprocessor."""
def __init__(self, device=None):
"""Initialize the preprocessor.
Parameters
----------
device : torch.device
The device to use.
"""
chars = list(string.ascii_letters + string.digits + string.punctuation + string.whitespace)
self.chars = ['<PAD>', '<UNK>'] + chars
self.device = device
self.char_mapping = defaultdict(lambda: 1)
for index, char in enumerate(self.chars):
self.char_mapping[char] = index
def map_text_chars(self, text):
"""Map text to numerical character representations.
Parameters
----------
text : str
The text to map.
Returns
-------
torch.Tensor
The tensor representing the mapped characters.
"""
mapped_chars = list(map(lambda char: self.char_mapping.get(char, 1), text))
return torch.Tensor(mapped_chars).long().view(1, -1).to(self.device)
def map_text_targets(self, text, title):
"""Align and map the targets of a text.
Parameters
----------
text : str
The text to map.
title : str
The title (substring of the text) to map.
Returns
-------
torch.Tensor
A tensor representing the characters of the text for which an element is 1 if and only if a character
is both represented by the text and by the title, 0 otherwise.
"""
start_position = text.index(title)
mapped_target = [1 if start_position <= index < start_position + len(title) else 0 for index in
range(len(text))]
return torch.Tensor(mapped_target).view(1, -1).long().to(self.device)
def __call__(self, text, title):
"""Preprocess a text and a title.
Parameters
----------
text : str
The text to preprocess.
title : str
The title to preprocess.
Returns
-------
tuple
A tuple consisting of the following elements:
- A tensor of the characters of the text.
- A tensor of the targets of the characters of the text.
"""
return self.map_text_chars(text), self.map_text_targets(text, title)
class TitleXtractor:
"""TitleXtractor wrapper class."""
def __init__(self, model=None, preprocessor=None, device=None):
"""Initialize the TitleXtractor.
Parameters
----------
model : torch.nn.modules.container.Sequential
The model to use.
preprocessor : TitleXtractPreprocessor
The preprocessor to use.
device : torch.device
The device to use.
"""
self.device = device
self.preprocessor = preprocessor if preprocessor else TitleXtractPreprocessor(device=device)
self.model = model if model else build_titlextract_model(self.preprocessor, device=device)
def load(self, model_uri=None, ignore_cache=False):
"""Load model parameters from the internet.
Parameters
----------
model_uri : str
The model URI to load from.
ignore_cache : bool
When true, all caches are ignored and the model parameters are forcefully downloaded.
Returns
-------
TitleXtractor
The wrapper itself.
"""
self.model = load_model_params(self.model, 'titlextract', model_uri, ignore_cache=ignore_cache,
device=self.device)
return self
def __call__(self, ref):
"""Run the TitleXtract model.
Parameters
----------
ref : str
Reference to find a title for.
Returns
-------
str
The found title, and none if no title was found.
"""
result = self.model(self.preprocessor.map_text_chars(ref)).argmax(dim=2).cpu()[0].detach().numpy().tolist()
if 1 not in result:
return None
start_pos = result.index(1)
subselection = result[start_pos:]
if 0 in subselection:
length = result[start_pos:].index(0)
title = ref[start_pos:start_pos + length]
else:
title = ref[start_pos:]
return title.strip()
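# Illustrative usage (an assumption about the intended workflow, not original
# code): download the published parameters and extract a title from a
# reference string. Requires network access for load().
def _example_titlextract():
    extractor = TitleXtractor(device=torch.device('cpu')).load()
    return extractor('Doe, J. (2020). A Hypothetical Paper Title. Some Journal, 1(2), 3-4.')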
|
py | b4135aca58084cf0a9bcd0717e0f1cd0b68d10e5 | from conans import ConanFile, CMake, tools
import os
class TestPackageConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake", "cmake_find_package_multi"
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def test(self):
if not tools.cross_building(self):
with tools.environment_append({"TERM": "xtermc"}):
bin_path = os.path.join("bin", "test_package")
self.run(bin_path, run_environment=True)
|
py | b4135b999eca0455321ed463fa8989bbd822ce2c | #!/usr/bin/env python3
import inspect
import logging
# import gevent
# from gevent import monkey
# monkey.patch_all()
import rpyc
class EchoService(rpyc.Service):
def on_connect(self, conn):
msg = f"on connect service peer name: {conn._channel.stream.sock.getpeername()}"
conn._config["logger"].debug(msg)
def on_disconnect(self, conn):
pass
def exposed_echo(self, message):
if message == "Echo":
return "Echo Reply"
else:
return "Parameter Problem"
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
echo_svc = rpyc.OneShotServer(service=EchoService, port=18861, protocol_config={'allow_all_attrs': True})
echo_svc.start()
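# Illustrative client side (not part of the original script), assuming the
# server above is already running and reachable on localhost:18861:
#
#   import rpyc
#   conn = rpyc.connect("localhost", 18861)
#   print(conn.root.echo("Echo"))      # -> "Echo Reply"
#   print(conn.root.echo("anything"))  # -> "Parameter Problem"
#   conn.close()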
|
py | b4135f650dd276f02ea93cc10dd2f9fd99f0b812 | import re
import json
import decimal
import pytest
import mock
from pytest import fixture
from six import BytesIO
from six.moves.BaseHTTPServer import HTTPServer
from chalice import app
from chalice import local, BadRequestError, CORSConfig
from chalice import Response
from chalice import IAMAuthorizer
from chalice import CognitoUserPoolAuthorizer
from chalice.config import Config
from chalice.local import LambdaContext
from chalice.local import LocalARNBuilder
from chalice.local import LocalGateway
from chalice.local import LocalGatewayAuthorizer
from chalice.local import NotAuthorizedError
from chalice.local import ForbiddenError
from chalice.local import InvalidAuthorizerError
from chalice.local import LocalDevServer
AWS_REQUEST_ID_PATTERN = re.compile(
'^[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}$',
re.I)
class FakeTimeSource(object):
def __init__(self, times):
"""Create a fake source of second-precision time.
:type times: List
:param times: List of times that the time source should return, in the
order it should return them. These should be in seconds.
"""
self._times = times
def time(self):
"""Get the next time.
This is for mimicking the Clock interface used in local.
"""
time = self._times.pop(0)
return time
class ChaliceStubbedHandler(local.ChaliceRequestHandler):
requestline = ''
request_version = 'HTTP/1.1'
def setup(self):
self.rfile = BytesIO()
self.wfile = BytesIO()
self.requestline = ''
def finish(self):
pass
class CustomSampleChalice(app.Chalice):
def custom_method(self):
return "foo"
@pytest.fixture
def arn_builder():
return LocalARNBuilder()
@pytest.fixture
def lambda_context_args():
# LambdaContext has several positional args before the ones that we
# care about for the timing tests, this gives reasonable defaults for
# those arguments.
return ['lambda_name', 256]
@fixture
def custom_sample_app():
demo = CustomSampleChalice(app_name='custom-demo-app')
demo.debug = True
return demo
@fixture
def sample_app():
demo = app.Chalice('demo-app')
demo.debug = True
@demo.route('/index', methods=['GET'])
def index():
return {'hello': 'world'}
@demo.route('/names/{name}', methods=['GET'])
def name(name):
return {'provided-name': name}
@demo.route('/put', methods=['PUT'])
def put():
return {'body': demo.current_request.json_body}
@demo.route('/cors', methods=['GET', 'PUT'], cors=True)
def cors():
return {'cors': True}
@demo.route('/custom_cors', methods=['GET', 'PUT'], cors=CORSConfig(
allow_origin='https://foo.bar',
allow_headers=['Header-A', 'Header-B'],
expose_headers=['Header-A', 'Header-B'],
max_age=600,
allow_credentials=True
))
def custom_cors():
return {'cors': True}
@demo.route('/cors-enabled-for-one-method', methods=['GET'])
def without_cors():
return {'ok': True}
@demo.route('/cors-enabled-for-one-method', methods=['POST'], cors=True)
def with_cors():
return {'ok': True}
@demo.route('/options', methods=['OPTIONS'])
def options():
return {'options': True}
@demo.route('/delete', methods=['DELETE'])
def delete():
return {'delete': True}
@demo.route('/patch', methods=['PATCH'])
def patch():
return {'patch': True}
@demo.route('/badrequest')
def badrequest():
raise BadRequestError('bad-request')
@demo.route('/decimals')
def decimals():
return decimal.Decimal('100')
@demo.route('/query-string')
def query_string():
return demo.current_request.query_params
@demo.route('/query-string-multi')
def query_string_multi():
params = demo.current_request.query_params
keys = {k: params.getlist(k) for k in params}
return keys
@demo.route('/custom-response')
def custom_response():
return Response(body='text',
status_code=200,
headers={'Content-Type': 'text/plain'})
@demo.route('/binary', methods=['POST'],
content_types=['application/octet-stream'])
def binary_round_trip():
return Response(body=demo.current_request.raw_body,
status_code=200,
headers={'Content-Type': 'application/octet-stream'})
@demo.route('/multi-value-header')
def multi_value_header():
return Response(body={},
status_code=200,
headers={
'Set-Cookie': ['CookieA=ValueA', 'CookieB=ValueB']
})
return demo
@fixture
def demo_app_auth():
demo = app.Chalice('app-name')
def _policy(effect, resource, action='execute-api:Invoke'):
return {
'context': {},
'principalId': 'user',
'policyDocument': {
'Version': '2012-10-17',
'Statement': [
{
'Action': action,
'Effect': effect,
'Resource': resource,
}
]
}
}
@demo.authorizer()
def auth_with_explicit_policy(auth_request):
token = auth_request.token
if token == 'allow':
return _policy(
effect='Allow', resource=[
"arn:aws:execute-api:mars-west-1:123456789012:"
"ymy8tbxw7b/api/GET/explicit"])
else:
return _policy(
effect='Deny', resource=[
"arn:aws:execute-api:mars-west-1:123456789012:"
"ymy8tbxw7b/api/GET/explicit"])
@demo.authorizer()
def demo_authorizer_returns_none(auth_request):
return None
@demo.authorizer()
def auth_with_multiple_actions(auth_request):
return _policy(
effect='Allow', resource=[
"arn:aws:execute-api:mars-west-1:123456789012:"
"ymy8tbxw7b/api/GET/multi"],
action=['execute-api:Invoke', 'execute-api:Other']
)
@demo.authorizer()
def demo_auth(auth_request):
token = auth_request.token
if token == 'allow':
return app.AuthResponse(routes=['/index'], principal_id='user')
else:
return app.AuthResponse(routes=[], principal_id='user')
@demo.authorizer()
def resource_auth(auth_request):
token = auth_request.token
if token == 'allow':
return app.AuthResponse(routes=['/resource/foobar'],
principal_id='user')
else:
return app.AuthResponse(routes=[], principal_id='user')
@demo.authorizer()
def all_auth(auth_request):
token = auth_request.token
if token == 'allow':
return app.AuthResponse(routes=['*'], principal_id='user')
else:
return app.AuthResponse(routes=[], principal_id='user')
@demo.authorizer()
def landing_page_auth(auth_request):
token = auth_request.token
if token == 'allow':
return app.AuthResponse(routes=['/'], principal_id='user')
else:
return app.AuthResponse(routes=[], principal_id='user')
iam_authorizer = IAMAuthorizer()
cognito_authorizer = CognitoUserPoolAuthorizer('app-name', [])
@demo.route('/', authorizer=landing_page_auth)
def landing_view():
return {}
@demo.route('/index', authorizer=demo_auth)
def index_view():
return {}
@demo.route('/secret', authorizer=demo_auth)
def secret_view():
return {}
@demo.route('/resource/{name}', authorizer=resource_auth)
def single_value(name):
return {'resource': name}
@demo.route('/secret/{value}', authorizer=all_auth)
def secret_view_value(value):
return {'secret': value}
@demo.route('/explicit', authorizer=auth_with_explicit_policy)
def explicit():
return {}
@demo.route('/multi', authorizer=auth_with_multiple_actions)
def multi():
return {}
@demo.route('/iam', authorizer=iam_authorizer)
def iam_route():
return {}
@demo.route('/cognito', authorizer=cognito_authorizer)
def cognito_route():
return {}
@demo.route('/none', authorizer=demo_authorizer_returns_none)
def none_auth():
return {}
return demo
@fixture
def handler(sample_app):
config = Config()
chalice_handler = ChaliceStubbedHandler(
None, ('127.0.0.1', 2000), None, app_object=sample_app, config=config)
chalice_handler.sample_app = sample_app
return chalice_handler
@fixture
def auth_handler(demo_app_auth):
config = Config()
chalice_handler = ChaliceStubbedHandler(
None, ('127.0.0.1', 2000), None, app_object=demo_app_auth,
config=config)
chalice_handler.sample_app = demo_app_auth
return chalice_handler
def _get_raw_body_from_response_stream(handler):
# This is going to include things like status code and
# response headers in the raw stream. We just care about the
# body for now so we'll split lines.
raw_response = handler.wfile.getvalue()
body = raw_response.splitlines()[-1]
return body
def _get_body_from_response_stream(handler):
body = _get_raw_body_from_response_stream(handler)
return json.loads(body)
def set_current_request(handler, method, path, headers=None):
if headers is None:
headers = {'content-type': 'application/json'}
handler.command = method
handler.path = path
handler.headers = headers
def test_can_convert_request_handler_to_lambda_event(handler):
set_current_request(handler, method='GET', path='/index')
handler.do_GET()
assert _get_body_from_response_stream(handler) == {'hello': 'world'}
def test_uses_http_11(handler):
set_current_request(handler, method='GET', path='/index')
handler.do_GET()
response_lines = handler.wfile.getvalue().splitlines()
assert b'HTTP/1.1 200 OK' in response_lines
def test_can_route_url_params(handler):
set_current_request(handler, method='GET', path='/names/james')
handler.do_GET()
assert _get_body_from_response_stream(handler) == {
'provided-name': 'james'}
def test_can_route_put_with_body(handler):
body = b'{"foo": "bar"}'
headers = {'content-type': 'application/json',
'content-length': len(body)}
set_current_request(handler, method='PUT', path='/put',
headers=headers)
handler.rfile.write(body)
handler.rfile.seek(0)
handler.do_PUT()
assert _get_body_from_response_stream(handler) == {
'body': {'foo': 'bar'}}
def test_will_respond_with_cors_enabled(handler):
headers = {'content-type': 'application/json', 'origin': 'null'}
set_current_request(handler, method='GET', path='/cors', headers=headers)
handler.do_GET()
response_lines = handler.wfile.getvalue().splitlines()
assert b'Access-Control-Allow-Origin: *' in response_lines
def test_will_respond_with_custom_cors_enabled(handler):
headers = {'content-type': 'application/json', 'origin': 'null'}
set_current_request(handler, method='GET', path='/custom_cors',
headers=headers)
handler.do_GET()
response = handler.wfile.getvalue().splitlines()
assert b'HTTP/1.1 200 OK' in response
assert b'Access-Control-Allow-Origin: https://foo.bar' in response
assert (b'Access-Control-Allow-Headers: Authorization,Content-Type,'
b'Header-A,Header-B,X-Amz-Date,X-Amz-Security-Token,'
b'X-Api-Key') in response
assert b'Access-Control-Expose-Headers: Header-A,Header-B' in response
assert b'Access-Control-Max-Age: 600' in response
assert b'Access-Control-Allow-Credentials: true' in response
def test_will_respond_with_custom_cors_enabled_options(handler):
headers = {'content-type': 'application/json', 'origin': 'null'}
set_current_request(handler, method='OPTIONS', path='/custom_cors',
headers=headers)
handler.do_OPTIONS()
response = handler.wfile.getvalue().decode().splitlines()
assert 'HTTP/1.1 200 OK' in response
assert 'Access-Control-Allow-Origin: https://foo.bar' in response
assert ('Access-Control-Allow-Headers: Authorization,Content-Type,'
'Header-A,Header-B,X-Amz-Date,X-Amz-Security-Token,'
'X-Api-Key') in response
assert 'Access-Control-Expose-Headers: Header-A,Header-B' in response
assert 'Access-Control-Max-Age: 600' in response
assert 'Access-Control-Allow-Credentials: true' in response
assert 'Content-Length: 0' in response
# Ensure that the Access-Control-Allow-Methods header is sent
# and that it sends all the correct methods over.
methods_lines = [line for line in response
if line.startswith('Access-Control-Allow-Methods')]
assert len(methods_lines) == 1
method_line = methods_lines[0]
_, methods_header_value = method_line.split(': ')
methods = methods_header_value.strip().split(',')
assert ['GET', 'OPTIONS', 'PUT'] == sorted(methods)
def test_can_preflight_request(handler):
headers = {'content-type': 'application/json', 'origin': 'null'}
set_current_request(handler, method='OPTIONS', path='/cors',
headers=headers)
handler.do_OPTIONS()
response_lines = handler.wfile.getvalue().splitlines()
assert b'Access-Control-Allow-Origin: *' in response_lines
def test_non_preflight_options_request(handler):
headers = {'content-type': 'application/json', 'origin': 'null'}
set_current_request(handler, method='OPTIONS', path='/options',
headers=headers)
handler.do_OPTIONS()
assert _get_body_from_response_stream(handler) == {'options': True}
def test_preflight_request_should_succeed_even_if_cors_disabled(handler):
headers = {'content-type': 'application/json', 'origin': 'null'}
set_current_request(handler, method='OPTIONS', path='/index',
headers=headers)
handler.do_OPTIONS()
response_lines = handler.wfile.getvalue().splitlines()
assert b'HTTP/1.1 200 OK' in response_lines
def test_preflight_returns_correct_methods_in_access_allow_header(handler):
headers = {'content-type': 'application/json', 'origin': 'null'}
set_current_request(handler, method='OPTIONS',
path='/cors-enabled-for-one-method',
headers=headers)
handler.do_OPTIONS()
response_lines = handler.wfile.getvalue().splitlines()
assert b'HTTP/1.1 200 OK' in response_lines
assert b'Access-Control-Allow-Methods: POST,OPTIONS' in response_lines
def test_errors_converted_to_json_response(handler):
set_current_request(handler, method='GET', path='/badrequest')
handler.do_GET()
assert _get_body_from_response_stream(handler) == {
'Code': 'BadRequestError',
'Message': 'bad-request'
}
def test_can_support_delete_method(handler):
set_current_request(handler, method='DELETE', path='/delete')
handler.do_DELETE()
assert _get_body_from_response_stream(handler) == {'delete': True}
def test_can_support_patch_method(handler):
set_current_request(handler, method='PATCH', path='/patch')
handler.do_PATCH()
assert _get_body_from_response_stream(handler) == {'patch': True}
def test_can_support_decimals(handler):
set_current_request(handler, method='GET', path='/decimals')
handler.do_GET()
assert _get_body_from_response_stream(handler) == 100
def test_unsupported_methods_raise_error(handler):
set_current_request(handler, method='POST', path='/index')
handler.do_POST()
assert _get_body_from_response_stream(handler) == {
'Code': 'MethodNotAllowedError',
'Message': 'Unsupported method: POST'
}
def test_can_round_trip_binary(handler):
body = b'\xFE\xED'
set_current_request(
handler, method='POST', path='/binary',
headers={
'content-type': 'application/octet-stream',
'accept': 'application/octet-stream',
'content-length': len(body)
}
)
handler.rfile.write(body)
handler.rfile.seek(0)
handler.do_POST()
response = _get_raw_body_from_response_stream(handler)
assert response == body
def test_querystring_is_mapped(handler):
set_current_request(handler, method='GET', path='/query-string?a=b&c=d')
handler.do_GET()
assert _get_body_from_response_stream(handler) == {'a': 'b', 'c': 'd'}
def test_empty_querystring_is_none(handler):
set_current_request(handler, method='GET', path='/query-string')
handler.do_GET()
assert _get_body_from_response_stream(handler) is None
def test_querystring_list_is_mapped(handler):
set_current_request(
handler,
method='GET', path='/query-string-multi?a=b&c=d&a=c&e='
)
handler.do_GET()
expected = {'a': ['b', 'c'], 'c': ['d'], 'e': ['']}
assert _get_body_from_response_stream(handler) == expected
def test_querystring_undefined_is_mapped_consistent_with_apigateway(handler):
# API Gateway picks up the last element of duplicate keys in a
# querystring
set_current_request(handler, method='GET', path='/query-string?a=b&a=c')
handler.do_GET()
assert _get_body_from_response_stream(handler) == {'a': 'c'}
def test_content_type_included_once(handler):
set_current_request(handler, method='GET', path='/custom-response')
handler.do_GET()
value = handler.wfile.getvalue()
response_lines = value.splitlines()
content_header_lines = [line for line in response_lines
if line.startswith(b'Content-Type')]
assert len(content_header_lines) == 1
def test_can_deny_unauthed_request(auth_handler):
set_current_request(auth_handler, method='GET', path='/index')
auth_handler.do_GET()
value = auth_handler.wfile.getvalue()
response_lines = value.splitlines()
assert b'HTTP/1.1 401 Unauthorized' in response_lines
assert b'x-amzn-ErrorType: UnauthorizedException' in response_lines
assert b'Content-Type: application/json' in response_lines
assert b'{"message":"Unauthorized"}' in response_lines
def test_multi_value_header(handler):
set_current_request(handler, method='GET', path='/multi-value-header')
handler.do_GET()
response = handler.wfile.getvalue().decode().splitlines()
assert 'Set-Cookie: CookieA=ValueA' in response
assert 'Set-Cookie: CookieB=ValueB' in response
@pytest.mark.parametrize('actual_url,matched_url', [
('/foo', '/foo'),
('/foo/', '/foo'),
('/foo/bar', '/foo/bar'),
('/foo/other', '/foo/{capture}'),
('/names/foo', '/names/{capture}'),
('/names/bar', '/names/{capture}'),
('/names/bar/', '/names/{capture}'),
('/names/', None),
('/nomatch', None),
('/names/bar/wrong', None),
('/a/z/c', '/a/{capture}/c'),
('/a/b/c', '/a/b/c'),
])
def test_can_match_exact_route(actual_url, matched_url):
matcher = local.RouteMatcher([
'/foo', '/foo/{capture}', '/foo/bar',
'/names/{capture}',
'/a/{capture}/c', '/a/b/c'
])
if matched_url is not None:
assert matcher.match_route(actual_url).route == matched_url
else:
with pytest.raises(ValueError):
matcher.match_route(actual_url)
def test_lambda_event_contains_source_ip():
converter = local.LambdaEventConverter(
local.RouteMatcher(['/foo/bar']))
event = converter.create_lambda_event(
method='GET',
path='/foo/bar',
headers={'content-type': 'application/json'}
)
source_ip = event.get('requestContext').get('identity').get('sourceIp')
assert source_ip == local.LambdaEventConverter.LOCAL_SOURCE_IP
def test_can_create_lambda_event():
converter = local.LambdaEventConverter(
local.RouteMatcher(['/foo/bar', '/foo/{capture}']))
event = converter.create_lambda_event(
method='GET',
path='/foo/other',
headers={'content-type': 'application/json'}
)
assert event == {
'requestContext': {
'httpMethod': 'GET',
'resourcePath': '/foo/{capture}',
'path': '/foo/other',
'identity': {
'sourceIp': local.LambdaEventConverter.LOCAL_SOURCE_IP
},
},
'headers': {'content-type': 'application/json'},
'pathParameters': {'capture': 'other'},
'multiValueQueryStringParameters': None,
'body': None,
'stageVariables': {},
}
def test_parse_query_string():
converter = local.LambdaEventConverter(
local.RouteMatcher(['/foo/bar', '/foo/{capture}']))
event = converter.create_lambda_event(
method='GET',
path='/foo/other?a=1&b=&c=3',
headers={'content-type': 'application/json'}
)
assert event == {
'requestContext': {
'httpMethod': 'GET',
'resourcePath': '/foo/{capture}',
'path': '/foo/other',
'identity': {
'sourceIp': local.LambdaEventConverter.LOCAL_SOURCE_IP
},
},
'headers': {'content-type': 'application/json'},
'pathParameters': {'capture': 'other'},
'multiValueQueryStringParameters': {'a': ['1'], 'b': [''], 'c': ['3']},
'body': None,
'stageVariables': {},
}
def test_can_create_lambda_event_for_put_request():
converter = local.LambdaEventConverter(
local.RouteMatcher(['/foo/bar', '/foo/{capture}']))
event = converter.create_lambda_event(
method='PUT',
path='/foo/other',
headers={'content-type': 'application/json'},
body='{"foo": "bar"}',
)
assert event == {
'requestContext': {
'httpMethod': 'PUT',
'resourcePath': '/foo/{capture}',
'path': '/foo/other',
'identity': {
'sourceIp': local.LambdaEventConverter.LOCAL_SOURCE_IP
},
},
'headers': {'content-type': 'application/json'},
'pathParameters': {'capture': 'other'},
'multiValueQueryStringParameters': None,
'body': '{"foo": "bar"}',
'stageVariables': {},
}
def test_can_create_lambda_event_for_post_with_formencoded_body():
converter = local.LambdaEventConverter(
local.RouteMatcher(['/foo/bar', '/foo/{capture}']))
form_body = 'foo=bar&baz=qux'
event = converter.create_lambda_event(
method='POST',
path='/foo/other',
headers={'content-type': 'application/x-www-form-urlencoded'},
body=form_body,
)
assert event == {
'requestContext': {
'httpMethod': 'POST',
'resourcePath': '/foo/{capture}',
'path': '/foo/other',
'identity': {
'sourceIp': local.LambdaEventConverter.LOCAL_SOURCE_IP
},
},
'headers': {'content-type': 'application/x-www-form-urlencoded'},
'pathParameters': {'capture': 'other'},
'multiValueQueryStringParameters': None,
'body': form_body,
'stageVariables': {},
}
def test_can_provide_port_to_local_server(sample_app):
dev_server = local.create_local_server(sample_app, None, '127.0.0.1',
port=23456)
assert dev_server.server.server_port == 23456
def test_can_provide_host_to_local_server(sample_app):
dev_server = local.create_local_server(sample_app, None, host='0.0.0.0',
port=23456)
assert dev_server.host == '0.0.0.0'
def test_wraps_custom_sample_app_with_local_chalice(custom_sample_app):
dev_server = local.create_local_server(custom_sample_app, None,
host='0.0.0.0', port=23456)
assert isinstance(dev_server.app_object, local.LocalChalice)
assert isinstance(dev_server.app_object, custom_sample_app.__class__)
assert dev_server.app_object.custom_method() == 'foo'
class TestLambdaContext(object):
def test_can_get_remaining_time_once(self, lambda_context_args):
time_source = FakeTimeSource([0, 5])
context = LambdaContext(*lambda_context_args, max_runtime_ms=10000,
time_source=time_source)
time_remaining = context.get_remaining_time_in_millis()
assert time_remaining == 5000
def test_can_get_remaining_time_multiple(self, lambda_context_args):
time_source = FakeTimeSource([0, 3, 7, 9])
context = LambdaContext(*lambda_context_args, max_runtime_ms=10000,
time_source=time_source)
time_remaining = context.get_remaining_time_in_millis()
assert time_remaining == 7000
time_remaining = context.get_remaining_time_in_millis()
assert time_remaining == 3000
time_remaining = context.get_remaining_time_in_millis()
assert time_remaining == 1000
def test_does_populate_aws_request_id_with_valid_uuid(self,
lambda_context_args):
context = LambdaContext(*lambda_context_args)
assert AWS_REQUEST_ID_PATTERN.match(context.aws_request_id)
def test_does_set_version_to_latest(self, lambda_context_args):
context = LambdaContext(*lambda_context_args)
assert context.function_version == '$LATEST'
class TestLocalGateway(object):
def test_can_invoke_function(self):
demo = app.Chalice('app-name')
@demo.route('/')
def index_view():
return {'foo': 'bar'}
gateway = LocalGateway(demo, Config())
response = gateway.handle_request('GET', '/', {}, '')
body = json.loads(response['body'])
assert body['foo'] == 'bar'
def test_does_populate_context(self):
demo = app.Chalice('app-name')
@demo.route('/context')
def context_view():
context = demo.lambda_context
return {
'name': context.function_name,
'memory': context.memory_limit_in_mb,
'version': context.function_version,
'timeout': context.get_remaining_time_in_millis(),
'request_id': context.aws_request_id,
}
disk_config = {
'lambda_timeout': 10,
'lambda_memory_size': 256,
}
config = Config(chalice_stage='api', config_from_disk=disk_config)
gateway = LocalGateway(demo, config)
response = gateway.handle_request('GET', '/context', {}, '')
body = json.loads(response['body'])
assert body['name'] == 'api_handler'
assert body['memory'] == 256
assert body['version'] == '$LATEST'
assert body['timeout'] > 10
assert body['timeout'] <= 10000
assert AWS_REQUEST_ID_PATTERN.match(body['request_id'])
def test_defaults_timeout_if_needed(self):
demo = app.Chalice('app-name')
@demo.route('/context')
def context_view():
context = demo.lambda_context
return {
'remaining': context.get_remaining_time_in_millis(),
}
disk_config = {}
config = Config(chalice_stage='api', config_from_disk=disk_config)
gateway = LocalGateway(demo, config)
response = gateway.handle_request('GET', '/context', {}, '')
body = json.loads(response['body'])
assert body['remaining'] <= gateway.MAX_LAMBDA_EXECUTION_TIME * 1000
def test_can_validate_route_with_variables(self, demo_app_auth):
gateway = LocalGateway(demo_app_auth, Config())
response = gateway.handle_request(
'GET', '/secret/foobar', {'Authorization': 'allow'}, '')
json_body = json.loads(response['body'])
assert json_body['secret'] == 'foobar'
def test_can_allow_route_with_variables(self, demo_app_auth):
gateway = LocalGateway(demo_app_auth, Config())
response = gateway.handle_request(
'GET', '/resource/foobar', {'Authorization': 'allow'}, '')
json_body = json.loads(response['body'])
assert json_body['resource'] == 'foobar'
def test_does_send_500_when_authorizer_returns_none(self, demo_app_auth):
gateway = LocalGateway(demo_app_auth, Config())
with pytest.raises(InvalidAuthorizerError):
gateway.handle_request(
'GET', '/none', {'Authorization': 'foobarbaz'}, '')
def test_can_deny_route_with_variables(self, demo_app_auth):
gateway = LocalGateway(demo_app_auth, Config())
with pytest.raises(ForbiddenError):
gateway.handle_request(
'GET', '/resource/foobarbaz', {'Authorization': 'allow'}, '')
def test_does_deny_unauthed_request(self, demo_app_auth):
gateway = LocalGateway(demo_app_auth, Config())
with pytest.raises(ForbiddenError) as ei:
gateway.handle_request(
'GET', '/index', {'Authorization': 'deny'}, '')
exception_body = str(ei.value.body)
assert ('{"Message": '
'"User is not authorized to '
'access this resource"}') in exception_body
def test_does_throw_unauthorized_when_no_auth_token_present_on_valid_route(
self, demo_app_auth):
gateway = LocalGateway(demo_app_auth, Config())
with pytest.raises(NotAuthorizedError) as ei:
gateway.handle_request(
'GET', '/index', {}, '')
exception_body = str(ei.value.body)
assert '{"message":"Unauthorized"}' in exception_body
def test_does_deny_with_forbidden_when_route_not_found(
self, demo_app_auth):
gateway = LocalGateway(demo_app_auth, Config())
with pytest.raises(ForbiddenError) as ei:
gateway.handle_request('GET', '/badindex', {}, '')
exception_body = str(ei.value.body)
assert 'Missing Authentication Token' in exception_body
def test_does_deny_with_forbidden_when_auth_token_present(
self, demo_app_auth):
gateway = LocalGateway(demo_app_auth, Config())
with pytest.raises(ForbiddenError) as ei:
gateway.handle_request('GET', '/badindex',
{'Authorization': 'foobar'}, '')
# The message should be a more complicated error message to do with
# signing the request. It always ends with the Authorization token
# that we passed up, so we can check for that.
exception_body = str(ei.value.body)
assert 'Authorization=foobar' in exception_body
class TestLocalBuiltinAuthorizers(object):
def test_can_authorize_empty_path(self, lambda_context_args,
demo_app_auth, create_event):
# Ensures that / routes work since that is a special case in the
# API Gateway arn generation where an extra / is appended to the end
# of the arn.
authorizer = LocalGatewayAuthorizer(demo_app_auth)
path = '/'
event = create_event(path, 'GET', {})
event['headers']['authorization'] = 'allow'
context = LambdaContext(*lambda_context_args)
event, context = authorizer.authorize(path, event, context)
assert event['requestContext']['authorizer']['principalId'] == 'user'
def test_can_call_method_without_auth(self, lambda_context_args,
create_event):
demo = app.Chalice('app-name')
@demo.route('/index')
def index_view():
return {}
path = '/index'
authorizer = LocalGatewayAuthorizer(demo)
original_event = create_event(path, 'GET', {})
original_context = LambdaContext(*lambda_context_args)
event, context = authorizer.authorize(
path, original_event, original_context)
        # Assert that when authorizer.authorize is called and there is no
        # authorizer defined for a particular route, it is a noop.
assert original_event == event
assert original_context == context
def test_does_raise_not_authorized_error(self, demo_app_auth,
lambda_context_args,
create_event):
authorizer = LocalGatewayAuthorizer(demo_app_auth)
path = '/index'
event = create_event(path, 'GET', {})
context = LambdaContext(*lambda_context_args)
with pytest.raises(NotAuthorizedError):
authorizer.authorize(path, event, context)
def test_does_authorize_valid_requests(self, demo_app_auth,
lambda_context_args, create_event):
authorizer = LocalGatewayAuthorizer(demo_app_auth)
path = '/index'
event = create_event(path, 'GET', {})
event['headers']['authorization'] = 'allow'
context = LambdaContext(*lambda_context_args)
event, context = authorizer.authorize(path, event, context)
assert event['requestContext']['authorizer']['principalId'] == 'user'
def test_does_authorize_unsupported_authorizer(self, demo_app_auth,
lambda_context_args,
create_event):
authorizer = LocalGatewayAuthorizer(demo_app_auth)
path = '/iam'
event = create_event(path, 'GET', {})
context = LambdaContext(*lambda_context_args)
with pytest.warns(None) as recorded_warnings:
new_event, new_context = authorizer.authorize(path, event, context)
assert event == new_event
assert context == new_context
assert len(recorded_warnings) == 1
warning = recorded_warnings[0]
assert issubclass(warning.category, UserWarning)
assert ('IAMAuthorizer is not a supported in local '
'mode. All requests made against a route will be authorized'
' to allow local testing.') in str(warning.message)
def test_cannot_access_view_without_permission(self, demo_app_auth,
lambda_context_args,
create_event):
authorizer = LocalGatewayAuthorizer(demo_app_auth)
path = '/secret'
event = create_event(path, 'GET', {})
event['headers']['authorization'] = 'allow'
context = LambdaContext(*lambda_context_args)
with pytest.raises(ForbiddenError):
authorizer.authorize(path, event, context)
def test_can_understand_explicit_auth_policy(self, demo_app_auth,
lambda_context_args,
create_event):
authorizer = LocalGatewayAuthorizer(demo_app_auth)
path = '/explicit'
event = create_event(path, 'GET', {})
event['headers']['authorization'] = 'allow'
context = LambdaContext(*lambda_context_args)
event, context = authorizer.authorize(path, event, context)
assert event['requestContext']['authorizer']['principalId'] == 'user'
def test_can_understand_explicit_deny_policy(self, demo_app_auth,
lambda_context_args,
create_event):
        # Our auto-generated policies from the AuthResponse object do not
        # contain any Deny clauses; however, we also allow the user to return
        # a dictionary that is translated into a policy, so we have to
        # account for the ability for a user to set an explicit deny policy.
        # It should behave exactly as not getting permission added with an
        # allow.
authorizer = LocalGatewayAuthorizer(demo_app_auth)
path = '/explicit'
event = create_event(path, 'GET', {})
context = LambdaContext(*lambda_context_args)
with pytest.raises(NotAuthorizedError):
authorizer.authorize(path, event, context)
def test_can_understand_multi_actions(self, demo_app_auth,
lambda_context_args,
create_event):
authorizer = LocalGatewayAuthorizer(demo_app_auth)
path = '/multi'
event = create_event(path, 'GET', {})
event['headers']['authorization'] = 'allow'
context = LambdaContext(*lambda_context_args)
event, context = authorizer.authorize(path, event, context)
assert event['requestContext']['authorizer']['principalId'] == 'user'
def test_can_understand_cognito_token(self, lambda_context_args,
demo_app_auth, create_event):
        # Ensures that a Cognito ID token is decoded locally and that the
        # principalId is populated from the token's cognito:username claim.
authorizer = LocalGatewayAuthorizer(demo_app_auth)
path = '/cognito'
event = create_event(path, 'GET', {})
event["headers"]["authorization"] = "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJhYWFhYWFhYS1iYmJiLWNjY2MtZGRkZC1lZWVlZWVlZWVlZWUiLCJhdWQiOiJ4eHh4eHh4eHh4eHhleGFtcGxlIiwiZW1haWxfdmVyaWZpZWQiOnRydWUsInRva2VuX3VzZSI6ImlkIiwiYXV0aF90aW1lIjoxNTAwMDA5NDAwLCJpc3MiOiJodHRwczovL2NvZ25pdG8taWRwLnVzLWVhc3QtMS5hbWF6b25hd3MuY29tL3VzLWVhc3QtMV9leGFtcGxlIiwiY29nbml0bzp1c2VybmFtZSI6ImphbmVkb2UiLCJleHAiOjE1ODQ3MjM2MTYsImdpdmVuX25hbWUiOiJKYW5lIiwiaWF0IjoxNTAwMDA5NDAwLCJlbWFpbCI6ImphbmVkb2VAZXhhbXBsZS5jb20iLCJqdGkiOiJkN2UxMTMzYS0xZTNhLTQyMzEtYWU3Yi0yOGQ4NWVlMGIxNGQifQ.p35Yj9KJD5RbfPWGL08IJHgson8BhdGLPQqUOiF0-KM" # noqa
context = LambdaContext(*lambda_context_args)
event, context = authorizer.authorize(path, event, context)
principal_id = event['requestContext']['authorizer']['principalId']
assert principal_id == 'janedoe'
def test_does_authorize_unsupported_cognito_token(self,
lambda_context_args,
demo_app_auth,
create_event):
authorizer = LocalGatewayAuthorizer(demo_app_auth)
path = '/cognito'
event = create_event(path, 'GET', {})
event["headers"]["authorization"] = "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJzdWIiOiJhYWFhYWFhYS1iYmJiLWNjY2MtZGRkZC1lZWVlZWVlZWVlZWUiLCJhdWQiOiJ4eHh4eHh4eHh4eHhleGFtcGxlIiwiZW1haWxfdmVyaWZpZWQiOnRydWUsInRva2VuX3VzZSI6ImlkIiwiYXV0aF90aW1lIjoxNTAwMDA5NDAwLCJpc3MiOiJodHRwczovL2NvZ25pdG8taWRwLnVzLWVhc3QtMS5hbWF6b25hd3MuY29tL3VzLWVhc3QtMV9leGFtcGxlIiwiZXhwIjoxNTg0NzIzNjE2LCJnaXZlbl9uYW1lIjoiSmFuZSIsImlhdCI6MTUwMDAwOTQwMCwiZW1haWwiOiJqYW5lZG9lQGV4YW1wbGUuY29tIiwianRpIjoiZDdlMTEzM2EtMWUzYS00MjMxLWFlN2ItMjhkODVlZTBiMTRkIn0.SN5n-A3kxboNYg0sGIOipVUksCdn6xRJmAK9kSZof10" # noqa
context = LambdaContext(*lambda_context_args)
with pytest.warns(None) as recorded_warnings:
new_event, new_context = authorizer.authorize(path, event, context)
assert event == new_event
assert context == new_context
assert len(recorded_warnings) == 1
warning = recorded_warnings[0]
assert issubclass(warning.category, UserWarning)
assert ('CognitoUserPoolAuthorizer for machine-to-machine '
'communicaiton is not supported in local mode. All requests '
'made against a route will be authorized to allow local '
'testing.') in str(warning.message)
class TestArnBuilder(object):
def test_can_create_basic_arn(self, arn_builder):
arn = ('arn:aws:execute-api:mars-west-1:123456789012:ymy8tbxw7b'
'/api/GET/resource')
built_arn = arn_builder.build_arn('GET', '/resource')
assert arn == built_arn
def test_can_create_root_arn(self, arn_builder):
arn = ('arn:aws:execute-api:mars-west-1:123456789012:ymy8tbxw7b'
'/api/GET//')
built_arn = arn_builder.build_arn('GET', '/')
assert arn == built_arn
def test_can_create_multi_part_arn(self, arn_builder):
arn = ('arn:aws:execute-api:mars-west-1:123456789012:ymy8tbxw7b'
'/api/GET/path/to/resource')
built_arn = arn_builder.build_arn('GET', '/path/to/resource')
assert arn == built_arn
def test_can_create_glob_method_arn(self, arn_builder):
arn = ('arn:aws:execute-api:mars-west-1:123456789012:ymy8tbxw7b'
'/api/*/resource')
built_arn = arn_builder.build_arn('*', '/resource')
assert arn == built_arn
def test_build_arn_with_query_params(self, arn_builder):
arn = ('arn:aws:execute-api:mars-west-1:123456789012:ymy8tbxw7b/api/'
'*/resource')
built_arn = arn_builder.build_arn('*', '/resource?foo=bar')
assert arn == built_arn
@pytest.mark.parametrize('arn,pattern', [
('mars-west-2:123456789012:ymy8tbxw7b/api/GET/foo',
'mars-west-2:123456789012:ymy8tbxw7b/api/GET/foo'
),
('mars-west-1:123456789012:ymy8tbxw7b/api/GET/foobar',
'mars-west-1:123456789012:ymy8tbxw7b/api/GET/*'
),
('mars-west-1:123456789012:ymy8tbxw7b/api/PUT/foobar',
'mars-west-1:123456789012:ymy8tbxw7b/api/???/foobar'
),
('mars-west-1:123456789012:ymy8tbxw7b/api/GET/foobar',
'mars-west-1:123456789012:ymy8tbxw7b/api/???/*'
),
('mars-west-1:123456789012:ymy8tbxw7b/api/GET/foobar',
'mars-west-1:123456789012:*/api/GET/*'
),
('mars-west-2:123456789012:ymy8tbxw7b/api/GET/foobar',
'*'
),
('mars-west-2:123456789012:ymy8tbxw7b/api/GET/foo.bar',
'mars-west-2:123456789012:ymy8tbxw7b/*/GET/*')
])
def test_can_allow_route_arns(arn, pattern):
prefix = 'arn:aws:execute-api:'
full_arn = '%s%s' % (prefix, arn)
full_pattern = '%s%s' % (prefix, pattern)
matcher = local.ARNMatcher(full_arn)
does_match = matcher.does_any_resource_match([full_pattern])
assert does_match is True
@pytest.mark.parametrize('arn,pattern', [
('mars-west-1:123456789012:ymy8tbxw7b/api/GET/foobar',
'mars-west-1:123456789012:ymy8tbxw7b/api/PUT/*'
),
('mars-west-1:123456789012:ymy8tbxw7b/api/GET/foobar',
'mars-west-1:123456789012:ymy8tbxw7b/api/??/foobar'
),
('mars-west-1:123456789012:ymy8tbxw7b/api/GET/foobar',
'mars-west-2:123456789012:ymy8tbxw7b/api/???/*'
),
('mars-west-2:123456789012:ymy8tbxw7b/api/GET/foobar',
'mars-west-2:123456789012:ymy8tbxw7b/*/GET/foo...')
])
def test_can_deny_route_arns(arn, pattern):
prefix = 'arn:aws:execute-api:'
full_arn = '%s%s' % (prefix, arn)
full_pattern = '%s%s' % (prefix, pattern)
matcher = local.ARNMatcher(full_arn)
does_match = matcher.does_any_resource_match([full_pattern])
assert does_match is False
@pytest.mark.parametrize('arn,patterns', [
('mars-west-1:123456789012:ymy8tbxw7b/api/GET/foobar',
[
'mars-west-1:123456789012:ymy8tbxw7b/api/PUT/*',
'mars-west-1:123456789012:ymy8tbxw7b/api/GET/foobar'
]),
('mars-west-1:123456789012:ymy8tbxw7b/api/GET/foobar',
[
'mars-west-1:123456789012:ymy8tbxw7b/api/GET/foobar',
'mars-west-1:123456789012:ymy8tbxw7b/api/PUT/*'
]),
('mars-west-1:123456789012:ymy8tbxw7b/api/GET/foobar',
[
'mars-west-1:123456789012:ymy8tbxw7b/api/PUT/foobar',
'*'
])
])
def test_can_allow_multiple_resource_arns(arn, patterns):
prefix = 'arn:aws:execute-api:'
full_arn = '%s%s' % (prefix, arn)
full_patterns = ['%s%s' % (prefix, pattern) for pattern in patterns]
matcher = local.ARNMatcher(full_arn)
does_match = matcher.does_any_resource_match(full_patterns)
assert does_match is True
@pytest.mark.parametrize('arn,patterns', [
('mars-west-1:123456789012:ymy8tbxw7b/api/GET/foobar',
[
'mars-west-1:123456789012:ymy8tbxw7b/api/POST/*',
'mars-west-1:123456789012:ymy8tbxw7b/api/PUT/foobar'
]),
('mars-west-1:123456789012:ymy8tbxw7b/api/GET/foobar',
[
'mars-west-2:123456789012:ymy8tbxw7b/api/GET/foobar',
'mars-west-2:123456789012:ymy8tbxw7b/api/*/*'
])
])
def test_can_deny_multiple_resource_arns(arn, patterns):
prefix = 'arn:aws:execute-api:'
full_arn = '%s%s' % (prefix, arn)
full_patterns = ['%s%s' % (prefix, pattern) for pattern in patterns]
matcher = local.ARNMatcher(full_arn)
does_match = matcher.does_any_resource_match(full_patterns)
assert does_match is False
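# Illustrative sketch (not part of the test suite) of the glob semantics the
# parametrized cases above rely on: in the resource portion of the ARN, '*'
# matches any run of characters and each '?' matches exactly one character,
# which is why '???' matches 'GET' while '??' does not.
#
#     matcher = local.ARNMatcher(
#         'arn:aws:execute-api:mars-west-1:123456789012:ymy8tbxw7b/api/GET/foo')
#     matcher.does_any_resource_match(
#         ['arn:aws:execute-api:mars-west-1:123456789012:ymy8tbxw7b/api/???/*'])
#     # -> True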
class TestLocalDevServer(object):
def test_can_delegate_to_server(self, sample_app):
http_server = mock.Mock(spec=HTTPServer)
dev_server = LocalDevServer(
sample_app, Config(), '0.0.0.0', 8000,
server_cls=lambda *args: http_server,
)
dev_server.handle_single_request()
http_server.handle_request.assert_called_with()
dev_server.serve_forever()
http_server.serve_forever.assert_called_with()
def test_host_and_port_forwarded_to_server_creation(self, sample_app):
provided_args = []
def args_recorder(*args):
provided_args[:] = list(args)
LocalDevServer(
sample_app, Config(), '0.0.0.0', 8000,
server_cls=args_recorder,
)
assert provided_args[0] == ('0.0.0.0', 8000)
def test_does_use_daemon_threads(self, sample_app):
server = LocalDevServer(
sample_app, Config(), '0.0.0.0', 8000
)
assert server.server.daemon_threads
|
py | b4135f8f01cc401db6d0408c7b360d382993776a | import collections.abc as cabc
from typing import Union, Optional, Sequence, Any, Mapping, List, Tuple, Callable
import numpy as np
from anndata import AnnData
from cycler import Cycler
from matplotlib.axes import Axes
from matplotlib.figure import Figure
from pandas.api.types import is_categorical_dtype
from matplotlib import pyplot as pl
from matplotlib import rcParams
from matplotlib import patheffects
from matplotlib.colors import Colormap
from .. import _utils
from .._utils import _IGraphLayout, _FontWeight, _FontSize
from .._docs import doc_adata_color_etc, doc_edges_arrows, doc_scatter_embedding, doc_show_save_ax
from ... import logging as logg
from ..._settings import settings
from ..._utils import sanitize_anndata, _doc_params
from ..._compat import Literal
VMinMax = Union[str, float, Callable[[Sequence[float]], float]]
@_doc_params(adata_color_etc=doc_adata_color_etc, edges_arrows=doc_edges_arrows, scatter_bulk=doc_scatter_embedding, show_save_ax=doc_show_save_ax)
def embedding(
adata: AnnData,
basis: str,
*,
color: Union[str, Sequence[str], None] = None,
gene_symbols: Optional[str] = None,
use_raw: Optional[bool] = None,
sort_order: bool = True,
edges: bool = False,
edges_width: float = 0.1,
edges_color: Union[str, Sequence[float], Sequence[str]] = 'grey',
arrows: bool = False,
arrows_kwds: Optional[Mapping[str, Any]] = None,
groups: Optional[str] = None,
components: Union[str, Sequence[str]] = None,
layer: Optional[str] = None,
projection: Literal['2d', '3d'] = '2d',
color_map: Union[Colormap, str, None] = None,
palette: Union[str, Sequence[str], Cycler, None] = None,
size: Union[float, Sequence[float], None] = None,
frameon: Optional[bool] = None,
legend_fontsize: Union[int, float, _FontSize, None] = None,
legend_fontweight: Union[int, _FontWeight] = 'bold',
legend_loc: str = 'right margin',
legend_fontoutline: Optional[int] = None,
vmax: Union[VMinMax, Sequence[VMinMax], None] = None,
vmin: Union[VMinMax, Sequence[VMinMax], None] = None,
add_outline: Optional[bool] = False,
outline_width: Tuple[float, float] = (0.3, 0.05),
outline_color: Tuple[str, str] = ('black', 'white'),
ncols: int = 4,
hspace: float = 0.25,
wspace: Optional[float] = None,
title: Union[str, Sequence[str], None] = None,
show: Optional[bool] = None,
save: Union[bool, str, None] = None,
ax: Optional[Axes] = None,
return_fig: Optional[bool] = None,
**kwargs,
) -> Union[Figure, Axes, None]:
"""\
    Scatter plot for a user-specified embedding basis (e.g. umap, pca, etc.)
Parameters
----------
basis
Name of the `obsm` basis to use.
{adata_color_etc}
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
"""
sanitize_anndata(adata)
if color_map is not None:
kwargs['cmap'] = color_map
if size is not None:
kwargs['s'] = size
if 'edgecolor' not in kwargs:
# by default turn off edge color. Otherwise, for
# very small sizes the edge will not reduce its size
# (https://github.com/theislab/scanpy/issues/293)
kwargs['edgecolor'] = 'none'
if groups:
if isinstance(groups, str):
groups = [groups]
if projection == '3d':
        from mpl_toolkits.mplot3d import Axes3D  # noqa: F401  (side effect: registers the '3d' projection)
args_3d = {'projection': '3d'}
else:
args_3d = {}
# Deal with Raw
if use_raw is None:
# check if adata.raw is set
use_raw = layer is None and adata.raw is not None
if use_raw and layer is not None:
raise ValueError(
"Cannot use both a layer and the raw representation. Was passed:"
f"use_raw={use_raw}, layer={layer}."
)
if wspace is None:
# try to set a wspace that is not too large or too small given the
# current figure size
wspace = 0.75 / rcParams['figure.figsize'][0] + 0.02
if adata.raw is None and use_raw:
raise ValueError(
"`use_raw` is set to True but AnnData object does not have raw. "
"Please check."
)
# turn color into a python list
color = [color] if isinstance(color, str) or color is None else list(color)
if title is not None:
# turn title into a python list if not None
title = [title] if isinstance(title, str) else list(title)
# get the points position and the components list
# (only if components is not None)
data_points, components_list = _get_data_points(adata, basis, projection, components)
    # Set up the layout.
    # Most of the code handles the case when multiple plots are required.
    # 'color' is a list of names to be plotted,
    # e.g. ['Gene1', 'louvain', 'Gene2'].
    # components_list is a list of components, e.g. [[0, 1], [1, 2]].
if (
(
not isinstance(color, str)
and isinstance(color, cabc.Sequence)
and len(color) > 1
) or len(components_list) > 1
):
if ax is not None:
raise ValueError(
"Cannot specify `ax` when plotting multiple panels "
"(each for a given value of 'color')."
)
if len(components_list) == 0:
components_list = [None]
# each plot needs to be its own panel
num_panels = len(color) * len(components_list)
fig, grid = _panel_grid(hspace, wspace, ncols, num_panels)
else:
if len(components_list) == 0:
components_list = [None]
grid = None
if ax is None:
fig = pl.figure()
ax = fig.add_subplot(111, **args_3d)
# turn vmax and vmin into a sequence
if isinstance(vmax, str) or not isinstance(vmax, cabc.Sequence):
vmax = [vmax]
if isinstance(vmin, str) or not isinstance(vmin, cabc.Sequence):
vmin = [vmin]
    if 's' in kwargs:
        size = kwargs.pop('s')
    if size is not None:
        # if size is any kind of sequence with one entry per observation,
        # convert it to an ndarray of floats
        import pandas.core.series
        if (
            isinstance(size, (
                cabc.Sequence,
                pandas.core.series.Series,
                np.ndarray,
            ))
            and len(size) == adata.shape[0]
        ):
            size = np.array(size, dtype=float)
    else:
        size = 120000 / adata.shape[0]
###
# make the plots
axs = []
import itertools
idx_components = range(len(components_list))
# use itertools.product to make a plot for each color and for each component
    # For example, if color=[gene1, gene2] and components=['1,2', '2,3'],
    # the plots are: [
    #     color=gene1, components=[1,2], color=gene1, components=[2,3],
    #     color=gene2, components=[1,2], color=gene2, components=[2,3],
# ]
for count, (value_to_plot, component_idx) in enumerate(itertools.product(color, idx_components)):
color_vector, categorical = _get_color_values(
adata, value_to_plot, layer=layer,
groups=groups, palette=palette,
use_raw=use_raw, gene_symbols=gene_symbols,
)
# check if higher value points should be plot on top
if sort_order is True and value_to_plot is not None and categorical is False:
order = np.argsort(color_vector)
color_vector = color_vector[order]
_data_points = data_points[component_idx][order, :]
            # check if 'size' is given (stored in kwargs['s'])
            # and reorder it.
if isinstance(size, np.ndarray):
size = np.array(size)[order]
else:
_data_points = data_points[component_idx]
# if plotting multiple panels, get the ax from the grid spec
# else use the ax value (either user given or created previously)
if grid:
ax = pl.subplot(grid[count], **args_3d)
axs.append(ax)
if not (settings._frameon if frameon is None else frameon):
ax.axis('off')
if title is None:
if value_to_plot is not None:
ax.set_title(value_to_plot)
else:
ax.set_title('')
else:
try:
ax.set_title(title[count])
except IndexError:
logg.warning(
"The title list is shorter than the number of panels. "
"Using 'color' value instead for some plots."
)
ax.set_title(value_to_plot)
# check vmin and vmax options
if categorical:
kwargs['vmin'] = kwargs['vmax'] = None
else:
kwargs['vmin'], kwargs['vmax'] = _get_vmin_vmax(vmin, vmax, count, color_vector)
# make the scatter plot
if projection == '3d':
cax = ax.scatter(
_data_points[:, 0], _data_points[:, 1], _data_points[:, 2],
marker=".", c=color_vector, rasterized=settings._vector_friendly,
**kwargs,
)
else:
if add_outline:
                # the default outline is a black edge followed by a
                # thin white edge added around connected clusters.
                # To add an outline,
                # three overlapping scatter plots are drawn:
                # first black dots with a slightly larger size,
                # then white dots a bit smaller, but still larger
                # than the final dots. Then the final dots are drawn
                # with some transparency.
bg_width, gap_width = outline_width
point = np.sqrt(size)
gap_size = (point + (point * gap_width)*2)**2
bg_size = (np.sqrt(gap_size) + (point * bg_width)*2)**2
                # the default black and white colors can be changed using
                # the outline_color parameter
bg_color, gap_color = outline_color
# remove edge from kwargs if present
# because edge needs to be set to None
kwargs['edgecolor'] = 'none'
# remove alpha for outline
alpha = kwargs.pop('alpha') if 'alpha' in kwargs else None
ax.scatter(
_data_points[:, 0], _data_points[:, 1], s=bg_size,
marker=".", c=bg_color, rasterized=settings._vector_friendly,
**kwargs)
ax.scatter(
_data_points[:, 0], _data_points[:, 1], s=gap_size,
marker=".", c=gap_color, rasterized=settings._vector_friendly,
**kwargs)
# if user did not set alpha, set alpha to 0.7
kwargs['alpha'] = 0.7 if alpha is None else alpha
if groups:
# first plot non-groups and then plot the
# required groups on top
in_groups = np.array(adata.obs[value_to_plot].isin(groups))
if isinstance(size, np.ndarray):
in_groups_size = size[in_groups]
not_in_groups_size = size[~in_groups]
else:
in_groups_size = not_in_groups_size = size
ax.scatter(
_data_points[~in_groups, 0],
_data_points[~in_groups, 1],
s=not_in_groups_size,
marker=".",
c=color_vector[~in_groups],
rasterized=settings._vector_friendly,
**kwargs,
)
cax = ax.scatter(
_data_points[in_groups, 0],
_data_points[in_groups, 1],
s=in_groups_size,
marker=".",
c=color_vector[in_groups],
rasterized=settings._vector_friendly,
**kwargs,
)
else:
cax = ax.scatter(
_data_points[:, 0],
_data_points[:, 1],
s=size,
marker=".",
c=color_vector,
rasterized=settings._vector_friendly,
**kwargs,
)
# remove y and x ticks
ax.set_yticks([])
ax.set_xticks([])
if projection == '3d':
ax.set_zticks([])
# set default axis_labels
name = _basis2name(basis)
if components is not None:
axis_labels = [name + str(x + 1) for x in components_list[component_idx]]
elif projection == '3d':
axis_labels = [name + str(x + 1) for x in range(3)]
else:
axis_labels = [name + str(x + 1) for x in range(2)]
ax.set_xlabel(axis_labels[0])
ax.set_ylabel(axis_labels[1])
if projection == '3d':
# shift the label closer to the axis
ax.set_zlabel(axis_labels[2], labelpad=-7)
ax.autoscale_view()
if edges:
_utils.plot_edges(ax, adata, basis, edges_width, edges_color)
if arrows:
_utils.plot_arrows(ax, adata, basis, arrows_kwds)
if value_to_plot is None:
                # if only dots were plotted without an associated value
                # there is no need to plot a legend or a colorbar
continue
if legend_fontoutline is not None:
path_effect = [patheffects.withStroke(
linewidth=legend_fontoutline,
foreground='w',
)]
else:
path_effect = None
_add_legend_or_colorbar(
adata, ax, cax, categorical, value_to_plot, legend_loc,
_data_points, legend_fontweight, legend_fontsize, path_effect,
groups, bool(grid),
)
if return_fig is True:
return fig
axs = axs if grid else ax
_utils.savefig_or_show(basis, show=show, save=save)
if show is False:
return axs
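# Minimal usage sketch (not part of scanpy itself; the dataset and the obs key
# below are assumptions for illustration):
#
#     import scanpy as sc
#     adata = sc.datasets.pbmc68k_reduced()   # assumed to carry an 'X_umap' embedding
#     sc.pl.embedding(adata, basis='umap', color=['louvain'])
#     # equivalent to sc.pl.umap(adata, color=['louvain'])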
def _panel_grid(hspace, wspace, ncols, num_panels):
from matplotlib import gridspec
n_panels_x = min(ncols, num_panels)
n_panels_y = np.ceil(num_panels / n_panels_x).astype(int)
# each panel will have the size of rcParams['figure.figsize']
fig = pl.figure(figsize=(
n_panels_x * rcParams['figure.figsize'][0] * (1 + wspace),
n_panels_y * rcParams['figure.figsize'][1]),
)
left = 0.2 / n_panels_x
bottom = 0.13 / n_panels_y
gs = gridspec.GridSpec(
nrows=n_panels_y, ncols=n_panels_x,
left=left, right=1 - (n_panels_x - 1) * left - 0.01 / n_panels_x,
bottom=bottom, top=1 - (n_panels_y - 1) * bottom - 0.1 / n_panels_y,
hspace=hspace, wspace=wspace,
)
return fig, gs
def _get_vmin_vmax(
vmin: Sequence[VMinMax],
vmax: Sequence[VMinMax],
index: int,
color_vector: Sequence[float],
) -> Tuple[Union[float, None], Union[float, None]]:
"""
    Evaluates the value of vmin and vmax, which could be a
    str, in which case it is interpreted as a percentile and should
    be specified in the form 'pN' where N is the percentile.
    Eg. for a percentile of 85 the format would be 'p85'.
    Fractional percentiles such as 'p99.9' are also accepted.
    Alternatively, vmin/vmax could be a function that is applied to
    the list of color values (`color_vector`). E.g.
    def my_vmax(color_vector): return np.percentile(color_vector, q=80)
Parameters
----------
    index
        The index of the plot
    color_vector
        List of values for the plot
Returns
-------
(vmin, vmax) containing None or float values
"""
out = []
for v_name, v in [('vmin', vmin), ('vmax', vmax)]:
if len(v) == 1:
            # this case usually happens when the user sets e.g. vmax=0.9, which
            # is internally converted into a list of length 1; the value is
            # expected to apply to all plots.
v_value = v[0]
else:
try:
v_value = v[index]
except IndexError:
logg.error(f"The parameter {v_name} is not valid. If setting multiple {v_name} values,"
f"check that the length of the {v_name} list is equal to the number "
"of plots. ")
v_value = None
if v_value is not None:
if isinstance(v_value, str) and v_value.startswith('p'):
try:
float(v_value[1:])
except ValueError:
logg.error(f"The parameter {v_name}={v_value} for plot number {index + 1} is not valid. "
f"Please check the correct format for percentiles.")
# interpret value of vmin/vmax as quantile with the following syntax 'p99.9'
v_value = np.percentile(color_vector, q=float(v_value[1:]))
elif callable(v_value):
# interpret vmin/vmax as function
v_value = v_value(color_vector)
if not isinstance(v_value, float):
logg.error(f"The return of the function given for {v_name} is not valid. "
"Please check that the function returns a number.")
v_value = None
else:
try:
float(v_value)
except ValueError:
logg.error(f"The given {v_name}={v_value} for plot number {index + 1} is not valid. "
f"Please check that the value given is a valid number, a string "
f"starting with 'p' for percentiles or a valid function.")
v_value = None
out.append(v_value)
return tuple(out)
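# Illustrative sketch of how the vmin/vmax formats above are resolved for one
# panel (values are made up):
#
#     values = np.array([0.0, 1.0, 2.0, 10.0])
#     _get_vmin_vmax(
#         vmin=['p25'], vmax=[lambda v: float(np.max(v))],
#         index=0, color_vector=values,
#     )
#     # -> (np.percentile(values, q=25.0), 10.0)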
def _wraps_plot_scatter(wrapper):
annots_orig = {
k: v for k, v in wrapper.__annotations__.items()
if k not in {'adata', 'kwargs'}
}
annots_scatter = {
k: v for k, v in embedding.__annotations__.items()
if k != 'basis'
}
wrapper.__annotations__ = {**annots_scatter, **annots_orig}
wrapper.__wrapped__ = embedding
return wrapper
# API
@_wraps_plot_scatter
@_doc_params(adata_color_etc=doc_adata_color_etc, edges_arrows=doc_edges_arrows, scatter_bulk=doc_scatter_embedding, show_save_ax=doc_show_save_ax)
def umap(adata, **kwargs) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in UMAP basis.
Parameters
----------
{adata_color_etc}
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
"""
return embedding(adata, 'umap', **kwargs)
@_wraps_plot_scatter
@_doc_params(adata_color_etc=doc_adata_color_etc, edges_arrows=doc_edges_arrows, scatter_bulk=doc_scatter_embedding, show_save_ax=doc_show_save_ax)
def tsne(adata, **kwargs) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in tSNE basis.
Parameters
----------
{adata_color_etc}
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
"""
return embedding(adata, 'tsne', **kwargs)
@_wraps_plot_scatter
@_doc_params(adata_color_etc=doc_adata_color_etc, edges_arrows=doc_edges_arrows, scatter_bulk=doc_scatter_embedding, show_save_ax=doc_show_save_ax)
def phate(adata, **kwargs) -> Union[List[Axes], None]:
"""\
Scatter plot in PHATE basis.
Parameters
----------
{adata_color_etc}
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False`, a list of :class:`~matplotlib.axes.Axes` objects. Every second element
corresponds to the 'right margin' drawing area for color bars and legends.
Examples
--------
>>> from anndata import AnnData
>>> import scanpy.external as sce
>>> import phate
>>> data, branches = phate.tree.gen_dla(
... n_dim=100,
... n_branch=20,
... branch_length=100,
... )
>>> data.shape
(2000, 100)
>>> adata = AnnData(data)
>>> adata.obs['branches'] = branches
>>> sce.tl.phate(adata, k=5, a=20, t=150)
>>> adata.obsm['X_phate'].shape
(2000, 2)
>>> sce.pl.phate(
... adata,
... color='branches',
... color_map='tab20',
... )
"""
return embedding(adata, 'phate', **kwargs)
@_wraps_plot_scatter
@_doc_params(adata_color_etc=doc_adata_color_etc, scatter_bulk=doc_scatter_embedding, show_save_ax=doc_show_save_ax)
def diffmap(adata, **kwargs) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in Diffusion Map basis.
Parameters
----------
{adata_color_etc}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
"""
return embedding(adata, 'diffmap', **kwargs)
@_wraps_plot_scatter
@_doc_params(adata_color_etc=doc_adata_color_etc, edges_arrows=doc_edges_arrows, scatter_bulk=doc_scatter_embedding, show_save_ax=doc_show_save_ax)
def draw_graph(
adata: AnnData,
layout: Optional[_IGraphLayout] = None,
**kwargs,
) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in graph-drawing basis.
Parameters
----------
{adata_color_etc}
layout
One of the :func:`~scanpy.tl.draw_graph` layouts.
By default, the last computed layout is used.
{edges_arrows}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
"""
if layout is None:
layout = str(adata.uns['draw_graph']['params']['layout'])
basis = 'draw_graph_' + layout
if 'X_' + basis not in adata.obsm_keys():
        raise ValueError('Did not find X_{} in adata.obsm. Did you compute layout {}?'
                         .format('draw_graph_' + layout, layout))
return embedding(adata, basis, **kwargs)
@_wraps_plot_scatter
@_doc_params(adata_color_etc=doc_adata_color_etc, scatter_bulk=doc_scatter_embedding, show_save_ax=doc_show_save_ax)
def pca(adata, **kwargs) -> Union[Axes, List[Axes], None]:
"""\
Scatter plot in PCA coordinates.
Parameters
----------
{adata_color_etc}
{scatter_bulk}
{show_save_ax}
Returns
-------
If `show==False` a :class:`~matplotlib.axes.Axes` or a list of it.
"""
return embedding(adata, 'pca', **kwargs)
# Helpers
def _get_data_points(adata, basis, projection, components) -> Tuple[List[np.ndarray], List[Tuple[int, int]]]:
"""
Returns the data points corresponding to the selected basis, projection and/or components.
    Because multiple components can be given (e.g. components=['1,2', '2,3']), the
    returned data are lists containing each of the components. When only one component
    is plotted the list length is 1.
Returns
-------
data_points
Each entry is a numpy array containing the data points
components
The cleaned list of components. Eg. [(0,1)] or [(0,1), (1,2)]
for components = [1,2] and components=['1,2', '2,3'] respectively
"""
if basis in adata.obsm.keys():
basis_key = basis
elif f"X_{basis}" in adata.obsm.keys():
basis_key = f"X_{basis}"
else:
raise KeyError(
f"Could not find entry in `obsm` for '{basis}'.\n"
f"Available keys are: {list(adata.obsm.keys())}."
)
n_dims = 2
if projection == '3d':
# check if the data has a third dimension
if adata.obsm[basis_key].shape[1] == 2:
if settings._low_resolution_warning:
logg.warning(
                    'Selected projection is "3d" but only two dimensions '
                    'are available. Only these two dimensions will be plotted.'
)
else:
n_dims = 3
if components == 'all':
from itertools import combinations
r_value = 3 if projection == '3d' else 2
_components_list = np.arange(adata.obsm[basis_key].shape[1]) + 1
components = [",".join(map(str, x)) for x in combinations(_components_list, r=r_value)]
components_list = []
offset = 0
if basis == 'diffmap': offset = 1
if components is not None:
# components have different formats, either a list with integers, a string
# or a list of strings.
if isinstance(components, str):
# eg: components='1,2'
components_list.append(tuple(int(x.strip()) - 1 + offset for x in components.split(',')))
elif isinstance(components, cabc.Sequence):
if isinstance(components[0], int):
# components=[1,2]
components_list.append(tuple(int(x) - 1 + offset for x in components))
else:
# in this case, the components are str
                # eg: components=['1,2'] or components=['1,2', '2,3']
# More than one component can be given and is stored
# as a new item of components_list
for comp in components:
components_list.append(tuple(int(x.strip()) - 1 + offset for x in comp.split(',')))
else:
raise ValueError("Given components: '{}' are not valid. Please check. "
"A valid example is `components='2,3'`")
# check if the components are present in the data
try:
data_points = []
for comp in components_list:
data_points.append(adata.obsm[basis_key][:, comp])
        except IndexError:
            raise ValueError("Given components: '{}' are not valid. Please check. "
                             "A valid example is `components='2,3'`".format(components))
if basis == 'diffmap':
# remove the offset added in the case of diffmap, such that
# plot_scatter can print the labels correctly.
components_list = [tuple(number-1 for number in comp) for comp in components_list]
else:
data_points = [adata.obsm[basis_key][:, offset:offset+n_dims]]
components_list = []
return data_points, components_list
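# Illustrative sketch of the components parsing above (values are made up):
#
#     components='2,3'          -> components_list == [(1, 2)]
#     components=[1, 2]         -> components_list == [(0, 1)]
#     components=['1,2', '2,3'] -> components_list == [(0, 1), (1, 2)]
#
# For basis='diffmap' an extra offset of one column is applied when slicing
# the data and removed again before returning, so axis labels stay correct.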
def _add_legend_or_colorbar(adata, ax, cax, categorical, value_to_plot, legend_loc,
scatter_array, legend_fontweight, legend_fontsize,
legend_fontoutline, groups, multi_panel):
"""
Adds a color bar or a legend to the given ax. A legend is added when the
data is categorical and a color bar is added when a continuous value was used.
"""
# add legends or colorbars
if categorical is True:
# add legend to figure
categories = list(adata.obs[value_to_plot].cat.categories)
colors = adata.uns[value_to_plot + '_colors']
if multi_panel is True:
# Shrink current axis by 10% to fit legend and match
# size of plots that are not categorical
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.91, box.height])
if groups is not None:
# only label groups with the respective color
colors = [colors[categories.index(x)] for x in groups]
categories = groups
if legend_loc == 'right margin':
for idx, label in enumerate(categories):
color = colors[idx]
# use empty scatter to set labels
ax.scatter([], [], c=color, label=label)
ax.legend(
frameon=False, loc='center left',
bbox_to_anchor=(1, 0.5),
ncol=(1 if len(categories) <= 14
else 2 if len(categories) <= 30 else 3),
fontsize=legend_fontsize)
if legend_loc == 'on data':
# identify centroids to put labels
all_pos = np.zeros((len(categories), 2))
for ilabel, label in enumerate(categories):
_scatter = scatter_array[adata.obs[value_to_plot] == label, :]
x_pos, y_pos = np.median(_scatter, axis=0)
ax.text(x_pos, y_pos, label,
weight=legend_fontweight,
verticalalignment='center',
horizontalalignment='center',
fontsize=legend_fontsize,
path_effects=legend_fontoutline)
all_pos[ilabel] = [x_pos, y_pos]
# this is temporary storage for access by other tools
_utils._tmp_cluster_pos = all_pos
else:
# add colorbar to figure
pl.colorbar(cax, ax=ax, pad=0.01, fraction=0.08, aspect=30)
def _get_color_values(
adata,
value_to_plot,
groups=None,
palette: Union[str, Sequence[str], Cycler, None] = None,
use_raw=False,
gene_symbols=None,
layer=None,
) -> Tuple[Union[np.ndarray, str], bool]:
"""
Returns the value or color associated to each data point.
For categorical data, the return value is list of colors taken
from the category palette or from the given `palette` value.
    For non-categorical data, the values are returned as-is.
Returns
-------
values
Values to plot
is_categorical
Are the values categorical?
"""
if value_to_plot is None:
return "lightgray", False
if (gene_symbols is not None
and value_to_plot not in adata.obs.columns
and value_to_plot not in adata.var_names):
# We should probably just make an index for this, and share it over runs
value_to_plot = adata.var.index[adata.var[gene_symbols] == value_to_plot][0] # TODO: Throw helpful error if this doesn't work
if use_raw and value_to_plot not in adata.obs.columns:
values = adata.raw.obs_vector(value_to_plot)
else:
values = adata.obs_vector(value_to_plot, layer=layer)
###
# when plotting, the color of the dots is determined for each plot
# the data is either categorical or continuous and the data could be in
# 'obs' or in 'var'
if not is_categorical_dtype(values):
return values, False
else: # is_categorical_dtype(values)
color_key = f"{value_to_plot}_colors"
if palette:
_utils._set_colors_for_categorical_obs(adata, value_to_plot, palette)
elif color_key not in adata.uns or \
len(adata.uns[color_key]) < len(values.categories):
# set a default palette in case that no colors or few colors are found
_utils._set_default_colors_for_categorical_obs(adata, value_to_plot)
else:
_utils._validate_palette(adata, value_to_plot)
color_vector = np.asarray(adata.uns[color_key])[values.codes]
# Handle groups
if groups:
color_vector = np.array(color_vector, dtype='<U15')
# set color to 'light gray' for all values
# that are not in the groups
color_vector[~adata.obs[value_to_plot].isin(groups)] = "lightgray"
return color_vector, True
def _basis2name(basis):
"""
converts the 'basis' into the proper name.
"""
component_name = (
'DC' if basis == 'diffmap'
else 'tSNE' if basis == 'tsne'
else 'UMAP' if basis == 'umap'
else 'PC' if basis == 'pca'
else basis.replace('draw_graph_', '').upper() if 'draw_graph' in basis
else basis)
return component_name
|
py | b4135fbcf392b3ca6ab17f17f63ff854d676d4df | """
Basic toolkits to process Action Potential signals
==================================================
metrics
rhythm
routines
signal
"""
__all__ = ['metrics',
'rhythm',
'routines',
'signal']
from . import metrics
from . import rhythm
from . import routines
from . import signal
|
py | b41361b4d42ddafbe665e99bfbd9f1f7ca228d31 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Relax Tuning Pass API"""
# TODO(sunggg):
# 1) Better Integration with MetaSchedule
# 1-1) Trace with MetaSchedule Trace
# 1-2) Database (includes serialization)
# 1-3) Other components (e.g., MetaSchedule Instr, Cost model)
# 2) Better example for subgraph-level tuning
# *** This is currently blocked by pattern matcher, modular compilation, etc. ***
from typing import Callable, Union, Dict, List, Optional
import copy
import sys
import itertools
import logging
import numpy as np
import tvm
from tvm.runtime import Object
from tvm.ir.module import IRModule
from tvm.relax import Expr
from tvm.ir.transform import PassContext, Pass
from tvm import meta_schedule
from tvm.meta_schedule.arg_info import TensorInfo
from tvm.meta_schedule.builder import BuilderInput, LocalBuilder
from tvm.meta_schedule.utils import get_global_func_with_default_on_worker
from tvm.meta_schedule.runner import (
EvaluatorConfig,
LocalRunner,
RunnerInput,
)
from tvm._ffi import register_object
from tvm._ffi.registry import register_func
from . import _ffi_api
logger = logging.getLogger("TuningAPI") # pylint: disable=invalid-name
# Default constraint func that always returns true
def f_default_constr(mod: IRModule): # pylint: disable=unused-argument
return True
@register_object("relax.transform.Choice")
class Choice(Object):
"""
A TVM object Choice that maintains a set of transformation and constraint functions.
    The transformation function should be applied when the constraint function returns true.
Parameters
----------
f_transform : Callable
Transformation function.
f_constr : Callable
Constraint function.
Examples
--------
The following code block defines a Choice.
.. code-block:: python
def apply(mod):
return relax.transform.FoldConstant()(mod)
def constr(mod):
return len(mod.functions) == 3
# Define a choice to apply constant folding only when IRModule has three functions.
choice = Choice(apply, constr)
"""
def __init__(self, f_transform: Callable, f_constr: Optional[Callable] = None):
"""Constructor
Parameters
----------
f_transform : Callable
Transformation function.
f_constr : Callable
Constraint function.
"""
f_constr = f_constr if f_constr else f_default_constr
self.__init_handle_by_constructor__(
_ffi_api.Choice, f_transform, f_constr # type: ignore # pylint: disable=no-member
)
def get_transform_func(self) -> Callable:
"""Getter for f_transform
Returns
-------
ret: Callable
registered transformation function
"""
return _ffi_api.ChoiceGetTransformFunc(self)
def get_constr_func(self) -> Callable:
"""Getter for f_constr
Returns
-------
ret: Callable
registered constraint function
"""
return _ffi_api.ChoiceGetConstrFunc(self)
def check_constr(self, mod: IRModule) -> bool:
"""Perform f_constr
Returns
-------
ret: Bool
Returns whether the IRModule satisfies the constraint or not
"""
return _ffi_api.ChoiceCheckConstr(self, mod)
@register_object("relax.transform.Knob")
class Knob(Object):
"""
A TVM object Knob that maintains a set of valid Choices.
By using Knobs, a tuning pass can generate candidates and define the search space.
Parameters
----------
name : str
Name of the knob.
choices: Union[List[Choice], Dict[str, Choice]]
A list of valid choices
Examples
--------
The following code block defines a Knob.
.. code-block:: python
def apply(mod):
return relax.transform.FoldConstant()(mod)
def noapply(mod):
return mod
choices = {"apply": Choice(apply), "noapply": Choice(noapply)}
# A knob manages a set of its valid choices
knob = Knob("MockTuningKnob", choices)
"""
def __init__(self, name: str, choices: Union[List[Choice], Dict[str, Choice]]):
"""Constructor."""
if isinstance(choices, list):
choices = {str(idx): val for idx, val in enumerate(choices)}
self.__init_handle_by_constructor__(
_ffi_api.Knob, name, choices # type: ignore # pylint: disable=no-member
)
def verify(self, decision: Union[str, int]) -> bool:
"""Verify if the decision is valid."""
if isinstance(decision, int):
decision = str(decision)
return _ffi_api.KnobVerify(self, decision)
def apply(self, mod: IRModule, decision: Union[str, int]) -> IRModule:
"""Get choice if a decision is valid."""
if isinstance(decision, int):
decision = str(decision)
return _ffi_api.KnobApply(self, mod, decision)
def __str__(self) -> str:
msg = f"{self.name} (# of choices: {len(self.choices)})\n"
for name, choice in self.choices.items():
msg += f" - {name}: {choice}\n"
return msg
@register_object("relax.transform.Trace")
class Trace(Object):
"""
A TVM object Trace logs the history of transformations (decisions).
Parameters
----------
in_mod : IRModule
Input IRModule.
knobs: Optional[List[Knob]]
A list of knobs applied in the trace.
decisions: Optional[List[Union[str, int]]]
A list of decisions made for each knob
Examples
--------
The following code block defines a Trace.
.. code-block:: python
trace = Trace(mod, [knob1, knob2, knob3], ["c1", "c0", "c3"])
assert trace.size == 3 # Length of history.
# 'out' contains IRModule that applies transformations in the trace.
out: IRModule = trace.add(knob4, "c2")
assert trace.size == 4 # Length of history.
trace.set_perf(0.03) # Set the performance number of the trace.
"""
def __init__(
self,
in_mod: IRModule,
knobs: Optional[List[Knob]] = None,
decisions: Optional[List[Union[str, int]]] = None,
):
"""Constructor."""
knobs = knobs if knobs else list()
decisions = (
[str(v) if isinstance(v, int) else v for v in decisions] if decisions else list()
)
self.__init_handle_by_constructor__(
_ffi_api.Trace, in_mod, knobs, decisions # type: ignore # pylint: disable=no-member
)
def verify(self) -> bool:
"""Verify if current history is valid."""
return _ffi_api.TraceVerify()
def add(self, knob: Knob, decision: Union[str, int]) -> IRModule:
"""Add & Apply new decision (with knob)."""
if isinstance(decision, int):
decision = str(decision)
return _ffi_api.TraceAdd(self, knob, decision)
def set_perf(self, perf: float) -> None:
"""Set performance number for the trace."""
return _ffi_api.TraceSetPerf(self, perf)
def __str__(self) -> str:
n = len(self.knobs)
msg = f"Trace length: {n}\n"
for idx in range(n):
msg += f"[{idx+1}] {self.knobs[idx].name}: {self.decisions[idx]}\n"
return msg
@register_func("relax.transform.default_generate_candidate")
def default_generate_candidate(
knobs: List[Knob], trace: Trace, eval_passes: Optional[List[Pass]] = None
) -> List[Trace]:
"""
Default function to generate the search space for a given trace by using registered choices.
    This function simply expands the candidate space as long as each knob's constraint is satisfied.
    To reduce the search space, a developer may expand each choice with a smarter search method
    (e.g., genetic search, multi-armed bandit).
    Note that each pass generates candidates without worrying about the interaction with other passes,
    i.e., it only uses its incoming trace/IRModule and Choices for candidate generation.
    This helps alleviate the complexity of joint-optimization significantly, since accounting for
    the interaction between optimizations is known to be extremely difficult.
Parameters
----------
knobs : List[Knob]
List of Knobs to consider to generate candidate for input trace.
trace: Trace
Input trace.
eval_passes: Optional[List[Pass]]
List of passes to consider to evaluate each candidate.
This will enable joint-optimization.
Return
----------
candidates: List[Trace]
List of candidate traces
"""
candidates = [trace]
    # Iterate over every knob and expand the candidates with each of its valid choices
for knob in knobs:
num = len(candidates)
for _ in range(num):
cur_trace = candidates.pop(0)
for decision in knob.choices.keys():
choice = knob.choices[decision]
# Generate new candidate when this condition satisfies.
if choice.check_constr(cur_trace.out_mod):
new_trace = copy.deepcopy(cur_trace)
new_trace.add(knob, decision)
candidates.append(new_trace)
# Expand candidates by using eval passes if provided. This will enable joint-optimization.
if eval_passes:
candidates = default_consider_eval_passes(candidates, eval_passes)
return candidates
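# Rough usage sketch (not part of the API; knob/choice names are hypothetical):
# with two knobs that each offer the choices {"apply", "noapply"}, one input
# trace expands into up to 2 x 2 = 4 candidate traces, one per decision path.
#
#     knobs = [
#         Knob("fold_constant", {"apply": Choice(apply_fold), "noapply": Choice(noop)}),
#         Knob("fuse_ops", {"apply": Choice(apply_fuse), "noapply": Choice(noop)}),
#     ]
#     candidates = default_generate_candidate(knobs, Trace(mod))
#     assert len(candidates) <= 4   # fewer if some constraints are not satisfied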
@register_func("relax.transform.default_consider_eval_passes")
def default_consider_eval_passes(
init_candidates: List[Trace], eval_passes: Optional[List[Pass]] = None
) -> List[Trace]:
"""
Default function to update traces with eval passes.
It visits each eval_pass in dfs order in transform.Sequential() and
returns the best possible candidate trace for each candidate.
Parameters
----------
init_candidates: List[Trace]
Initial candidates
eval_passes: Optional[List[Pass]]
List of passes to consider to evaluate each candidate.
This will enable joint-optimization.
Return
----------
candidates: List[Trace]
List of candidate traces
"""
if not eval_passes:
return init_candidates
eval_passes = list(eval_passes) if not isinstance(eval_passes, list) else eval_passes
ctx = PassContext.current()
candidates = []
for trace in init_candidates:
ctx.push_trace(trace)
tvm.transform.Sequential(eval_passes)(trace.out_mod)
new_trace = ctx.pop_trace()
# A new trace contains the best decisions in eval_passes
candidates.append(new_trace)
return candidates
@register_func("relax.transform.default_evaluate")
def default_evaluate(
candidates: List[Trace],
target_str: str,
params: Optional[Dict[str, np.ndarray]] = None,
builder: Optional[meta_schedule.builder.Builder] = None,
runner: Optional[meta_schedule.runner.Runner] = None,
) -> None:
"""
Default function to evaluate a set of candidate traces by using MetaSchedule builder/runner.
Parameters
----------
candidates: List[Trace]
List of traces to evaluate.
target_str: str,
Compilation target (e.g., llvm, cuda).
params: Optional[Dict[str, np.ndarray]]
Params to bind.
builder: Optional[meta_schedule.builder.Builder]
builder function. If not provided, default local builder will be used.
runner: Optional[meta_schedule.runner.Runner]
runner function. If not provided, default local runner will be used.
"""
ctx = PassContext.current()
target = tvm.target.Target(target_str)
# Setup default local builder if not provided
if builder is None:
def relax_build(
mod: IRModule,
target: tvm.target.Target,
params: Optional[Dict[str, np.ndarray]],
):
if params:
mod = tvm.relax.transform.BindParams("main", params)(mod)
relax_exec = tvm.relax.vm.build(mod, target)
return relax_exec.mod
builder = LocalBuilder(f_build=relax_build)
# Setup default local runner if not provided
if runner is None:
def relax_eval_func(rt_mod, device, evaluator_config, repeated_args):
relax_exec = tvm.relax.vm.Executable(rt_mod)
relax_vm = tvm.relax.VirtualMachine(exec=relax_exec, device=device)
evaluator = relax_vm.module.time_evaluator(
func_name="main",
dev=device,
number=evaluator_config.number,
repeat=evaluator_config.repeat,
min_repeat_ms=evaluator_config.min_repeat_ms,
)
repeated_costs: List[List[float]] = []
for args in repeated_args:
profile_result = evaluator(*args)
repeated_costs.append(profile_result.results)
costs = [float(cost) for cost in itertools.chain.from_iterable(repeated_costs)]
return costs
runner = LocalRunner(
evaluator_config=EvaluatorConfig(),
f_run_evaluator=relax_eval_func,
)
# set up clean up function
f_clean_build = get_global_func_with_default_on_worker("meta_schedule.remove_build_dir", None)
assert f_clean_build
    # Keep track of the number of evaluations (mostly for debugging purposes)
num_evals = 0
# Evaluation
for candidate in candidates:
# If this candidate is already evaluated, skip the measurement
if candidate.perf != -1:
continue
# Evaluate candidates
num_evals += 1
mod = candidate.out_mod
# Build candidate
(builder_result,) = builder.build([BuilderInput(mod, target, params)])
# Build error
# Assign the worst performance and move on to the next candidate.
if builder_result.artifact_path is None:
logger.warning(builder_result.error_msg)
candidate.set_perf(1e100)
continue
# If build passes, set up runner input and measure the performance.
runner_input = RunnerInput(
builder_result.artifact_path,
target_str,
args_info=[
TensorInfo(shape=[int(i) for i in p.shape], dtype=p.checked_type.dtype)
for p in mod["main"].params
], # convert list[Var] to list[TensorInfo]
)
(runner_future,) = runner.run([runner_input])
runner_result = runner_future.result()
# Runtime error
# Assign the worst performance and move on to the next candidate.
if runner_result.error_msg is not None:
logger.warning(runner_result.error_msg)
candidate.set_perf(1e100)
        # For valid measurements, compute the average and update the trace performance.
else:
perfs = []
for result in runner_result.run_secs:
if isinstance(result, tvm.tir.FloatImm):
result = result.value
assert isinstance(result, float)
assert result >= 0.0
perfs.append(result)
# Store the evaluation result
# TODO(sunggg): Match with MetaSchedule
candidate.set_perf(np.mean(perfs))
# Clean up the artifact
f_clean_build(builder_result.artifact_path)
ctx.inc_num_evals(num_evals)
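# Hedged end-to-end sketch tying the pieces together (the target string and the
# module/knob names are placeholders; assumes it runs inside a tuning pass's
# PassContext so the evaluation counter above is available):
#
#     candidates = default_generate_candidate([knob], Trace(mod))
#     default_evaluate(candidates, "llvm")
#     best = select_best_candidate(candidates)
#     tuned_mod = best.out_mod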
def select_best_candidate(candidates: List[Trace]) -> Trace:
"""
Select the best trace.
Parameters
----------
candidates: List[Trace]
Candidate traces
Return
----------
best_trace: Trace
Trace with the best performance
"""
best_perf, best_trace = sys.maxsize, None
for candidate in candidates:
avg = candidate.perf
# Select best one
if best_perf > avg:
best_perf = avg
best_trace = candidate
return best_trace
def get_trace(in_: Union[Trace, IRModule, Expr]) -> Trace:
"""
Getter for a trace wrapper.
Parameters
----------
in_: Union[Trace, IRModule, Expr]
Input entity
Return
----------
wrapped: Trace
Traced entity
"""
if isinstance(in_, Trace):
return in_
if isinstance(in_, IRModule):
return Trace(in_)
if isinstance(in_, Expr):
return Trace(tvm.IRModule.from_expr(in_))
raise Exception(f"Invalid input type for trace: {type(in_)}")
|
py | b413625eda5be1a095ad99e4fdf4dc596affc318 | # tests.test_classifier.test_class_prediction_error
# Testing for the ClassPredictionError visualizer
#
# Author: Benjamin Bengfort
# Author: Rebecca Bilbro
# Author: Larry Gray
# Created: Tue May 23 13:41:55 2017 -0700
#
# Copyright (C) 2017 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: test_rocauc.py [] [email protected] $
"""
Testing for the ClassPredictionError visualizer
"""
##########################################################################
## Imports
##########################################################################
import pytest
import matplotlib.pyplot as plt
from yellowbrick.exceptions import ModelError
from yellowbrick.datasets import load_occupancy
from yellowbrick.classifier.class_prediction_error import *
from sklearn.svm import LinearSVC, SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_multilabel_classification
from sklearn.model_selection import train_test_split as tts
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import MinMaxScaler
from unittest.mock import patch
from tests.base import VisualTestCase
try:
import pandas as pd
except ImportError:
pd = None
##########################################################################
## Tests
##########################################################################
class TestClassPredictionError(VisualTestCase):
"""
Test ClassPredictionError visualizer
"""
@pytest.mark.filterwarnings("ignore:could not determine class_counts_")
def test_numpy_integration(self):
"""
Assert no errors during class prediction error integration with NumPy arrays
"""
X, y = load_occupancy(return_dataset=True).to_numpy()
classes = ["unoccupied", "occupied"]
model = SVC(random_state=42)
model.fit(X, y)
visualizer = ClassPredictionError(model, classes=classes)
visualizer.score(X, y)
visualizer.finalize()
# AppVeyor and Linux conda fail due to non-text-based differences
# AppVeyor fails with RMS 13.161 - 13.289 (python - miniconda)
self.assert_images_similar(visualizer, tol=12.5, windows_tol=13.3)
@pytest.mark.filterwarnings("ignore:could not determine class_counts_")
@pytest.mark.skipif(pd is None, reason="test requires pandas")
def test_pandas_integration(self):
"""
Assert no errors during class prediction error integration with Pandas
"""
X, y = load_occupancy(return_dataset=True).to_pandas()
classes = ["unoccupied", "occupied"]
model = SVC(random_state=42)
model.fit(X, y)
visualizer = ClassPredictionError(model, classes=classes)
visualizer.score(X, y)
visualizer.finalize()
# AppVeyor and Linux conda fail due to non-text-based differences
# AppVeyor fails with RMS 13.161 - 13.289 (python - miniconda)
self.assert_images_similar(visualizer, tol=12.5, windows_tol=13.3)
def test_class_prediction_error_quickmethod(self):
"""
Test the ClassPredictionError quickmethod
"""
X, y = load_occupancy(return_dataset=True).to_numpy()
fig = plt.figure()
ax = fig.add_subplot()
clf = SVC(random_state=42)
viz = class_prediction_error(clf, X, y, ax=ax, show=False)
# Not sure why the tolerance must be so high for this
# Failing on travis with RMS 9.544
# AppVeyor and Linux conda fail due to non-text-based differences: RMS 12.961
# yellowbrick.exceptions.ImageComparisonFailure: images not close (RMS 15.538)
self.assert_images_similar(viz, tol=16, windows_tol=16)
def test_class_prediction_error_quickmethod_X_test_only(self):
"""
Test the ClassPredictionError quickmethod
"""
X, y = load_occupancy(return_dataset=True).to_numpy()
X_train, X_test, y_train, y_test = tts(
X, y, test_size=0.2, shuffle=True, random_state=42
)
fig = plt.figure()
ax = fig.add_subplot()
clf = LinearSVC(random_state=42)
with pytest.raises(
YellowbrickValueError,
match="must specify both X_test and y_test or neither",
):
class_prediction_error(
clf, X_train=X_train, y_train=y_train, X_test=X_test, ax=ax, show=False
)
def test_class_prediction_error_quickmethod_X_test_and_y_test(self):
"""
Test the ClassPredictionError quickmethod
"""
X, y = load_occupancy(return_dataset=True).to_numpy()
X_train, X_test, y_train, y_test = tts(
X, y, test_size=0.2, shuffle=True, random_state=42
)
fig = plt.figure()
ax = fig.add_subplot()
clf = SVC(random_state=42)
viz = class_prediction_error(
clf,
X_train=X_train,
y_train=y_train,
X_test=X_test,
y_test=y_test,
ax=ax,
show=False,
)
# Not sure why the tolerance must be so high for this
# Failing on travis with RMS 9.544
# AppVeyor and Linux conda fail due to non-text-based differences: RMS 12.961
self.assert_images_similar(viz, tol=13, windows_tol=13)
@pytest.mark.filterwarnings("ignore:could not determine class_counts_")
def test_classes_greater_than_indices(self):
"""
A model error should be raised when there are more classes in fit than score
"""
X, y = load_occupancy(return_dataset=True).to_numpy()
classes = ["unoccupied", "occupied", "partytime"]
model = LinearSVC(random_state=42)
model.fit(X, y)
with pytest.raises(ModelError):
visualizer = ClassPredictionError(model, classes=classes)
visualizer.score(X, y)
def test_classes_less_than_indices(self):
"""
Assert error when there is an attempt to filter classes
"""
X, y = load_occupancy(return_dataset=True).to_numpy()
classes = ["unoccupied"]
model = LinearSVC(random_state=42)
model.fit(X, y)
with pytest.raises(NotImplementedError):
visualizer = ClassPredictionError(model, classes=classes)
visualizer.score(X, y)
@pytest.mark.skip(reason="not implemented yet")
def test_no_classes_provided(self):
"""
Assert no errors when no classes are provided
"""
pass
def test_class_type(self):
"""
Test class must be either binary or multiclass type
"""
X, y = make_multilabel_classification()
model = RandomForestClassifier()
model.fit(X, y)
with pytest.raises(YellowbrickValueError):
visualizer = ClassPredictionError(model)
visualizer.score(X, y)
def test_score_returns_score(self):
"""
Test that ClassPredictionError score() returns a score between 0 and 1
"""
X, y = load_occupancy(return_dataset=True).to_numpy()
# Create and fit the visualizer
visualizer = ClassPredictionError(LinearSVC(random_state=42))
visualizer.fit(X, y)
# Score the visualizer
s = visualizer.score(X, y)
assert 0 <= s <= 1
def test_with_fitted(self):
"""
Test that visualizer properly handles an already-fitted model
"""
X, y = load_occupancy(return_dataset=True).to_numpy()
model = RandomForestClassifier().fit(X, y)
classes = ["unoccupied", "occupied"]
with patch.object(model, "fit") as mockfit:
oz = ClassPredictionError(model, classes=classes)
oz.fit(X, y)
mockfit.assert_not_called()
with patch.object(model, "fit") as mockfit:
oz = ClassPredictionError(model, classes=classes, is_fitted=True)
oz.fit(X, y)
mockfit.assert_not_called()
with patch.object(model, "fit") as mockfit:
oz = ClassPredictionError(model, classes=classes, is_fitted=False)
oz.fit(X, y)
mockfit.assert_called_once_with(X, y)
def test_within_pipeline(self):
"""
Test that visualizer can be accessed within a sklearn pipeline
"""
X, y = load_occupancy(return_dataset=True).to_pandas()
classes = ["unoccupied", "occupied"]
X_train, X_test, y_train, y_test = tts(
X, y, test_size=0.2, shuffle=True, random_state=42
)
model = Pipeline([
('minmax', MinMaxScaler()),
('cpe', ClassPredictionError(SVC(random_state=42), classes=classes))
])
model.fit(X_train, y_train)
model.score(X_test, y_test)
model['cpe'].finalize()
self.assert_images_similar(model['cpe'], tol=12.5, windows_tol=13.3)
def test_within_pipeline_quickmethod(self):
"""
Test that visualizer quickmethod can be accessed within a
sklearn pipeline
"""
X, y = load_occupancy(return_dataset=True).to_pandas()
X_train, X_test, y_train, y_test = tts(
X, y, test_size=0.2, shuffle=True, random_state=42
)
model = Pipeline([
('minmax', MinMaxScaler()),
('cpe', class_prediction_error(SVC(random_state=42),
X_train, y_train, X_test, y_test,
classes=["vacant", "occupied"], show=False))
])
self.assert_images_similar(model['cpe'], tol=12.5, windows_tol=13.3)
def test_pipeline_as_model_input(self):
"""
Test that visualizer can handle sklearn pipeline as model input
"""
X, y = load_occupancy(return_dataset=True).to_pandas()
classes = ["unoccupied", "occupied"]
X_train, X_test, y_train, y_test = tts(
X, y, test_size=0.2, shuffle=True, random_state=42
)
model = Pipeline([
('minmax', MinMaxScaler()),
('svc', SVC(random_state=42))
])
oz = ClassPredictionError(model, classes=classes)
oz.fit(X_train, y_train)
oz.score(X_test, y_test)
oz.finalize()
self.assert_images_similar(oz, tol=12.5, windows_tol=13.3)
def test_pipeline_as_model_input_quickmethod(self):
"""
Test that visualizer can handle sklearn pipeline as model input
within a quickmethod
"""
X, y = load_occupancy(return_dataset=True).to_pandas()
X_train, X_test, y_train, y_test = tts(
X, y, test_size=0.2, shuffle=True, random_state=42
)
model = Pipeline([
('minmax', MinMaxScaler()),
('svc', SVC(random_state=42))
])
oz = class_prediction_error(model,
X_train, y_train, X_test, y_test,
classes=["vacant", "occupied"],
show=False)
self.assert_images_similar(oz, tol=12.5, windows_tol=13.3) |
py | b413629ca4b3ea463c5d082471335d2eca84d1af | # THE FARM PROBLEM EDABIT SOLUTION:
# creating a function to solve the problem.
def animals(chickens, cows, pigs):
# returning the sum of all legs for the animals.
return (2 * chickens) + (4 * cows) + (4 * pigs) |
py | b41362aa75822392809b89fef60fdf1870645fc7 | # noinspection PyUnusedLocal
def fizz_buzz(number):
    def isFake(number):
        # a number is "fake" when it is odd
        return number % 2 == 1
    def isDeluxe(number):
        return (isDivThree(number) and containsThree(number)) or \
               (isDivFive(number) and containsFive(number))
    def containsThree(n):
        return '3' in str(n)
    def containsFive(n):
        return '5' in str(n)
    def isDivThree(n):
        return n % 3 == 0
    def isDivFive(n):
        return n % 5 == 0
if isDivThree(number) or containsThree(number):
op= 'fizz'
elif isDivFive(number) or containsFive(number):
op= 'buzz'
else:
op= str(number)
if (isDivFive(number) or containsFive(number)) and (isDivThree(number) or containsThree(number)) :
op= 'fizz buzz'
if (isDeluxe(number)):
if op==str(number):
if isFake(number):
op="fake deluxe"
else:
op="deluxe"
else:
if isFake(number):
op = op + " fake deluxe"
else:
op=op+" deluxe"
if number==0:
op= str(number)
return op
#test function
#for i in range (-4,35):
# print(i,fizz_buzz(i))
#print (10, fizz_buzz(10))
print (12, fizz_buzz(12))
#print (11, fizz_buzz(11))
print (1111, fizz_buzz(1111))
#print (15, fizz_buzz(15))
#print (33, fizz_buzz(33))
#print (22, fizz_buzz(22))
print (30, fizz_buzz(30))
#print (465, fizz_buzz(465))
#print (444, fizz_buzz(444))
#print (222, fizz_buzz(222))
print (555, fizz_buzz(555))
print (500, fizz_buzz(500))
|
py | b4136343555deb62a83726dbc60fc629cfdeb5df | #!/usr/bin/env python
"""
Simple script for checking what all is in `meetings.shelve` - our
persistent object for storing meeting history.
"""
from pprint import pprint
from utils import open_store
store = open_store()
for key in store:
print(" == {} == ".format(key))
pprint(store[key])
print()
store.close()
|
py | b41363698bfebf24380451b34f77b528f3fb49dd | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of multiheaded attention and self-attention layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class Attention(tf.layers.Layer):
"""Multi-headed attention layer."""
def __init__(self, hidden_size, num_heads, attention_dropout, train):
if hidden_size % num_heads != 0:
raise ValueError("Hidden size must be evenly divisible by the number of "
"heads.")
super(Attention, self).__init__()
self.hidden_size = hidden_size
self.num_heads = num_heads
self.attention_dropout = attention_dropout
self.train = train
# Layers for linearly projecting the queries, keys, and values.
self.q_dense_layer = tf.layers.Dense(hidden_size, use_bias=False, name="q")
self.k_dense_layer = tf.layers.Dense(hidden_size, use_bias=False, name="k")
self.v_dense_layer = tf.layers.Dense(hidden_size, use_bias=False, name="v")
self.output_dense_layer = tf.layers.Dense(hidden_size, use_bias=False,
name="output_transform")
def split_heads(self, x):
"""Split x into different heads, and transpose the resulting value.
    The tensor is transposed to ensure the inner dimensions hold the correct
values during the matrix multiplication.
Args:
x: A tensor with shape [batch_size, length, hidden_size]
Returns:
A tensor with shape [batch_size, num_heads, length, hidden_size/num_heads]
"""
with tf.name_scope("split_heads"):
batch_size = tf.shape(x)[0]
length = tf.shape(x)[1]
# Calculate depth of last dimension after it has been split.
depth = (self.hidden_size // self.num_heads)
# Split the last dimension
x = tf.reshape(x, [batch_size, length, self.num_heads, depth])
# Transpose the result
return tf.transpose(x, [0, 2, 1, 3])
def combine_heads(self, x):
"""Combine tensor that has been split.
Args:
x: A tensor [batch_size, num_heads, length, hidden_size/num_heads]
Returns:
A tensor with shape [batch_size, length, hidden_size]
"""
with tf.name_scope("combine_heads"):
batch_size = tf.shape(x)[0]
length = tf.shape(x)[2]
x = tf.transpose(x, [0, 2, 1, 3]) # --> [batch, length, num_heads, depth]
return tf.reshape(x, [batch_size, length, self.hidden_size])
def call(self, x, y, bias, cache=None):
"""Apply attention mechanism to x and y.
Args:
x: a tensor with shape [batch_size, length_x, hidden_size]
y: a tensor with shape [batch_size, length_y, hidden_size]
bias: attention bias that will be added to the result of the dot product.
cache: (Used during prediction) dictionary with tensors containing results
of previous attentions. The dictionary must have the items:
{"k": tensor with shape [batch_size, i, key_channels],
"v": tensor with shape [batch_size, i, value_channels]}
where i is the current decoded length.
Returns:
Attention layer output with shape [batch_size, length_x, hidden_size]
"""
# Linearly project the query (q), key (k) and value (v) using different
# learned projections. This is in preparation of splitting them into
# multiple heads. Multi-head attention uses multiple queries, keys, and
# values rather than regular attention (which uses a single q, k, v).
q = self.q_dense_layer(x)
k = self.k_dense_layer(y)
v = self.v_dense_layer(y)
if cache is not None:
# Combine cached keys and values with new keys and values.
k = tf.concat([cache["k"], k], axis=1)
v = tf.concat([cache["v"], v], axis=1)
# Update cache
cache["k"] = k
cache["v"] = v
# Split q, k, v into heads.
q = self.split_heads(q)
k = self.split_heads(k)
v = self.split_heads(v)
# Scale q to prevent the dot product between q and k from growing too large.
depth = (self.hidden_size // self.num_heads)
q *= depth ** -0.5
# Calculate dot product attention
    # q: [batch_size, num_heads, length_x, hidden_size/num_heads]
    # k: [batch_size, num_heads, length_y, hidden_size/num_heads]
logits = tf.matmul(q, k, transpose_b=True)
logits += bias
weights = tf.nn.softmax(logits, name="attention_weights")
if self.train:
weights = tf.nn.dropout(weights, 1.0 - self.attention_dropout)
    # v: [batch_size, num_heads, length_y, hidden_size/num_heads]
    # weights: [batch_size, num_heads, length_x, length_y]
attention_output = tf.matmul(weights, v)
# Recombine heads --> [batch_size, length, hidden_size]
attention_output = self.combine_heads(attention_output)
# Run the combined outputs through another linear projection layer.
attention_output = self.output_dense_layer(attention_output)
return attention_output
class SelfAttention(Attention):
"""Multiheaded self-attention layer."""
def call(self, x, bias, cache=None):
return super(SelfAttention, self).call(x, x, bias, cache)
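# Illustrative usage sketch (a minimal sketch assuming the TF 1.x graph-mode API
# used above; hidden_size=512 and num_heads=8 are arbitrary example values):
#
#   layer = SelfAttention(hidden_size=512, num_heads=8,
#                         attention_dropout=0.1, train=True)
#   # x: [batch_size, length, hidden_size]
#   # bias: attention bias broadcastable to [batch_size, num_heads, length, length]
#   # y = layer(x, bias)   ->   [batch_size, length, hidden_size]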
|
py | b4136472465a40dda944b377cb53360c97f78f81 | """Dataset loading module.
Adapted from: https://github.com/YannDubs/disentangling-vae"""
import abc
import glob
import hashlib
import h5py
import logging
import numpy as np
import os
import subprocess
import tarfile
import torch
import urllib.request
import zipfile
from PIL import Image
from skimage.io import imread
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms
from tqdm import tqdm
DIR = os.path.abspath(os.path.dirname(__file__))
COLOUR_BLACK = 0
COLOUR_WHITE = 1
DATASETS_DICT = {"mnist": "MNIST",
"fashion": "FashionMNIST",
"nmnist": "NoisyMNIST",
"bmnist": "BinarizedMNIST",
"dsprites": "DSprites",
"celeba": "CelebA",
"chairs": "Chairs"}
DATASETS = list(DATASETS_DICT.keys())
def get_dataset(dataset):
"""Return the correct dataset."""
dataset = dataset.lower()
try:
        # eval because the class name is stored as a string (so DATASETS_DICT can sit at the top of the file)
return eval(DATASETS_DICT[dataset])
except KeyError:
raise ValueError(f"Unkown dataset: {dataset}")
def get_img_size(dataset):
"""Return the correct image size."""
return get_dataset(dataset).img_size
def get_background(dataset):
"""Return the image background color."""
return get_dataset(dataset).background_color
def get_dataloaders(dataset, train=True, noise=None, root=None,
pin_memory=True, batch_size=128,
logger=logging.getLogger(__name__), **kwargs):
"""A generic data loader
Parameters
----------
dataset : {"mnist", "fashion", "dsprites", "celeba", "chairs"}
Name of the dataset to load
root : str
Path to the dataset root. If `None` uses the default one.
kwargs :
Additional arguments to `DataLoader`. Default values are modified.
"""
    pin_memory = pin_memory and torch.cuda.is_available()  # only pin if a GPU is available
Dataset = get_dataset(dataset)
if root is None:
        dataset = Dataset(train=train, logger=logger)
else:
if noise is None:
dataset = Dataset(train=train, root=root, logger=logger)
else:
dataset = Dataset(train=train, noise=noise, root=root,
logger=logger)
return DataLoader(dataset,
batch_size=batch_size,
shuffle=train,
pin_memory=pin_memory,
**kwargs)
class DisentangledDataset(Dataset, abc.ABC):
"""Base Class for disentangled VAE datasets.
Parameters
----------
root : string
Root directory of dataset.
transforms_list : list
List of `torch.vision.transforms` to apply to the data when loading it.
"""
def __init__(self, root, transforms_list=[], logger=logging.getLogger(__name__)):
self.root = root
self.train_data = os.path.join(root, type(self).files["train"])
self.transforms = transforms.Compose(transforms_list)
self.logger = logger
if not os.path.isdir(root):
self.logger.info("Downloading {} ...".format(str(type(self))))
self.download()
self.logger.info("Finished Downloading.")
def __len__(self):
return len(self.imgs)
@abc.abstractmethod
def __getitem__(self, idx):
"""Get the image of `idx`.
Return
------
sample : torch.Tensor
Tensor in [0.,1.] of shape `img_size`.
"""
pass
@abc.abstractmethod
def download(self):
"""Download the dataset. """
pass
class DSprites(DisentangledDataset):
"""DSprites Dataset from [1].
Disentanglement test Sprites dataset.Procedurally generated 2D shapes, from 6
disentangled latent factors. This dataset uses 6 latents, controlling the color,
shape, scale, rotation and position of a sprite. All possible variations of
the latents are present. Ordering along dimension 1 is fixed and can be mapped
back to the exact latent values that generated that image. Pixel outputs are
different. No noise added.
Notes
-----
- Link : https://github.com/deepmind/dsprites-dataset/
    - metadata is hard coded because of issues loading the Python 2 metadata under Python 3
Parameters
----------
root : string
Root directory of dataset.
References
----------
[1] Higgins, I., Matthey, L., Pal, A., Burgess, C., Glorot, X., Botvinick,
M., ... & Lerchner, A. (2017). beta-vae: Learning basic visual concepts
with a constrained variational framework. In International Conference
on Learning Representations.
"""
urls = {"train": "https://github.com/deepmind/dsprites-dataset/blob/master/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz?raw=true"}
files = {"train": "dsprite_train.npz"}
lat_names = ('shape', 'scale', 'orientation', 'posX', 'posY')
lat_sizes = np.array([3, 6, 40, 32, 32])
img_size = (1, 64, 64)
background_color = COLOUR_BLACK
lat_values = {
'posX': np.array([0., 0.03225806, 0.06451613, 0.09677419, 0.12903226,
0.16129032, 0.19354839, 0.22580645, 0.25806452,
0.29032258, 0.32258065, 0.35483871, 0.38709677,
0.41935484, 0.4516129, 0.48387097, 0.51612903,
0.5483871, 0.58064516, 0.61290323, 0.64516129,
0.67741935, 0.70967742, 0.74193548, 0.77419355,
0.80645161, 0.83870968, 0.87096774, 0.90322581,
0.93548387, 0.96774194, 1.]),
'posY': np.array([0., 0.03225806, 0.06451613, 0.09677419, 0.12903226,
0.16129032, 0.19354839, 0.22580645, 0.25806452,
0.29032258, 0.32258065, 0.35483871, 0.38709677,
0.41935484, 0.4516129, 0.48387097, 0.51612903,
0.5483871, 0.58064516, 0.61290323, 0.64516129,
0.67741935, 0.70967742, 0.74193548, 0.77419355,
0.80645161, 0.83870968, 0.87096774, 0.90322581,
0.93548387, 0.96774194, 1.]),
'scale': np.array([0.5, 0.6, 0.7, 0.8, 0.9, 1.]),
'orientation': np.array([0., 0.16110732, 0.32221463, 0.48332195,
0.64442926, 0.80553658, 0.96664389, 1.12775121,
1.28885852, 1.44996584, 1.61107316, 1.77218047,
1.93328779, 2.0943951, 2.25550242, 2.41660973,
2.57771705, 2.73882436, 2.89993168, 3.061039,
3.22214631, 3.38325363, 3.54436094, 3.70546826,
3.86657557, 4.02768289, 4.1887902, 4.34989752,
4.51100484, 4.67211215, 4.83321947, 4.99432678,
5.1554341, 5.31654141, 5.47764873, 5.63875604,
5.79986336, 5.96097068, 6.12207799, 6.28318531]),
'shape': np.array([1., 2., 3.]),
'color': np.array([1.])}
def __init__(self, train=True, root=os.path.join(DIR, '../data/dsprites/'), **kwargs):
super().__init__(root, [transforms.ToTensor()], **kwargs)
dataset_zip = np.load(self.train_data)
self.imgs = dataset_zip['imgs']
self.lat_values = dataset_zip['latents_values']
def download(self):
"""Download the dataset."""
os.makedirs(self.root)
subprocess.check_call(["curl", "-L", type(self).urls["train"],
"--output", self.train_data])
def __getitem__(self, idx):
"""Get the image of `idx`
Return
------
sample : torch.Tensor
Tensor in [0.,1.] of shape `img_size`.
lat_value : np.array
Array of length 6, that gives the value of each factor of variation.
"""
        # stored images are binary with shape (H x W), so multiply by 255 to get
        # pixel values and add a channel dimension
sample = np.expand_dims(self.imgs[idx] * 255, axis=-1)
# ToTensor transforms numpy.ndarray (H x W x C) in the range
# [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
sample = self.transforms(sample)
lat_value = self.lat_values[idx]
return sample, lat_value
class CelebA(DisentangledDataset):
"""CelebA Dataset from [1].
CelebFaces Attributes Dataset (CelebA) is a large-scale face attributes dataset
with more than 200K celebrity images, each with 40 attribute annotations.
The images in this dataset cover large pose variations and background clutter.
CelebA has large diversities, large quantities, and rich annotations, including
10,177 number of identities, and 202,599 number of face images.
Notes
-----
- Link : http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html
Parameters
----------
root : string
Root directory of dataset.
References
----------
[1] Liu, Z., Luo, P., Wang, X., & Tang, X. (2015). Deep learning face
attributes in the wild. In Proceedings of the IEEE international conference
on computer vision (pp. 3730-3738).
"""
urls = {"train": "https://s3-us-west-1.amazonaws.com/udacity-dlnfd/datasets/celeba.zip"}
files = {"train": "img_align_celeba"}
img_size = (3, 64, 64)
background_color = COLOUR_WHITE
def __init__(self, root=os.path.join(DIR, '../data/celeba'), **kwargs):
super().__init__(root, [transforms.ToTensor()], **kwargs)
self.imgs = glob.glob(self.train_data + '/*')
def download(self):
"""Download the dataset."""
save_path = os.path.join(self.root, 'celeba.zip')
os.makedirs(self.root)
subprocess.check_call(["curl", "-L", type(self).urls["train"],
"--output", save_path])
hash_code = '00d2c5bc6d35e252742224ab0c1e8fcb'
assert hashlib.md5(open(save_path, 'rb').read()).hexdigest() == hash_code, \
'{} file is corrupted. Remove the file and try again.'.format(save_path)
with zipfile.ZipFile(save_path) as zf:
self.logger.info("Extracting CelebA ...")
zf.extractall(self.root)
os.remove(save_path)
self.logger.info("Resizing CelebA ...")
preprocess(self.train_data, size=type(self).img_size[1:])
def __getitem__(self, idx):
"""Get the image of `idx`
Return
------
sample : torch.Tensor
Tensor in [0.,1.] of shape `img_size`.
placeholder :
            Placeholder value as there are no targets.
"""
img_path = self.imgs[idx]
# img values already between 0 and 255
img = imread(img_path)
# put each pixel in [0.,1.] and reshape to (C x H x W)
img = self.transforms(img)
        # no label, so return 0 (we can't return None because the dataloader
        # requires a value)
return img, 0
class Chairs(datasets.ImageFolder):
"""Chairs Dataset from [1].
Notes
-----
- Link : https://www.di.ens.fr/willow/research/seeing3Dchairs
Parameters
----------
root : string
Root directory of dataset.
References
----------
[1] Aubry, M., Maturana, D., Efros, A. A., Russell, B. C., & Sivic, J. (2014).
Seeing 3d chairs: exemplar part-based 2d-3d alignment using a large dataset
of cad models. In Proceedings of the IEEE conference on computer vision
and pattern recognition (pp. 3762-3769).
"""
urls = {"train": "https://www.di.ens.fr/willow/research/seeing3Dchairs/data/rendered_chairs.tar"}
files = {"train": "chairs_64"}
img_size = (1, 64, 64)
background_color = COLOUR_WHITE
def __init__(self, train=True, root=os.path.join(DIR, '../data/chairs'),
logger=logging.getLogger(__name__)):
self.root = root
self.train_data = os.path.join(root, type(self).files["train"])
self.transforms = transforms.Compose([transforms.Grayscale(),
transforms.ToTensor()])
self.logger = logger
if not os.path.isdir(root):
self.logger.info("Downloading {} ...".format(str(type(self))))
self.download()
self.logger.info("Finished Downloading.")
super().__init__(self.train_data, transform=self.transforms)
def download(self):
"""Download the dataset."""
save_path = os.path.join(self.root, 'chairs.tar')
os.makedirs(self.root)
subprocess.check_call(["curl", type(self).urls["train"],
"--output", save_path])
self.logger.info("Extracting Chairs ...")
tar = tarfile.open(save_path)
tar.extractall(self.root)
tar.close()
os.rename(os.path.join(self.root, 'rendered_chairs'), self.train_data)
os.remove(save_path)
self.logger.info("Preprocessing Chairs ...")
preprocess(os.path.join(self.train_data, '*/*'), # root/*/*/*.png structure
size=type(self).img_size[1:],
center_crop=(400, 400))
class MNIST(datasets.MNIST):
"""Mnist wrapper. Docs: `datasets.MNIST.`"""
img_size = (1, 32, 32)
background_color = COLOUR_BLACK
def __init__(self, train=True, root=os.path.join(DIR, '../data/mnist'), **kwargs):
super().__init__(root,
train=train,
download=True,
transform=transforms.Compose([
transforms.Pad(2),
transforms.ToTensor()
]))
class FashionMNIST(datasets.FashionMNIST):
"""Fashion Mnist wrapper. Docs: `datasets.FashionMNIST.`"""
img_size = (1, 32, 32)
background_color = COLOUR_BLACK
def __init__(self, train=True,
root=os.path.join(DIR, '../data/fashionMnist'), **kwargs):
super().__init__(root,
train=train,
download=True,
transform=transforms.Compose([
transforms.Pad(2),
transforms.ToTensor()
]))
class NoisyMNIST(Dataset):
"""Noisy MNIST wrapper."""
img_size = (1, 32, 32)
background_color = COLOUR_BLACK
def __init__(self, train=True, noise=None,
root=os.path.join(DIR, '../data/mnist'), **kwargs):
super().__init__()
if train:
mnist_data = torch.load(
os.path.join(root, 'MNIST', 'processed', 'training.pt'))
else:
mnist_data = torch.load(
os.path.join(root, 'MNIST', 'processed', 'test.pt'))
self.x = mnist_data[0]
self.mnist_transforms = transforms.Compose([
transforms.ToPILImage(),
transforms.Pad(2),
transforms.ToTensor()
])
        self.noise = noise
        if noise is not None:
            self.add_noise = AddGaussianNoise(mean=0.0, std=noise)
self.len = len(self.x)
def __len__(self):
return self.len
def __getitem__(self, idx):
input = self.mnist_transforms(self.x[idx:idx+1])
if self.noise is not None:
input = self.add_noise(input)
output = self.mnist_transforms(self.x[idx:idx+1])
return input, output
class BinarizedMNIST(Dataset):
""" Binarized MNIST dataset, proposed in
http://proceedings.mlr.press/v15/larochelle11a/larochelle11a.pdf """
train_file = 'binarized_mnist_train.amat'
val_file = 'binarized_mnist_valid.amat'
test_file = 'binarized_mnist_test.amat'
img_size = (1, 32, 32)
background_color = COLOUR_BLACK
def __init__(self, train=True, root=os.path.join(DIR, '../data/bmnist'),
logger=logging.getLogger(__name__)):
# we ignore transform.
self.root = root
self.train = train # training set or test set
if not self._check_exists():
self.download()
self.data = self._get_data(train=train)
self.mnist_transforms = transforms.Compose([
transforms.Pad(2),
transforms.ToTensor()
])
def __getitem__(self, index):
img = self.data[index]
img = Image.fromarray(img)
img = self.mnist_transforms(img)
# img = transforms.Pad(2)(transforms.ToTensor()(img)).type(torch.FloatTensor)
return img.float(), torch.tensor(-1) # Meaningless tensor instead of target
def __len__(self):
return len(self.data)
def _get_data(self, train=True):
with h5py.File(os.path.join(self.root, 'data.h5'), 'r') as hf:
data = hf.get('train' if train else 'test')
data = np.array(data)
return data
def get_mean_img(self):
return self.data.mean(0).flatten()
def download(self):
if self._check_exists():
return
if not os.path.exists(self.root):
os.makedirs(self.root)
print('Downloading MNIST with fixed binarization...')
for dataset in ['train', 'valid', 'test']:
filename = 'binarized_mnist_{}.amat'.format(dataset)
url = 'http://www.cs.toronto.edu/~larocheh/public/datasets/binarized_mnist/binarized_mnist_{}.amat'.format(dataset)
print('Downloading from {}...'.format(url))
local_filename = os.path.join(self.root, filename)
urllib.request.urlretrieve(url, local_filename)
print('Saved to {}'.format(local_filename))
def filename_to_np(filename):
with open(filename) as f:
lines = f.readlines()
return np.array([[int(i)for i in line.split()] for line in lines]).astype('int8')
train_data = np.concatenate([filename_to_np(os.path.join(self.root, self.train_file)),
filename_to_np(os.path.join(self.root, self.val_file))])
        test_data = filename_to_np(os.path.join(self.root, self.test_file))
with h5py.File(os.path.join(self.root, 'data.h5'), 'w') as hf:
hf.create_dataset('train', data=train_data.reshape(-1, 28, 28))
hf.create_dataset('test', data=test_data.reshape(-1, 28, 28))
print('Done!')
def _check_exists(self):
return os.path.exists(os.path.join(self.root, 'data.h5'))
# HELPERS
def preprocess(root, size=(64, 64), img_format='JPEG', center_crop=None):
"""Preprocess a folder of images.
Parameters
----------
root : string
Root directory of all images.
size : tuple of int
Size (width, height) to rescale the images. If `None` don't rescale.
img_format : string
Format to save the image in. Possible formats:
https://pillow.readthedocs.io/en/3.1.x/handbook/image-file-formats.html.
center_crop : tuple of int
Size (width, height) to center-crop the images. If `None` don't center-crop.
"""
imgs = []
for ext in [".png", ".jpg", ".jpeg"]:
imgs += glob.glob(os.path.join(root, '*' + ext))
for img_path in tqdm(imgs):
img = Image.open(img_path)
width, height = img.size
        if size is not None and (width != size[1] or height != size[0]):
            img = img.resize(size, Image.ANTIALIAS)
if center_crop is not None:
new_width, new_height = center_crop
left = (width - new_width) // 2
top = (height - new_height) // 2
right = (width + new_width) // 2
bottom = (height + new_height) // 2
            img = img.crop((left, top, right, bottom))
img.save(img_path, img_format)
class AddGaussianNoise(object):
def __init__(self, mean=0.0, std=1.0):
self.std = std
self.mean = mean
def __call__(self, tensor):
return tensor + torch.randn(tensor.size()) * self.std + self.mean
def __repr__(self):
return self.__class__.__name__ + f'(mean={self.mean}, std={self.std})'
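# Illustrative usage sketch (not a definitive API; MNIST is downloaded on first
# use, and the batch_size/std values below are arbitrary examples):
#
#   loader = get_dataloaders("mnist", train=True, batch_size=64)
#   imgs, _ = next(iter(loader))                       # imgs: [64, 1, 32, 32] in [0., 1.]
#   noisy = AddGaussianNoise(mean=0.0, std=0.1)(imgs)  # same shape, with Gaussian noise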
|
py | b41364760f9d9a282bc2e33711fee428e445beb7 | """
The main purpose of this module is to expose LinkCollector.collect_links().
"""
import cgi
import functools
import itertools
import logging
import mimetypes
import os
import re
from collections import OrderedDict
from pip._vendor import html5lib, requests
from pip._vendor.distlib.compat import unescape
from pip._vendor.requests.exceptions import RetryError, SSLError
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip._internal.exceptions import NetworkConnectionError
from pip._internal.models.link import Link
from pip._internal.models.search_scope import SearchScope
from pip._internal.network.utils import raise_for_status
from pip._internal.utils.compat import lru_cache
from pip._internal.utils.filetypes import is_archive_file
from pip._internal.utils.misc import pairwise, redact_auth_from_url
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.utils.urls import path_to_url, url_to_path
from pip._internal.vcs import is_url, vcs
if MYPY_CHECK_RUNNING:
import xml.etree.ElementTree
from optparse import Values
from typing import (
Callable,
Iterable,
List,
MutableMapping,
Optional,
Sequence,
Tuple,
Union,
)
from pip._vendor.requests import Response
from pip._internal.network.session import PipSession
HTMLElement = xml.etree.ElementTree.Element
ResponseHeaders = MutableMapping[str, str]
logger = logging.getLogger(__name__)
def _match_vcs_scheme(url):
# type: (str) -> Optional[str]
"""Look for VCS schemes in the URL.
Returns the matched VCS scheme, or None if there's no match.
"""
for scheme in vcs.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
return scheme
return None
class _NotHTML(Exception):
def __init__(self, content_type, request_desc):
# type: (str, str) -> None
super(_NotHTML, self).__init__(content_type, request_desc)
self.content_type = content_type
self.request_desc = request_desc
def _ensure_html_header(response):
# type: (Response) -> None
"""Check the Content-Type header to ensure the response contains HTML.
Raises `_NotHTML` if the content type is not text/html.
"""
content_type = response.headers.get("Content-Type", "")
if not content_type.lower().startswith("text/html"):
raise _NotHTML(content_type, response.request.method)
class _NotHTTP(Exception):
pass
def _ensure_html_response(url, session):
# type: (str, PipSession) -> None
"""Send a HEAD request to the URL, and ensure the response contains HTML.
Raises `_NotHTTP` if the URL is not available for a HEAD request, or
`_NotHTML` if the content type is not text/html.
"""
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
if scheme not in {'http', 'https'}:
raise _NotHTTP()
resp = session.head(url, allow_redirects=True)
raise_for_status(resp)
_ensure_html_header(resp)
def _get_html_response(url, session):
# type: (str, PipSession) -> Response
"""Access an HTML page with GET, and return the response.
This consists of three parts:
1. If the URL looks suspiciously like an archive, send a HEAD first to
check the Content-Type is HTML, to avoid downloading a large file.
Raise `_NotHTTP` if the content type cannot be determined, or
`_NotHTML` if it is not HTML.
2. Actually perform the request. Raise HTTP exceptions on network failures.
3. Check the Content-Type header to make sure we got HTML, and raise
`_NotHTML` otherwise.
"""
if is_archive_file(Link(url).filename):
_ensure_html_response(url, session=session)
logger.debug('Getting page %s', redact_auth_from_url(url))
resp = session.get(
url,
headers={
"Accept": "text/html",
            # We don't want to blindly return cached data for
            # /simple/, because authors generally expect that
# twine upload && pip install will function, but if
# they've done a pip install in the last ~10 minutes
# it won't. Thus by setting this to zero we will not
# blindly use any cached data, however the benefit of
# using max-age=0 instead of no-cache, is that we will
# still support conditional requests, so we will still
# minimize traffic sent in cases where the page hasn't
# changed at all, we will just always incur the round
# trip for the conditional GET now instead of only
# once per 10 minutes.
# For more information, please see pypa/pip#5670.
"Cache-Control": "max-age=0",
},
)
raise_for_status(resp)
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
# requirement of an url. Unless we issue a HEAD request on every
# url we cannot know ahead of time for sure if something is HTML
# or not. However we can check after we've downloaded it.
_ensure_html_header(resp)
return resp
def _get_encoding_from_headers(headers):
# type: (ResponseHeaders) -> Optional[str]
"""Determine if we have any encoding information in our headers.
"""
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
return params['charset']
return None
def _determine_base_url(document, page_url):
# type: (HTMLElement, str) -> str
"""Determine the HTML document's base URL.
This looks for a ``<base>`` tag in the HTML document. If present, its href
attribute denotes the base URL of anchor tags in the document. If there is
no such tag (or if it does not have a valid href attribute), the HTML
file's URL is used as the base URL.
:param document: An HTML document representation. The current
implementation expects the result of ``html5lib.parse()``.
:param page_url: The URL of the HTML document.
"""
for base in document.findall(".//base"):
href = base.get("href")
if href is not None:
return href
return page_url
def _clean_url_path_part(part):
# type: (str) -> str
"""
Clean a "part" of a URL path (i.e. after splitting on "@" characters).
"""
# We unquote prior to quoting to make sure nothing is double quoted.
return urllib_parse.quote(urllib_parse.unquote(part))
def _clean_file_url_path(part):
# type: (str) -> str
"""
Clean the first part of a URL path that corresponds to a local
filesystem path (i.e. the first part after splitting on "@" characters).
"""
# We unquote prior to quoting to make sure nothing is double quoted.
# Also, on Windows the path part might contain a drive letter which
# should not be quoted. On Linux where drive letters do not
# exist, the colon should be quoted. We rely on urllib.request
# to do the right thing here.
return urllib_request.pathname2url(urllib_request.url2pathname(part))
# Reserved characters that must survive path cleaning: "@" and "%2F" (a percent-encoded "/").
_reserved_chars_re = re.compile('(@|%2F)', re.IGNORECASE)
def _clean_url_path(path, is_local_path):
# type: (str, bool) -> str
"""
Clean the path portion of a URL.
"""
if is_local_path:
clean_func = _clean_file_url_path
else:
clean_func = _clean_url_path_part
# Split on the reserved characters prior to cleaning so that
# revision strings in VCS URLs are properly preserved.
parts = _reserved_chars_re.split(path)
cleaned_parts = []
for to_clean, reserved in pairwise(itertools.chain(parts, [''])):
cleaned_parts.append(clean_func(to_clean))
# Normalize %xx escapes (e.g. %2f -> %2F)
cleaned_parts.append(reserved.upper())
return ''.join(cleaned_parts)
def _clean_link(url):
# type: (str) -> str
"""
Make sure a link is fully quoted.
For example, if ' ' occurs in the URL, it will be replaced with "%20",
and without double-quoting other characters.
"""
# Split the URL into parts according to the general structure
# `scheme://netloc/path;parameters?query#fragment`.
result = urllib_parse.urlparse(url)
# If the netloc is empty, then the URL refers to a local filesystem path.
is_local_path = not result.netloc
path = _clean_url_path(result.path, is_local_path=is_local_path)
return urllib_parse.urlunparse(result._replace(path=path))
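# Worked example (illustrative; the URL below is hypothetical): a space in the
# path is percent-encoded exactly once, and already-encoded characters are not
# double-quoted:
#   _clean_link("https://example.com/some dir/pkg-1.0.tar.gz")
#   -> "https://example.com/some%20dir/pkg-1.0.tar.gz"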
def _create_link_from_element(
anchor, # type: HTMLElement
page_url, # type: str
base_url, # type: str
):
# type: (...) -> Optional[Link]
"""
Convert an anchor element in a simple repository page to a Link.
"""
href = anchor.get("href")
if not href:
return None
url = _clean_link(urllib_parse.urljoin(base_url, href))
pyrequire = anchor.get('data-requires-python')
pyrequire = unescape(pyrequire) if pyrequire else None
yanked_reason = anchor.get('data-yanked')
if yanked_reason:
# This is a unicode string in Python 2 (and 3).
yanked_reason = unescape(yanked_reason)
link = Link(
url,
comes_from=page_url,
requires_python=pyrequire,
yanked_reason=yanked_reason,
)
return link
class CacheablePageContent(object):
def __init__(self, page):
# type: (HTMLPage) -> None
assert page.cache_link_parsing
self.page = page
def __eq__(self, other):
# type: (object) -> bool
return (isinstance(other, type(self)) and
self.page.url == other.page.url)
def __hash__(self):
# type: () -> int
return hash(self.page.url)
def with_cached_html_pages(
fn, # type: Callable[[HTMLPage], Iterable[Link]]
):
# type: (...) -> Callable[[HTMLPage], List[Link]]
"""
Given a function that parses an Iterable[Link] from an HTMLPage, cache the
function's result (keyed by CacheablePageContent), unless the HTMLPage
`page` has `page.cache_link_parsing == False`.
"""
@lru_cache(maxsize=None)
def wrapper(cacheable_page):
# type: (CacheablePageContent) -> List[Link]
return list(fn(cacheable_page.page))
@functools.wraps(fn)
def wrapper_wrapper(page):
# type: (HTMLPage) -> List[Link]
if page.cache_link_parsing:
return wrapper(CacheablePageContent(page))
return list(fn(page))
return wrapper_wrapper
@with_cached_html_pages
def parse_links(page):
# type: (HTMLPage) -> Iterable[Link]
"""
Parse an HTML document, and yield its anchor elements as Link objects.
"""
document = html5lib.parse(
page.content,
transport_encoding=page.encoding,
namespaceHTMLElements=False,
)
url = page.url
base_url = _determine_base_url(document, url)
for anchor in document.findall(".//a"):
link = _create_link_from_element(
anchor,
page_url=url,
base_url=base_url,
)
if link is None:
continue
yield link
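# Illustrative sketch (hypothetical HTML, not from the pip test suite): feeding a
# small HTMLPage through parse_links() yields a Link per anchor; results are
# memoized per page unless cache_link_parsing is False:
#   page = HTMLPage(b'<a href="pkg-1.0.tar.gz">pkg</a>', encoding=None,
#                   url="https://example.com/simple/pkg/")
#   links = list(parse_links(page))   # one Link at .../simple/pkg/pkg-1.0.tar.gz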
class HTMLPage(object):
"""Represents one page, along with its URL"""
def __init__(
self,
content, # type: bytes
encoding, # type: Optional[str]
url, # type: str
cache_link_parsing=True, # type: bool
):
# type: (...) -> None
"""
:param encoding: the encoding to decode the given content.
:param url: the URL from which the HTML was downloaded.
:param cache_link_parsing: whether links parsed from this page's url
should be cached. PyPI index urls should
have this set to False, for example.
"""
self.content = content
self.encoding = encoding
self.url = url
self.cache_link_parsing = cache_link_parsing
def __str__(self):
# type: () -> str
return redact_auth_from_url(self.url)
def _handle_get_page_fail(
link, # type: Link
reason, # type: Union[str, Exception]
meth=None # type: Optional[Callable[..., None]]
):
# type: (...) -> None
if meth is None:
meth = logger.debug
meth("Could not fetch URL %s: %s - skipping", link, reason)
def _make_html_page(response, cache_link_parsing=True):
# type: (Response, bool) -> HTMLPage
encoding = _get_encoding_from_headers(response.headers)
return HTMLPage(
response.content,
encoding=encoding,
url=response.url,
cache_link_parsing=cache_link_parsing)
def _get_html_page(link, session=None):
# type: (Link, Optional[PipSession]) -> Optional[HTMLPage]
if session is None:
raise TypeError(
"_get_html_page() missing 1 required keyword argument: 'session'"
)
url = link.url.split('#', 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
vcs_scheme = _match_vcs_scheme(url)
if vcs_scheme:
logger.warning('Cannot look at %s URL %s because it does not support '
'lookup as web pages.', vcs_scheme, link)
return None
# Tack index.html onto file:// URLs that point to directories
scheme, _, path, _, _, _ = urllib_parse.urlparse(url)
if (scheme == 'file' and os.path.isdir(urllib_request.url2pathname(path))):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith('/'):
url += '/'
url = urllib_parse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s', url)
try:
resp = _get_html_response(url, session=session)
except _NotHTTP:
logger.warning(
'Skipping page %s because it looks like an archive, and cannot '
'be checked by a HTTP HEAD request.', link,
)
except _NotHTML as exc:
logger.warning(
'Skipping page %s because the %s request got Content-Type: %s.'
'The only supported Content-Type is text/html',
link, exc.request_desc, exc.content_type,
)
except NetworkConnectionError as exc:
_handle_get_page_fail(link, exc)
except RetryError as exc:
_handle_get_page_fail(link, exc)
except SSLError as exc:
reason = "There was a problem confirming the ssl certificate: "
reason += str(exc)
_handle_get_page_fail(link, reason, meth=logger.info)
except requests.ConnectionError as exc:
_handle_get_page_fail(link, "connection error: {}".format(exc))
except requests.Timeout:
_handle_get_page_fail(link, "timed out")
else:
return _make_html_page(resp,
cache_link_parsing=link.cache_link_parsing)
return None
def _remove_duplicate_links(links):
# type: (Iterable[Link]) -> List[Link]
"""
Return a list of links, with duplicates removed and ordering preserved.
"""
# We preserve the ordering when removing duplicates because we can.
return list(OrderedDict.fromkeys(links))
def group_locations(locations, expand_dir=False):
# type: (Sequence[str], bool) -> Tuple[List[str], List[str]]
"""
Divide a list of locations into two groups: "files" (archives) and "urls."
:return: A pair of lists (files, urls).
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
# type: (str) -> None
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if os.path.isdir(path):
if expand_dir:
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url:
urls.append(url)
else:
logger.warning(
"Path '%s' is ignored: it is a directory.", path,
)
elif os.path.isfile(path):
sort_path(path)
else:
logger.warning(
"Url '%s' is ignored: it is neither a file "
"nor a directory.", url,
)
elif is_url(url):
# Only add url with clear scheme
urls.append(url)
else:
logger.warning(
"Url '%s' is ignored. It is either a non-existing "
"path or lacks a specific scheme.", url,
)
return files, urls
class CollectedLinks(object):
"""
Encapsulates the return value of a call to LinkCollector.collect_links().
The return value includes both URLs to project pages containing package
links, as well as individual package Link objects collected from other
sources.
This info is stored separately as:
(1) links from the configured file locations,
(2) links from the configured find_links, and
(3) urls to HTML project pages, as described by the PEP 503 simple
repository API.
"""
def __init__(
self,
files, # type: List[Link]
find_links, # type: List[Link]
project_urls, # type: List[Link]
):
# type: (...) -> None
"""
:param files: Links from file locations.
:param find_links: Links from find_links.
:param project_urls: URLs to HTML project pages, as described by
the PEP 503 simple repository API.
"""
self.files = files
self.find_links = find_links
self.project_urls = project_urls
class LinkCollector(object):
"""
Responsible for collecting Link objects from all configured locations,
making network requests as needed.
The class's main method is its collect_links() method.
"""
def __init__(
self,
session, # type: PipSession
search_scope, # type: SearchScope
):
# type: (...) -> None
self.search_scope = search_scope
self.session = session
@classmethod
def create(cls, session, options, suppress_no_index=False):
# type: (PipSession, Values, bool) -> LinkCollector
"""
:param session: The Session to use to make requests.
:param suppress_no_index: Whether to ignore the --no-index option
when constructing the SearchScope object.
"""
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index and not suppress_no_index:
logger.debug(
'Ignoring indexes: %s',
','.join(redact_auth_from_url(url) for url in index_urls),
)
index_urls = []
# Make sure find_links is a list before passing to create().
find_links = options.find_links or []
search_scope = SearchScope.create(
find_links=find_links, index_urls=index_urls,
)
link_collector = LinkCollector(
session=session, search_scope=search_scope,
)
return link_collector
@property
def find_links(self):
# type: () -> List[str]
return self.search_scope.find_links
def fetch_page(self, location):
# type: (Link) -> Optional[HTMLPage]
"""
Fetch an HTML page containing package links.
"""
return _get_html_page(location, session=self.session)
def collect_links(self, project_name):
# type: (str) -> CollectedLinks
"""Find all available links for the given project name.
:return: All the Link objects (unfiltered), as a CollectedLinks object.
"""
search_scope = self.search_scope
index_locations = search_scope.get_index_urls_locations(project_name)
index_file_loc, index_url_loc = group_locations(index_locations)
fl_file_loc, fl_url_loc = group_locations(
self.find_links, expand_dir=True,
)
file_links = [
Link(url) for url in itertools.chain(index_file_loc, fl_file_loc)
]
# We trust every directly linked archive in find_links
find_link_links = [Link(url, '-f') for url in self.find_links]
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links.
# We want to filter out anything that does not have a secure origin.
url_locations = [
link for link in itertools.chain(
# Mark PyPI indices as "cache_link_parsing == False" -- this
# will avoid caching the result of parsing the page for links.
(Link(url, cache_link_parsing=False) for url in index_url_loc),
(Link(url) for url in fl_url_loc),
)
if self.session.is_secure_origin(link)
]
url_locations = _remove_duplicate_links(url_locations)
lines = [
'{} location(s) to search for versions of {}:'.format(
len(url_locations), project_name,
),
]
for link in url_locations:
lines.append('* {}'.format(link))
logger.debug('\n'.join(lines))
return CollectedLinks(
files=file_links,
find_links=find_link_links,
project_urls=url_locations,
)
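# Illustrative usage sketch (a real PipSession and SearchScope are required;
# they are usually built from parsed CLI options via LinkCollector.create()):
#
#   collector = LinkCollector(session=session, search_scope=search_scope)
#   collected = collector.collect_links("requests")
#   collected.project_urls   # PEP 503 project pages to fetch and parse for candidates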
|
py | b413655597771b80dee957d84367f3baf7316af8 | #!/usr/bin/python
#####################################################################
# This script presents different formats of the screen buffer.
# OpenCV is used here to display images, install it or remove any
# references to cv2
# Configuration is loaded from "../../examples/config/basic.cfg" file.
# <episodes> number of episodes are played.
# Random combination of buttons is chosen for every action.
# Game variables from state and last reward are printed.
# To see the scenario description go to "../../scenarios/README.md"
#
#####################################################################
from __future__ import print_function
from vizdoom import *
from time import sleep
from time import time
from random import choice
import cv2
game = DoomGame()
# Use other config file if you wish.
game.load_config("../../examples/config/basic.cfg")
#game.set_window_visible(False)
# Just uncomment the desired format. The last uncommented one will be applied.
# Formats with C were omitted because they are not cv2 friendly
#game.set_screen_format(ScreenFormat.RGB24)
#game.set_screen_format(ScreenFormat.ARGB32)
#game.set_screen_format(ScreenFormat.GRAY8)
# This is the most fun one. It looks best if you invert the colors.
game.set_screen_format(ScreenFormat.DEPTH_BUFFER8)
#These formats can be used, but they do not make much sense for cv2; you'll just get mixed-up colors.
#game.set_screen_format(ScreenFormat.BGR24)
#game.set_screen_format(ScreenFormat.RGBA32)
#game.set_screen_format(ScreenFormat.BGRA32)
#game.set_screen_format(ScreenFormat.ABGR32)
#This one makes no sense in particular
#game.set_screen_format(ScreenFormat.DOOM_256_COLORS)
game.set_screen_resolution(ScreenResolution.RES_640X480)
game.init()
actions = [[True,False,False],[False,True,False],[False,False,True]]
episodes = 10
# sleep time in ms
sleep_time = 20
for i in range(episodes):
print("Episode #" +str(i+1))
    # Not needed for the first episode, but the loop is nicer.
game.new_episode()
while not game.is_episode_finished():
        # Gets the state so we can do something with it
s = game.get_state()
img = s.image_buffer
misc = s.game_variables
# Gray8 shape is not cv2 compliant
if game.get_screen_format() in [ScreenFormat.GRAY8, ScreenFormat.DEPTH_BUFFER8]:
img = img.reshape(img.shape[1],img.shape[2],1)
# Display the image here!
cv2.imshow('Doom Buffer',img)
cv2.waitKey(sleep_time)
# Makes a random action and save the reward.
r = game.make_action(choice(actions))
print("State #" +str(s.number))
print("Game Variables:", misc)
print("Last Reward:",r)
print("=====================")
print("Episode finished!")
print("total reward:", game.get_total_reward())
print("************************")
cv2.destroyAllWindows()
|
py | b41365c60ed13ba9ee1f88cf829957906517eb44 | #TYPES OF ATTRIBUTE
#instance attributes, normally created inside __init__
#instance attributes created dynamically and removed with the reserved word del
#class attributes, created on the CLASS itself; the value is the same for all objects.
class Pessoa:
    olhos = 2  # class attribute, created outside the dunder __init__; only one attribute exists in memory, shared rather than duplicated per object
def __init__(self,*filhos, nome = None, idade=35):
        # creates the instance attributes in memory
self.idade = idade
self.nome = nome
self.filhos = list(filhos)
    def cumprimentar(self):  # Method
return f'Olá {id(self)}'
    @staticmethod  # decorator: defines a static method (no access to the instance or the class)
def metodo_estatico():
return 42
@classmethod
    # use a classmethod when you need to access data of the class itself
def nome_e_atributos_de_classe(cls):
return f'{cls} - olhos {cls.olhos}'
if __name__ == '__main__':
mauricio = Pessoa(nome='Mauricio')
luciano = Pessoa(mauricio,nome='Luciano')
print(Pessoa.cumprimentar(luciano))
print(id(luciano))
print(luciano.cumprimentar())
print(luciano.nome)
print(luciano.idade)
for filho in luciano.filhos:
print(filho.nome)
luciano.sobrenome = "Denari" # criando atributo dinamico
del luciano.filhos #remove o atributos dinamicamente - usar atributos dinamicos qdo uma data para uso um formato diferente só apresentação web.
luciano.olhos = 1
    del luciano.olhos  # deletes the attribute on the luciano object, not on the class; lookups fall back to the class value
    print(luciano.__dict__)  # dunder __dict__: the instance attributes live here
    print(mauricio.__dict__)  # class attributes do not appear, only the instance attributes created in __init__
    Pessoa.olhos = 3  # changing the class attribute olhos to 3
print(Pessoa.olhos)
print(mauricio.olhos)
print(luciano.olhos)
    # the value is the same for all of them because the attribute was created outside the dunder __init__, at class level
print(id(Pessoa.olhos), id(mauricio.olhos), id(luciano.olhos))
print(Pessoa.metodo_estatico(),luciano.metodo_estatico())
print(Pessoa.nome_e_atributos_de_classe(),luciano.nome_e_atributos_de_classe())
|
py | b41366d02a1a762ad6418f0805a0bfc1241fe32d | import spacy
import pytest
import numpy as np
from spacy.tokens import Doc
from spacy_readability import (
Readability,
_get_num_syllables,
)
@pytest.fixture(scope="function")
def nlp():
return spacy.load("en")
@pytest.fixture(scope="function")
def read():
np.random.seed(123)
pipeline = spacy.load("en")
return Readability(nlp=pipeline)
def test_simple(nlp):
doc = nlp("sample")
assert doc
def test_integration(nlp, read):
nlp.add_pipe(read, last=True)
assert "readability" == nlp.pipe_names[-1]
def test_sentences(nlp, read):
nlp.add_pipe(read, last=True)
doc = nlp("I am 2 sentences. I am the best panda?")
assert doc._.total_sentences == 2
def test_words(nlp, read):
nlp.add_pipe(read, last=True)
doc = nlp("I contain four words.")
assert doc._.total_words == 4
def test_syllables(nlp, read):
nlp.add_pipe(read, last=True)
doc = nlp("I contain four words.")
for token in doc:
print(token, token._.syllables_count)
assert doc._.total_syllables == 5
def test_extensions(nlp, read):
""" Values obtained by manual calculation.
"""
nlp.add_pipe(read, last=True)
doc = nlp("I contain four words. Therefore, it should be possible to calculate by hand.")
syllable_result = {
"i": 1,
"contain": 2,
"four": 1,
"words": 1,
"therefore": 2,
"it": 1,
"should": 1,
"be": 1,
"possible": 3,
"to": 1,
"calculate": 3,
"by": 1,
"hand": 1,
}
letter_result = {
"i": 1,
"contain": 7,
"four": 4,
"words": 5,
"therefore": 9,
"it": 2,
"should": 6,
"be": 2,
"possible": 8,
"to": 2,
"calculate": 9,
"by": 2,
"hand": 4,
}
assert Doc.has_extension("flesch_kincaid_grade_level")
assert Doc.has_extension("flesch_kincaid_reading_ease")
assert Doc.has_extension("dale_chall")
assert Doc.has_extension("smog")
assert Doc.has_extension("coleman_liau_index")
assert Doc.has_extension("automated_readability_index")
assert Doc.has_extension("forcast")
assert doc._.total_sentences == 2
assert doc._.total_words == 13
assert doc._.total_syllables == 19
assert doc._.total_letters == 61
assert syllable_result == {word.text.lower(): word._.syllables_count for word in doc if not word.is_punct and not word.is_digit}
assert letter_result == {word.text.lower(): word._.letters_count for word in doc if not word.is_punct and not word.is_digit}
# test extension values
assert pytest.approx(4.69, rel=1e-2) == doc._.total_letters / doc._.total_words
assert pytest.approx(1.46, rel=1e-2) == doc._.total_syllables / doc._.total_words
assert pytest.approx(6.5, rel=1e-2) == doc._.total_words / doc._.total_sentences
assert pytest.approx(4.19, rel=1e-2) == doc._.flesch_kincaid_grade_level
assert pytest.approx(7.22, rel=1e-2) == doc._.coleman_liau_index
assert pytest.approx(3.92, rel=1e-2) == doc._.automated_readability_index
assert doc._.smog == 0
|
py | b41367c8e02c1e837b44cb84f70dc2565d287ffc | import argparse
import sys
from .const import ARGS_PARSE_FILENAME_HELP, ARGS_PARSE_TEXT_HELP
def _get_argument_parser():
parser = argparse.ArgumentParser()
parser.add_argument(
'text',
nargs='?',
help=ARGS_PARSE_TEXT_HELP,
)
parser.add_argument(
'-f', '--filename',
type=argparse.FileType('r'),
dest='filename',
help=ARGS_PARSE_FILENAME_HELP,
)
return parser
def load_text():
parser = _get_argument_parser()
args = parser.parse_args()
text = ''
if args.filename is not None:
text = args.filename.read()
elif args.text is not None:
text = args.text
elif not sys.stdin.isatty():
for line in sys.stdin:
text += line
if not text:
parser.print_help()
exit(0)
return text
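# Illustrative usage sketch (the package name `mypkg` below is a placeholder,
# since the real importable name is not shown in this file):
#
#   from mypkg.cli import load_text
#   text = load_text()   # positional arg, then -f/--filename, then stdin;
#                        # prints the help and exits if no text was provided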
|
py | b41369d658c64381627158ef9d9b133de78c491c | # Copyright 2018 Davide Spadini and Arie van Deursen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pytest
from pydriller.git_repository import GitRepository
from pydriller.domain.commit import Commit, DMMProperty
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
@pytest.fixture()
def repo():
path = "test-repos/dmm-test-repo"
gr = GitRepository(path)
yield gr
gr.clear()
# List of (commit_message, dmm_value) pairs
#
# We use unit size to exercise all the various DMM cases
UNIT_SIZE_TEST_DATA = [
# delta high > 0, delta low = 0 -- always DMM 0.0
('Commit with one large method', 0.0),
# delta high > 0, delta low > 0 -- DMM = ratio
('Make large larger, add small method', 0.8),
# delta high > 0, delta low < 0 --- always DMM 0.0
('Make large larger, make small smaller', 0.0),
# delta high = 0, delta low = 0 --- no delta-changes, dmm None
('Modify every line in large method', None),
# delta high = 0, delta low > 0 --- always DMM 1.0
('Make small method a bit larger', 1.0),
    # delta high = 0, delta low < 0 --- always DMM 0.0
('Make small smaller', 0.0),
# delta high < 0, delta low < 0 --- DMM = ratio
('Make large smaller, make small smaller', 2/3),
# delta high < 0, delta low = 0 -- always 1.0
('Make large smaller', 1.0),
# delta high < 0, delta low > 0 -- always DMM 1.0
('Make large smaller, make small larger', 1.0),
# File 1: large larger; File 2: small larger -- dmm fraction
('Increase in one, decrease in other file', 3/4),
# Method with unit size exactly on the border
('Add method with unit size on-point', 1.0),
# Method with unit size at off point
('Increase unit size to risky', 0.0)
]
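# The fractional DMM values above are the share of "good" change (low-risk
# code added or high-risk code removed) in the total delta -- e.g. a commit
# adding 4 low-risk and 1 high-risk lines would score 4/5 = 0.8 (illustrative
# numbers, not the actual deltas in the test repo); see test_good_proportion
# below for the full mapping.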
UNIT_COMPLEXITY_TEST_DATA = [
# Large method, but no conditional logic
('Commit with one large method', 1.0),
# Method with cyclomatic complexity exactly on the border
('Add method with complexity on-point', 1.0),
# Method with cyclomatic complexity at off point
('Increase complexity to risky', 0.0)
]
UNIT_INTERFACING_TEST_DATA = [
# Large method, but no parameters
('Commit with one large method', 1.0),
    # Adjust method with nr of parameters exactly on the border, same size
('Add method with interfacing on-point', None),
# Method with nr of parameters at off point
('Increase interfacing to risky', 0.0)
]
def commit_by_msg(repo: GitRepository, msg: str) -> Commit:
for commit in repo.get_list_commits():
if commit.msg == msg:
return commit
raise Exception('cannot find commit with msg {}'.format(msg))
@pytest.mark.parametrize('msg,dmm', UNIT_SIZE_TEST_DATA)
def test_dmm_unit_size(repo: GitRepository, msg: str, dmm: float):
commit = commit_by_msg(repo, msg)
assert commit.dmm_unit_size[0] == dmm
@pytest.mark.parametrize('msg,dmm', UNIT_COMPLEXITY_TEST_DATA)
def test_dmm_unit_complexity(repo: GitRepository, msg: str, dmm: float):
commit = commit_by_msg(repo, msg)
assert commit.dmm_unit_complexity[0] == dmm
@pytest.mark.parametrize('msg,dmm', UNIT_INTERFACING_TEST_DATA)
def test_dmm_unit_interfacing(repo: GitRepository, msg: str, dmm: float):
commit = commit_by_msg(repo, msg)
assert commit.dmm_unit_interfacing[0] == dmm
def test_unsupported_language(repo: GitRepository):
# Add .md file that cannot be analyzed by Lizard
commit = commit_by_msg(repo, 'Offer README explaining the repo purpose')
assert commit.dmm_unit_size is None
def test_mixin_unsupported_language(repo: GitRepository):
# Add .txt file and update (comments in) .java files
commit = commit_by_msg(repo, 'Release under Apache 2 license')
assert commit.dmm_unit_size[0] is None
def test_delta_profile_modification(repo: GitRepository):
commit = commit_by_msg(repo, 'Increase unit size to risky')
mod = commit.modifications[0]
assert mod._delta_risk_profile(DMMProperty.UNIT_SIZE) == (-15, 16)
def test_delta_profile_commit(repo: GitRepository):
commit = commit_by_msg(repo, 'Increase in one, decrease in other file')
m0 = commit.modifications[0]
assert m0._delta_risk_profile(DMMProperty.UNIT_SIZE) == (0, 1)
m1 = commit.modifications[1]
assert m1._delta_risk_profile(DMMProperty.UNIT_SIZE) == (3, 0)
assert commit._delta_risk_profile(DMMProperty.UNIT_SIZE) == (3, 1)
def test_supported_languages(repo: GitRepository):
# Add .md file that cannot be analyzed by Lizard
commit = commit_by_msg(repo, 'Offer README explaining the repo purpose')
mod = commit.modifications[0]
assert not mod.language_supported
@pytest.mark.parametrize(
'dlo,dhi,prop', [
(0, 0, None),
(1, 0, 1.0),
(-1, 0, 0.0),
(0, 1, 0.0),
(0, -1, 1.0),
(1, 1, 0.5),
(-1, -1, 0.5),
(1, -1, 1.0),
(-1, 1, 0.0)
])
def test_good_proportion(dlo: int, dhi: int, prop: float):
(good_change, bad_change) = Commit._change_proportion(dlo, dhi)
assert Commit._delta_score(good_change, bad_change) == prop
|
py | b4136b110b44c02e05c31a883f36a39cda78b41d | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2020-02-14 15:18
from __future__ import unicode_literals
from django.db import migrations, models
import osf.models.validators
class Migration(migrations.Migration):
dependencies = [
('osf', '0199_draft_node_permissions'),
]
operations = [
migrations.AlterField(
model_name='draftregistration',
name='category',
field=models.CharField(blank=True, choices=[('analysis', 'Analysis'), ('communication', 'Communication'), ('data', 'Data'), ('hypothesis', 'Hypothesis'), ('instrumentation', 'Instrumentation'), ('methods and measures', 'Methods and Measures'), ('procedure', 'Procedure'), ('project', 'Project'), ('software', 'Software'), ('other', 'Other'), ('', 'Uncategorized')], default='', max_length=255),
),
migrations.AlterField(
model_name='draftregistration',
name='description',
field=models.TextField(blank=True, default=''),
),
migrations.AlterField(
model_name='draftregistration',
name='title',
field=models.TextField(blank=True, default='', validators=[osf.models.validators.validate_title]),
),
]
|
py | b4136b90c59ee0dd75fe864daf9eb1ae8e39a18f | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Aakvatech and Contributors
# See license.txt
from __future__ import unicode_literals
import unittest
class TestPropertyAmenity(unittest.TestCase):
pass
|
py | b4136be880fc8522afb565b6a6a67b8562ee2f82 | ########################################################################
import vtk
import dolfin as dolfin
import numpy as np
from vtk.util import numpy_support
########################################################################
def convertUGridToXMLMesh(ugrid):
num_pts = ugrid.GetNumberOfPoints()
num_cells = ugrid.GetNumberOfCells()
celltypes = numpy_support.vtk_to_numpy(ugrid.GetCellTypesArray())
num_tetra = np.count_nonzero(celltypes == 10)
print ("Number of points = ", num_pts)
print ("Number of tetra = ", num_tetra)
mesh = dolfin.Mesh()
editor = dolfin.MeshEditor()
# c_type = cell_type()
# c_str = c_type.type2string(c_type.cell_type())
editor.open(mesh,'tetrahedron',3, 3)#for python3
#editor.open(mesh,4, 3, 3) # top. and geom. dimension are both 2
# The following argument types are supported:
# 1. (self: dolfin.cpp.mesh.MeshEditor, mesh: dolfin.cpp.mesh.Mesh, type: str,tdim: int, gdim: int, degree: int=1) -> None
editor.init_vertices(num_pts) # number of vertices
editor.init_cells(num_tetra) # number of cells
for p in range(0, num_pts):
pt = ugrid.GetPoints().GetPoint(p)
editor.add_vertex(p, [pt[0], pt[1], pt[2]])
cnt = 0
for p in range(0, num_cells):
pts = vtk.vtkIdList()
ugrid.GetCellPoints(p, pts)
if(pts.GetNumberOfIds() == 4):
editor.add_cell(cnt, [pts.GetId(0), pts.GetId(1), pts.GetId(2), pts.GetId(3)])
cnt = cnt + 1
editor.close()
return mesh
|
py | b4136c04ad6dec8ea54bc8d26a47174ef03ceb07 | # Generated by Django 3.2.6 on 2022-01-08 19:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('bookmarks', '0011_userprofile_web_archive_integration'),
]
operations = [
migrations.CreateModel(
name='Toast',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('key', models.CharField(max_length=50)),
('message', models.TextField()),
('acknowledged', models.BooleanField(default=False)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
py | b4136c93f83d3696f3612439b66c68f5978badc2 | # @desc Evaluate the overengineered XOR gate. By Benji '24
def XOR(bool1, bool2):
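    # XOR is true exactly when the inputs differ: (a OR b) AND NOT (a AND b).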
value1 = bool1 or bool2
value2 = not (bool1 and bool2)
return value1 and value2
def main():
print(XOR(False, False))
print(XOR(False, True))
print(XOR(True, False))
print(XOR(True, True))
if __name__ == '__main__':
main()
|
py | b4136d1cb2b95becd4da0ae8713c52dbe67ac251 | # -*- coding: utf-8 -*-
# @Time : 2021/4/12 12:48 PM
# @Author : anonymous
# @File : __init__.py.py
# @Software: PyCharm
# @Description:
|
py | b4136d4fb76f2642112c794b89fb47285e1c8641 | from django.contrib import admin
from .models import *
admin.site.register(Subject)
admin.site.register(Note)
admin.site.register(Grade) |
py | b4136d9e722a419f004d562f6c7fc2d2ff505695 | """Django settings for foodgram project."""
import os
from pathlib import Path
from django.core.management.utils import get_random_secret_key
from dotenv import load_dotenv
load_dotenv()
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = os.environ.get("SECRET_KEY", default=get_random_secret_key())
DEBUG = bool(int(os.environ.get("DEBUG") if os.environ.get("DEBUG") else "0"))
ALLOWED_HOSTS = [
"localhost",
"127.0.0.1",
"[::1]",
"food-gram.cf",
]
INSTALLED_APPS = [
"users",
"recipes.apps.RecipesConfig",
"api",
"about",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"sorl.thumbnail",
"debug_toolbar", # for django-debug-toolbar
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"debug_toolbar.middleware.DebugToolbarMiddleware", # for django-debug-toolbar
]
# for django-debug-toolbar
INTERNAL_IPS = [
"127.0.0.1",
]
ROOT_URLCONF = "foodgram.urls"
TEMPLATES_DIR = BASE_DIR / "templates"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [TEMPLATES_DIR],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "foodgram.wsgi.application"
DATABASES = {
"default": {
"ENGINE": os.environ.get("DB_ENGINE"),
"NAME": os.environ.get("POSTGRES_DB"),
"USER": os.environ.get("POSTGRES_USER"),
"PASSWORD": os.environ.get("POSTGRES_PASSWORD"),
"HOST": os.environ.get("DB_HOST"),
"PORT": os.environ.get("DB_PORT"),
}
}
# DATABASES = {
# "default": {
# "ENGINE": "django.db.backends.sqlite3",
# "NAME": BASE_DIR / "db.sqlite3",
# }
# }
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
},
{
"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
},
{
"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
},
{
"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
},
]
LANGUAGE_CODE = "ru"
TIME_ZONE = "Europe/Moscow"
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = "static/"
STATIC_ROOT = BASE_DIR / "static"
MEDIA_URL = "media/"
MEDIA_ROOT = BASE_DIR / "media"
LOGIN_URL = "/auth/login/"
LOGIN_REDIRECT_URL = "index"
# use the filebased.EmailBackend engine
EMAIL_BACKEND = "django.core.mail.backends.filebased.EmailBackend"
# directory where the outgoing email files will be stored
EMAIL_FILE_PATH = BASE_DIR / "sent_emails"
REST_FRAMEWORK = {
"DEFAULT_PERMISSION_CLASSES": [
"rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly"
]
}
AUTH_USER_MODEL = "users.RecipeUser"
SHOPLIST_SESSION_ID = "shoplist"
|
py | b4136ebb27119d9a9d91a9ad6d806469e86bd7cd | from models import UsedPort
import psutil
def get_used_ports():
all_conns = psutil.net_connections()
listening = [UsedPort.from_ps_util_sconn(conn)
for conn in all_conns
if conn.status == 'LISTEN' ]
return listening
|
py | b413702cb02a43045a3358cc6d0b39ac550d2a8b | from django.contrib.contenttypes.models import ContentType
from rest_framework.serializers import (
ModelSerializer,
ValidationError,
EmailField,
)
from coin.models import Coin
class CoinCreateSerializer(ModelSerializer):
class Meta:
model = Coin
fields = [
'name',
'ticker',
'price',
'btc_price',
]
def validate(self, data):
return data #TODO: build validation logic here
def create(self, validated_data):
return validated_data #TODO: build this validation
|
py | b41370b99e725245f70fff5110cd8ddd4f9d6f5f | # ! /usr/bin/python
# -*- coding: utf-8 -*-
# =============================================================================
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import pytest
from nemo.backends.pytorch.tutorials import MSELoss, RealFunctionDataLayer, TaylorNet
from nemo.core import NeuralGraph, OperationMode
@pytest.mark.usefixtures("neural_factory")
class TestNeuralGraphImportExport:
"""
Class testing Neural Graph configuration import/export.
"""
@pytest.mark.unit
def test_graph_simple_import_export(self, tmpdir):
"""
Tests whether the Neural Module can instantiate a simple module by loading a configuration file.
Args:
tmpdir: Fixture which will provide a temporary directory.
"""
# Instantiate the necessary neural modules.
dl = RealFunctionDataLayer(n=100, batch_size=1, name="tgio1_dl")
tn = TaylorNet(dim=4, name="tgio1_tn")
loss = MSELoss(name="tgio1_loss")
# Create the graph.
with NeuralGraph(operation_mode=OperationMode.training) as g1:
x, t = dl()
p = tn(x=x)
_ = loss(predictions=p, target=t)
# Serialize graph
serialized_g1 = g1.serialize()
# Generate filename in the temporary directory.
tmp_file_name = str(tmpdir.mkdir("export").join("simple_graph.yml"))
# Export graph to file.
g1.export_to_config(tmp_file_name)
# Create the second graph - import!
g2 = NeuralGraph.import_from_config(tmp_file_name, reuse_existing_modules=True)
serialized_g2 = g2.serialize()
# Must be the same.
assert serialized_g1 == serialized_g2
|
py | b413716e2969899d2cac670c89d115b3cb346e58 | from flask import Flask,render_template,request,redirect
import pickle
import numpy as np
from flask_mysqldb import MySQL
app=Flask(__name__)
model=pickle.load(open('model.pkl','rb'))
app.config['MYSQL_HOST'] = 'Localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = ''
app.config['MYSQL_DB'] = 'dataset'
mysql = MySQL(app)
@app.route('/')
def home():
return render_template('index.html')
@app.route('/Type_form', methods=['GET', 'POST'])
def Type_form():
return render_template('Type.html')
@app.route('/Health_form', methods=['GET', 'POST'])
def Health_form():
return render_template('Health.html')
@app.route('/Visualization_form', methods=['GET', 'POST'])
def Visualization_form():
return render_template('Visualization.html')
@app.route('/Qc_form', methods=['GET', 'POST'])
def Qc_form():
if request.method=='POST':
odor=int(request.values['odor'])
spore_print_color=int(request.values['spore-print-color'])
gill_color=int(request.values['gill-color'])
ring_type=int(request.values['ring-type'])
stalk_surface_above_ring=int(request.values['stalk-surface-above-ring'])
stalk_surface_below_ring=int(request.values['stalk-surface-below-ring'])
gill_size=int(request.values['gill-size'])
stalk_color_above_ring=int(request.values['stalk-color-above-ring'])
stalk_color_below_ring=int(request.values['stalk-color-below-ring'])
bruises=int(request.values['bruises'])
population=int(request.values['population'])
habitat=int(request.values['habitat'])
cur = mysql.connection.cursor()
cur.execute("INSERT INTO mushroomsdb (odor, spore_print_color, gill_color, ring_type, stalk_surface_above_ring, stalk_surface_below_ring, gill_size, stalk_color_above_ring, stalk_color_below_ring, bruises, population, habitat) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);",(odor, spore_print_color, gill_color, ring_type, stalk_surface_above_ring, stalk_surface_below_ring, gill_size, stalk_color_above_ring, stalk_color_below_ring, bruises, population, habitat))
mysql.connection.commit()
cur.close()
return redirect ('/predict')
return render_template('home.html')
@app.route('/values',methods=['GET','POST'])
def values():
cur = mysql.connection.cursor()
cur.execute("SELECT * FROM mushroomsdb;")
details = cur.fetchall()
return render_template('values.html',details=details)
@app.route('/predict',methods=['GET','POST'])
def predict():
cur = mysql.connection.cursor()
cur.execute("SELECT odor, spore_print_color, gill_color, ring_type, stalk_surface_above_ring, stalk_surface_below_ring, gill_size, stalk_color_above_ring, stalk_color_below_ring, bruises, population, habitat FROM mushroomsdb WHERE id=(SELECT MAX(id) FROM mushroomsdb);")
to_predict_list = cur.fetchall()
result = model.predict(to_predict_list)
return render_template('result.html',prediction_text="Mushroom is {}".format(result), x=to_predict_list)
if __name__ == "__main__":
app.run() |
py | b413720e5bd2d0b9a18bf9e5f926800e611996a0 | #encoding=utf-8
import pygame, sys
pygame.init()
delay = 100
interval = 50
pygame.key.set_repeat(delay, interval)
screen = pygame.display.set_mode([640,480])
background = pygame.Surface(screen.get_size())
background.fill([255,255,255])
clock = pygame.time.Clock()
class Ball(pygame.sprite.Sprite):
def __init__(self, image_file, speed, location):
        pygame.sprite.Sprite.__init__(self)  # initialize the sprite
        self.image = pygame.image.load(image_file)
        self.rect = self.image.get_rect()  # get the rect that bounds the image
        self.rect.left, self.rect.top = location  # set the ball's initial position
        self.speed = speed  # add a speed attribute
def move(self):
if self.rect.left <= screen.get_rect().left or \
self.rect.right >= screen.get_rect().right:
self.speed[0] = -self.speed[0]
newpos = self.rect.move(self.speed)
self.rect = newpos
my_ball = Ball('beach_ball.png', [10,0], [20, 20])  # create the ball instance
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_UP:
my_ball.rect.top = my_ball.rect.top - 10
elif event.key == pygame.K_DOWN:
my_ball.rect.top = my_ball.rect.top + 10
clock.tick(30)
screen.blit(background, (0, 0))
my_ball.move()
screen.blit(my_ball.image, my_ball.rect)
pygame.display.flip() |
py | b4137273970c22848a0c0512a28877545623cc7a | from pathlib import Path
DATA_BASE_DIR = Path(__file__).parent.parent.parent
DATA_MEDIA_ROOT = 'information'
MAIN_ASSETS = (
'BCH',
'BTC',
'DOGE',
'ETH',
'LTC',
)
|
py | b41372905d5168b10a59df838096ae205cff1d2f | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding=utf-8
import os
import json
import time
import logging
from aliyunsdkcore.client import AcsClient
from aliyunsdkecs.request.v20140526 import DescribeInstanceAttributeRequest
from aliyunsdkecs.request.v20140526 import CreateInstanceRequest
from aliyunsdkecs.request.v20140526 import StartInstanceRequest
from aliyunsdkecs.request.v20140526 import StopInstanceRequest
from aliyunsdkecs.request.v20140526 import DeleteInstanceRequest
from aliyunsdkecs.request.v20140526 import DescribeInstancesRequest
logging.basicConfig(level=logging.DEBUG)
mylogger = logging.getLogger()
class TestEcsIntegration(object):
def test_ecs(self):
# init client
client = AcsClient(
os.environ['ACCESS_KEY_ID'],
os.environ['ACCESS_KEY_SECRET'],
"cn-hangzhou"
)
mylogger.info("Init client success")
# get demo instance attributes
image_id, security_group_id = TestEcsIntegration.get_demo_ecs_attributes(client)
# create
instance_id = TestEcsIntegration.create_instance(client, image_id, security_group_id)
# wait
TestEcsIntegration.wait_for_instance(client, instance_id, 'Stopped')
# start
TestEcsIntegration.start_instance(client, instance_id)
# wait
TestEcsIntegration.wait_for_instance(client, instance_id, 'Running')
# stop
TestEcsIntegration.stop_instance(client, instance_id)
# wait
TestEcsIntegration.wait_for_instance(client, instance_id, 'Stopped')
# delete
TestEcsIntegration.delete_instance(client, instance_id)
# wait
TestEcsIntegration.wait_for_instance(client, instance_id, 'Deleted')
# delete all test instances
TestEcsIntegration.delete_all_test_ecs_instance(client)
@staticmethod
def get_demo_ecs_attributes(client):
mylogger.info("trying to get demo instance attributes...", )
demo_instance_id = os.environ['DEMO_ECS_INSTANCE_ID']
request = DescribeInstanceAttributeRequest.DescribeInstanceAttributeRequest()
request.set_accept_format("JSON")
request.set_InstanceId(demo_instance_id)
content = client.do_action_with_exception(request)
response = json.loads(content.decode('utf-8'))
mylogger.info("success")
return response.get('ImageId'), response.get('SecurityGroupIds').get('SecurityGroupId')[0]
@staticmethod
def create_instance(client, image_id, security_group_id):
mylogger.info("trying to create instance...", )
request = CreateInstanceRequest.CreateInstanceRequest()
request.set_accept_format("JSON")
request.set_ImageId(image_id)
request.set_InstanceName('SdkIntegrationTestInstance' + str(int(time.time())))
request.set_SecurityGroupId(security_group_id)
request.set_InstanceType('ecs.t1.small')
content = client.do_action_with_exception(request)
response = json.loads(content.decode('utf-8'))
mylogger.info("success")
return response.get('InstanceId')
@staticmethod
def start_instance(client, instance_id):
mylogger.info("trying to start instance...", )
request = StartInstanceRequest.StartInstanceRequest()
request.set_accept_format("JSON")
request.set_InstanceId(instance_id)
content = client.do_action_with_exception(request)
response = json.loads(content.decode('utf-8'))
mylogger.info("success")
return response.get('InstanceId')
@staticmethod
def stop_instance(client, instance_id):
mylogger.info("trying to stop instance...", )
request = StopInstanceRequest.StopInstanceRequest()
request.set_accept_format("JSON")
request.set_InstanceId(instance_id)
content = client.do_action_with_exception(request)
response = json.loads(content.decode('utf-8'))
mylogger.info("success")
return response.get('InstanceId')
@staticmethod
def delete_instance(client, instance_id):
mylogger.info("trying to delete instance...", )
request = DeleteInstanceRequest.DeleteInstanceRequest()
request.set_accept_format("JSON")
request.set_InstanceId(instance_id)
content = client.do_action_with_exception(request)
response = json.loads(content.decode('utf-8'))
mylogger.info("success")
return response.get('InstanceId')
@staticmethod
def wait_for_instance(client, instance_id, target_status):
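        # Poll DescribeInstanceAttribute until the instance reaches
        # target_status; a 404 response means the instance has been deleted.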
while True:
request = DescribeInstanceAttributeRequest.DescribeInstanceAttributeRequest()
request.set_InstanceId(instance_id)
request.set_accept_format("JSON")
code, headers, body = client.get_response(request)
if target_status == 'Deleted' and code == 404:
mylogger.info("delete ecs instance(%s) success" % instance_id)
break
if code != 200:
mylogger.error("Failed to describe ecs instance, statusCode=%s, message=%s" % (status, body))
break
status = json.loads(body.decode('utf-8')).get('Status')
if status == target_status:
mylogger.info(
"ecs instance(%s) status has changed to %s, wait 20 seconds" % (instance_id, target_status))
time.sleep(20)
break
else:
mylogger.info(
"ecs instance(%s) status is %s, wait for changing to %s" % (instance_id, status, target_status))
time.sleep(10)
@staticmethod
def delete_all_test_ecs_instance(client):
mylogger.info("list all ecs instances")
request = DescribeInstancesRequest.DescribeInstancesRequest()
request.set_PageNumber(1)
request.set_PageSize(30)
content = client.do_action_with_exception(request)
response = json.loads(content.decode('utf-8'))
mylogger.info("success! TotalCount = %s", response.get('TotalCount'))
instances = response.get('Instances').get('Instance')
for instance in instances:
instance_name = instance.get('InstanceName')
if instance_name.startswith('SdkIntegrationTestInstance'):
create_time = int(instance_name[26:len(instance_name)])
current_time = int(time.time())
                if current_time - create_time < 3600:
                    mylogger.info("found undeleted ecs instance(%s) created within the last 60 minutes, try to delete next time"
% instance_name)
else:
mylogger.info("found undeleted ecs instance(%s), status=%s, try to delete it."
                                  % (instance_name, instance['Status']))
if instance['Status'] == "Running":
# running -> stopped
TestEcsIntegration.stop_instance(client, instance['InstanceId'])
if instance['Status'] == "Stopped":
# stopped -> deleted
TestEcsIntegration.delete_instance(client, instance['InstanceId'])
# wait
TestEcsIntegration.wait_for_instance(client, instance['InstanceId'], 'Deleted')
|
py | b41372ec66ee7a897a9305389bbff0477d04ee0e | #!/usr/bin/python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader
from model import KGEModel
from dataloader import TrainDataset
from dataloader import BidirectionalOneShotIterator
def parse_args(args=None):
parser = argparse.ArgumentParser(
description='Training and Testing Knowledge Graph Embedding Models',
usage='train.py [<args>] [-h | --help]'
)
parser.add_argument('--cuda', action='store_true', help='use GPU')
parser.add_argument('--do_train', action='store_true')
parser.add_argument('--use_adadelta_optim', action='store_true')
parser.add_argument('--do_valid', action='store_true')
parser.add_argument('--do_test', action='store_true')
parser.add_argument('--evaluate_train', action='store_true', help='Evaluate on training data')
parser.add_argument('--countries', action='store_true', help='Use Countries S1/S2/S3 datasets')
parser.add_argument('--regions', type=int, nargs='+', default=None,
help='Region Id for Countries S1/S2/S3 datasets, DO NOT MANUALLY SET')
parser.add_argument('--data_path', type=str, default=None)
parser.add_argument('--model', default='TransE', type=str)
parser.add_argument('-de', '--double_entity_embedding', action='store_true')
parser.add_argument('-dr', '--double_relation_embedding', action='store_true')
parser.add_argument('--mde_score', action='store_true')
parser.add_argument('-gamma_1', '--gamma_1', default=2, type=int)
parser.add_argument('-gamma_2', '--gamma_2', default=2, type=int)
parser.add_argument('-beta_1', '--beta_1', default=1, type=int)
parser.add_argument('-beta_2', '--beta_2', default=1, type=int)
parser.add_argument('-n', '--negative_sample_size', default=128, type=int)
parser.add_argument('-d', '--hidden_dim', default=500, type=int)
parser.add_argument('-g', '--gamma', default=12.0, type=float)
parser.add_argument('-adv', '--negative_adversarial_sampling', action='store_true')
parser.add_argument('-a', '--adversarial_temperature', default=1.0, type=float)
parser.add_argument('-b', '--batch_size', default=1024, type=int)
parser.add_argument('-r', '--regularization', default=0.0, type=float)
parser.add_argument('--test_batch_size', default=4, type=int, help='valid/test batch size')
parser.add_argument('--uni_weight', action='store_true',
help='Otherwise use subsampling weighting like in word2vec')
parser.add_argument('-lr', '--learning_rate', default=0.0001, type=float)
parser.add_argument('-cpu', '--cpu_num', default=10, type=int)
parser.add_argument('-init', '--init_checkpoint', default=None, type=str)
parser.add_argument('-save', '--save_path', default=None, type=str)
parser.add_argument('--max_steps', default=100000, type=int)
parser.add_argument('--warm_up_steps', default=None, type=int)
parser.add_argument('--save_checkpoint_steps', default=10000, type=int)
parser.add_argument('--valid_steps', default=20, type=int)
parser.add_argument('--log_steps', default=100, type=int, help='train log every xx steps')
parser.add_argument('--test_log_steps', default=100, type=int, help='valid/test log every xx steps')
parser.add_argument('--nentity', type=int, default=0, help='DO NOT MANUALLY SET')
parser.add_argument('--nrelation', type=int, default=0, help='DO NOT MANUALLY SET')
return parser.parse_args(args)
def override_config(args):
'''
Override model and data configuration
'''
with open(os.path.join(args.init_checkpoint, 'config.json'), 'r') as fjson:
argparse_dict = json.load(fjson)
args.countries = argparse_dict['countries']
if args.data_path is None:
args.data_path = argparse_dict['data_path']
args.model = argparse_dict['model']
args.double_entity_embedding = argparse_dict['double_entity_embedding']
args.double_relation_embedding = argparse_dict['double_relation_embedding']
args.hidden_dim = argparse_dict['hidden_dim']
args.test_batch_size = argparse_dict['test_batch_size']
def save_model(model, optimizer, save_variable_list, args):
'''
Save the parameters of the model and the optimizer,
as well as some other variables such as step and learning_rate
'''
argparse_dict = vars(args)
with open(os.path.join(args.save_path, 'config.json'), 'w') as fjson:
json.dump(argparse_dict, fjson)
torch.save({
**save_variable_list,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict()},
os.path.join(args.save_path, 'checkpoint')
)
entity_embedding = model.entity_embedding.detach().cpu().numpy()
np.save(
os.path.join(args.save_path, 'entity_embedding'),
entity_embedding
)
relation_embedding = model.relation_embedding.detach().cpu().numpy()
print('relation_emb')
print(relation_embedding)
np.save(
os.path.join(args.save_path, 'relation_embedding'),
relation_embedding
)
def read_triple(file_path, entity2id, relation2id):
'''
Read triples and map them into ids.
'''
triples = []
with open(file_path) as fin:
for line in fin:
h, r, t = line.strip().split('\t')
triples.append((entity2id[h], relation2id[r], entity2id[t]))
return triples
def set_logger(args):
'''
Write logs to checkpoint and console
'''
if args.do_train:
log_file = os.path.join(args.save_path or args.init_checkpoint, 'train.log')
else:
log_file = os.path.join(args.save_path or args.init_checkpoint, 'test.log')
logging.basicConfig(
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO,
datefmt='%Y-%m-%d %H:%M:%S',
filename=log_file,
filemode='w'
)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
def log_metrics(mode, step, metrics):
'''
Print the evaluation logs
'''
if len(metrics) > 0 and mode == 'Test':
for metric in metrics:
logging.info('%s %s at step %d: %f' % (mode, metric, step, metrics[metric]))
#logging.info('%s %s at step %d: %f' % (mode, 'MR', step, metrics['MR']))
#logging.info('%s %s at step %d: %f' % (mode, 'MRR', step, metrics['MRR']))
#logging.info('%s %s at step %d: %f' % (mode, 'HITS@10', step, metrics['HITS@10']))
#logging.info('%s %s at step %d: %f' % (mode, 'HITS@3', step, metrics['HITS@3']))
#logging.info('%s %s at step %d: %f' % (mode, 'HITS@1', step, metrics['HITS@1']))
# logging.info('%s %s at step %d: %f' % (mode, 'AUC : ', step, metrics['auc']))
# logging.info('%s %s at step %d: %f' % (mode, 'AUC_PR : ', step, metrics['auc_pr']))
else:
for metric in metrics:
logging.info('%s %s at step %d: %f' % (mode, metric, step, metrics[metric]))
def main(args):
if (not args.do_train) and (not args.do_valid) and (not args.do_test):
        raise ValueError('one of train/val/test mode must be chosen.')
if args.init_checkpoint:
override_config(args)
elif args.data_path is None:
        raise ValueError('one of init_checkpoint/data_path must be chosen.')
if args.do_train and args.save_path is None:
raise ValueError('Where do you want to save your trained model?')
if args.save_path and not os.path.exists(args.save_path):
os.makedirs(args.save_path)
# Write logs to checkpoint and console
set_logger(args)
with open(os.path.join(args.data_path, 'entities.dict')) as fin:
entity2id = dict()
for line in fin:
eid, entity = line.strip().split('\t')
entity2id[entity] = int(eid)
with open(os.path.join(args.data_path, 'relations.dict')) as fin:
relation2id = dict()
for line in fin:
rid, relation = line.strip().split('\t')
relation2id[relation] = int(rid)
# Read regions for Countries S* datasets
if args.countries:
regions = list()
with open(os.path.join(args.data_path, 'regions.list')) as fin:
for line in fin:
region = line.strip()
regions.append(entity2id[region])
args.regions = regions
nentity = len(entity2id)
nrelation = len(relation2id)
#all entities
args.all_entities = list(entity2id.values())
args.nentity = nentity
args.nrelation = nrelation
print('negative_sample_size', args.negative_sample_size)
print('batch_size', args.batch_size)
print('dimension', args.hidden_dim)
logging.info('Model: %s' % args.model)
logging.info('Data Path: %s' % args.data_path)
logging.info('#entity: %d' % nentity)
logging.info('#relation: %d' % nrelation)
train_triples = read_triple(os.path.join(args.data_path, 'train.txt'), entity2id, relation2id)
logging.info('#train: %d' % len(train_triples))
valid_triples = read_triple(os.path.join(args.data_path, 'valid.txt'), entity2id, relation2id)
logging.info('#valid: %d' % len(valid_triples))
test_triples = read_triple(os.path.join(args.data_path, 'test.txt'), entity2id, relation2id)
logging.info('#test: %d' % len(test_triples))
#All true triples
all_true_triples = train_triples + valid_triples + test_triples
print('')
kge_model = KGEModel(
model_name=args.model,
nentity=nentity,
nrelation=nrelation,
hidden_dim=args.hidden_dim,
gamma=args.gamma,
double_entity_embedding=args.double_entity_embedding,
double_relation_embedding=args.double_relation_embedding
)
logging.info('Model Parameter Configuration:')
for name, param in kge_model.named_parameters():
logging.info('Parameter %s: %s, require_grad = %s' % (name, str(param.size()), str(param.requires_grad)))
if args.cuda:
kge_model = kge_model.cuda()
if args.do_train:
# Set training dataloader iterator
train_dataloader_head = DataLoader(
TrainDataset(train_triples, nentity, nrelation, args.negative_sample_size, 'head-batch'),
batch_size=args.batch_size,
shuffle=True,
num_workers=max(1, args.cpu_num//2),
collate_fn=TrainDataset.collate_fn
)
train_dataloader_tail = DataLoader(
TrainDataset(train_triples, nentity, nrelation, args.negative_sample_size, 'tail-batch'),
batch_size=args.batch_size,
shuffle=True,
num_workers=max(1, args.cpu_num//2),
collate_fn=TrainDataset.collate_fn
)
train_iterator = BidirectionalOneShotIterator(train_dataloader_head, train_dataloader_tail)
# Set training configuration
current_learning_rate = args.learning_rate
if args.use_adadelta_optim :
optimizer = torch.optim.Adadelta(
filter(lambda p: p.requires_grad, kge_model.parameters()),
lr=current_learning_rate, weight_decay=1e-6
)
else:
optimizer = torch.optim.Adam(
filter(lambda p: p.requires_grad, kge_model.parameters()),
lr=current_learning_rate
)
if args.warm_up_steps:
warm_up_steps = args.warm_up_steps
else:
warm_up_steps = args.max_steps // 2
if args.init_checkpoint:
# Restore model from checkpoint directory
logging.info('Loading checkpoint %s...' % args.init_checkpoint)
checkpoint = torch.load(os.path.join(args.init_checkpoint, 'checkpoint'))
init_step = checkpoint['step']
kge_model.load_state_dict(checkpoint['model_state_dict'])
if args.do_train:
current_learning_rate = checkpoint['current_learning_rate']
warm_up_steps = checkpoint['warm_up_steps']
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
else:
        logging.info('Randomly Initializing %s Model...' % args.model)
init_step = 0
step = init_step
logging.info('Start Training...')
logging.info('init_step = %d' % init_step)
logging.info('batch_size = %d' % args.batch_size)
logging.info('negative_sample_size = %d' % args.negative_sample_size)
logging.info('hidden_dim = %d' % args.hidden_dim)
logging.info('gamma = %f' % args.gamma)
logging.info('negative_adversarial_sampling = %s' % str(args.negative_adversarial_sampling))
if args.negative_adversarial_sampling:
logging.info('adversarial_temperature = %f' % args.adversarial_temperature)
# Set valid dataloader as it would be evaluated during training
if args.do_train:
if current_learning_rate == 0:
current_learning_rate = args.learning_rate
logging.info('learning_rate = %f' % current_learning_rate)
training_logs = []
#Training Loop
for step in range(init_step, args.max_steps):
print('step : ', step, '/', args.max_steps)
log = kge_model.train_step(kge_model, optimizer, train_iterator, args)
training_logs.append(log)
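            # At each warm-up milestone, decay the learning rate by 10x and
            # push the next milestone 3x further out.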
if step >= warm_up_steps:
current_learning_rate = current_learning_rate / 10
logging.info('Change learning_rate to %f at step %d' % (current_learning_rate, step))
if args.use_adadelta_optim:
optimizer = torch.optim.Adadelta(
filter(lambda p: p.requires_grad, kge_model.parameters()),
lr=current_learning_rate, weight_decay=1e-6
)
else:
optimizer = torch.optim.Adam(
filter(lambda p: p.requires_grad, kge_model.parameters()),
lr=current_learning_rate
)
warm_up_steps = warm_up_steps * 3
if step % args.save_checkpoint_steps == 0:
save_variable_list = {
'step': step,
'current_learning_rate': current_learning_rate,
'warm_up_steps': warm_up_steps
}
save_model(kge_model, optimizer, save_variable_list, args)
if step % args.log_steps == 0:
metrics = {}
for metric in training_logs[0].keys():
metrics[metric] = sum([log[metric] for log in training_logs])/len(training_logs)
log_metrics('Training average', step, metrics)
training_logs = []
if args.do_valid and step > 90 and step % args.valid_steps == 0:
logging.info('Evaluating on Valid Dataset...')
metrics = kge_model.test_step(kge_model, valid_triples, all_true_triples, args)
log_metrics('Valid', step, metrics)
if args.do_test and step > 20 and step % args.valid_steps == 0:
logging.info('Evaluating on Test Dataset...')
metrics = kge_model.test_step(kge_model, test_triples, all_true_triples, args)
log_metrics('Test', step, metrics)
save_variable_list = {
'step': step,
'current_learning_rate': current_learning_rate,
'warm_up_steps': warm_up_steps
}
save_model(kge_model, optimizer, save_variable_list, args)
if args.do_valid:
logging.info('Evaluating on Valid Dataset...')
metrics = kge_model.test_step(kge_model, valid_triples, all_true_triples, args)
log_metrics('Valid', step, metrics)
if args.do_test:
logging.info('Evaluating on Test Dataset...')
metrics = kge_model.test_step(kge_model, test_triples, all_true_triples, args)
log_metrics('Test', step, metrics)
if args.evaluate_train:
logging.info('Evaluating on Training Dataset...')
metrics = kge_model.test_step(kge_model, train_triples, all_true_triples, args)
        log_metrics('Train', step, metrics)
if __name__ == '__main__':
main(parse_args())
|
py | b41373135cff8bfb6664f8315a06c122a6bfd57b | """Write a function in python to count the number of lowercase
alphabets present in a text file “happy.txt"""
def lowercase():
with open("happy.txt") as F:
count_lower = 0
count_upper = 0
value = F.read()
for i in value:
if i.islower():
count_lower += 1
elif i.isupper():
count_upper += 1
print("The total number of lower case letters are", count_lower)
print("The total number of upper case letters are", count_upper)
print("The total number of letters are", count_lower + count_upper)
if __name__ == "__main__":
lowercase()
|
py | b41373fa9640d1c60c7b867c8ce75dc49386499d | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
# http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
InvalidService defines a invalid model handler for testing purpose.
"""
def handle(data, context):
# This model is created to test reporting of an error in a batch of requests
if data:
context.set_http_response_status(code=507, idx=0)
return ["Invalid response"]
|
py | b4137417a59bedceba8610317fb58da85d1d9cad | from hackathon.game.game import Game
from hackathon.game.map import Map
from hackathon.game.noisemap import NoiseMap2
from hackathon.game.robot import Robot
from hackathon.game.team import Team
|
py | b41375d3c52b8baed69023c78be108d1aee17550 | from discord.ext import commands
class DownBad(commands.Cog):
def __init__(self, client):
self.client = client
@commands.Cog.listener()
async def on_raw_reaction_add(self, payload):
        if payload.emoji.id == 932028197162328114:
print("hit")
def setup(client):
client.add_cog(DownBad(client))
|
py | b41376b4acd0b99643fd382a2aa81a494f0d47cb | def hex_to_color (hex):
hex = str (hex)
if hex.startswith ('0x'):
hex = hex[2:]
if len (hex) != 6:
raise RuntimeError (hex + ' is not a hex color')
red = int ('0x' + hex[0:2], 0)
green = int ('0x' + hex[2:4], 0)
blue = int ('0x' + hex[4:6], 0)
return Color (*map (clampInt, [red, green, blue]))
def clampInt (value):
value = int (value)
if value > 255:
return 255
elif value < 0:
return 0
else:
return value
def clampFloat (value):
value = float (value)
if value > 1.0:
return 1.0
elif value < 0.0:
return 0.0
else:
return value
class Color:
def __init__ (self, red, green, blue):
self.red = float (red)
self.green = float (green)
self.blue = float (blue)
def interpolate (self, c, percent):
percent = float (percent)
if percent > 1.0 or percent < 0.0:
raise RuntimeError ('Cannot interpolate color: perecent out of range')
return ((c * percent) + (self * (1.0 - percent)))
def __add__ (self, c):
r = self.red + c.red
g = self.green + c.green
b = self.blue + c.blue
return Color (r, g, b)
def __mul__ (self, scalar):
r = self.red * scalar
g = self.green * scalar
b = self.blue * scalar
return Color (r, g, b)
def __str__ (self):
rgb = 'rgb('
rgb += str(int(self.red))+ ','
rgb += str(int(self.green))+ ','
rgb += str(int(self.blue))+ ')'
return rgb
red = Color (255, 0, 0)
green = Color (0, 255, 0)
blue = Color (0, 0, 255)
black = Color (0, 0, 0)
white = Color (255, 255, 255)
|
py | b41376e891fc1996a6082d7fe5079c275351235d | import numpy as np
from datetime import datetime
import math
from tqdm import tqdm
import logging
def displacement_err(data):
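    # Total variation distance between the 50-bin histogram of `data` and a
    # uniform histogram, normalised by the number of samples (0 = uniform).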
n = 50
uni = np.full(n, len(data) / n)
binned, _ = np.histogram(data, n)
displacement = np.sum(np.abs(binned - uni)) / 2
return displacement / len(data)
class SampleConfig():
def __init__(self, date_limit=datetime(2019, 1, 1), max_pp=7000, n_bins=200):
self.date_limit = date_limit
self.max_pp = max_pp
self.n_bins = n_bins
class SampleFunctionGenerator():
def __init__(self, subset, sample_config):
self.subset = subset
self.date_limit = sample_config.date_limit
self.max_pp = sample_config.max_pp
self.n_bins = sample_config.n_bins
def greedy(self, prop=.05, fill_factor=.5):
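        # Greedily fill pp bins from highest to lowest: for each bin still
        # under its cap, pull scores from users whose estimated pp lies in
        # that bin's range, scaled by u_prop so no bin overshoots the cap.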
n_scores = self.subset.scores_high.count()
bins_score_cnt, sample_func = np.zeros(
self.n_bins), np.zeros(self.n_bins)
bin_width = self.max_pp / self.n_bins
bin_cap = math.ceil(n_scores * prop / self.n_bins)
bins_hist = []
for i in tqdm(range(self.n_bins - 1, -1, -1)):
diff = bin_cap - bins_score_cnt[i]
pp_floor, pp_ceil = i * bin_width, (i + 1) * bin_width
if diff > 0:
pipeline = self.__pp_histogram_pipeline(pp_floor, pp_ceil)
u_bins_score_cnt = np.zeros(self.n_bins)
for bin_cnt in self.subset.scores_high.aggregate(pipeline):
u_bins_score_cnt[int(bin_cnt['_id'])] = bin_cnt['count']
curr_range_scores = u_bins_score_cnt[i]
should_resize = curr_range_scores > diff
u_prop = (diff / curr_range_scores) * \
fill_factor if should_resize else 1
bins_score_cnt += u_bins_score_cnt * u_prop
sample_func[i] = u_prop
bins_hist.append(bins_score_cnt.copy())
return sample_func, bins_score_cnt / bin_cap, bins_hist
def pdf(self, dist, prop=.05):
logging.debug(f"Sampling for {100 * prop}% of scores:")
scores = list(
self.subset.scores_high.find(
{
'date': {
'$gt': self.date_limit
}
},
{
'mlpp.est_user_pp': 1,
'_id': 0
}
)
)
pp_data = [s['mlpp']['est_user_pp'] for s in scores]
logging.debug(f"Aggregated scores from {self.subset.scores_high.name}")
best_params = dist.fit(pp_data)
arg = best_params[:-2]
loc = best_params[-2]
scale = best_params[-1]
def pdf(i): return dist.pdf(i, loc=loc, scale=scale, *arg)
logging.debug(f"Fitting to {dist.name}")
t = self.__dist_threshold(pdf, prop)
func = []
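        # Build the per-pp sampling probability: where the fitted density
        # exceeds the threshold t, keep each score with probability t/p;
        # elsewhere keep every score.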
for i in range(1, self.max_pp + 1):
p = pdf(i)
if p > t:
func.append(t/p)
else:
func.append(1)
logging.debug(f"Generated sampling function")
return func
def __field_histogram_pipeline(_, field, bin_width):
return [
{
'$set': {
'range_i': {
'$floor': {
'$divide': [field, bin_width]
}
}
}
},
{
'$group': {
'_id': '$range_i',
'count': {
'$sum': 1
}
}
}, {
'$sort': {
'_id': 1
}
}
]
def __pp_histogram_pipeline(self, pp_floor, pp_ceil):
user_ids_in_range = [u['_id'] for u in self.subset.user_stats.find(
{
'mlpp.est_current_pp': {
'$gte': pp_floor,
'$lt': pp_ceil
}
},
{
'_id': 1
}
)]
pipeline = [
{
'$match': {
'user_id': {
'$in': user_ids_in_range
},
'date': {
'$gt': self.date_limit
}
}
},
*self.__field_histogram_pipeline('$mlpp.est_user_pp', pp_ceil - pp_floor)
]
return pipeline
def __dist_threshold(self, pdf, f_prop):
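        # Binary-search for a cap t such that the pdf clipped at t and summed
        # over 1..max_pp lands within 5% of the requested proportion f_prop.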
pdf_y = list(map(pdf, np.arange(1, self.max_pp + 1)))
pdf_y = np.asarray(pdf_y)
start, end = 0, 1
while(True):
mid = (start + end) / 2
capped_y = np.copy(pdf_y)
capped_y[pdf_y > mid] = mid
prop = np.sum(capped_y)
if (abs(f_prop - prop) / f_prop < .05):
return mid
if (prop > f_prop):
end = mid
else:
start = mid
|
py | b413772e88680c8d69c78ae939f186a00d6dcd4a | import sys, getopt
sys.path.append('.')
import RTIMU
import os.path
import time
import math
import socket
import json
SETTINGS_FILE = "RTIMULib"
print("Using settings file " + SETTINGS_FILE + ".ini")
if not os.path.exists(SETTINGS_FILE + ".ini"):
print("Settings file does not exist, will be created")
s = RTIMU.Settings(SETTINGS_FILE)
imu = RTIMU.RTIMU(s)
print("IMU Name: " + imu.IMUName())
if (not imu.IMUInit()):
print("IMU Init Failed")
sys.exit(1)
else:
print("IMU Init Succeeded")
# this is a good time to set any fusion parameters
imu.setSlerpPower(0.02)
imu.setGyroEnable(True)
imu.setAccelEnable(True)
imu.setCompassEnable(True)
poll_interval = imu.IMUGetPollInterval()
print("Recommended Poll Interval: %dmS\n" % poll_interval)
i=0
# while True:
while i<5:
if imu.IMURead():
# x, y, z = imu.getFusionData()
# print("%f %f %f" % (x,y,z))
data = imu.getIMUData()
fusionPose = data["fusionPose"]
print("r: %f p: %f y: %f" % (math.degrees(fusionPose[0]),
math.degrees(fusionPose[1]), math.degrees(fusionPose[2])))
time.sleep(poll_interval/2.0*1.0/1000.0)
i=i+1
time.sleep(poll_interval/2.0*1.0/1000.0)
|
py | b413780197570eabc225ba1184880d9b0b37e2bb | import time
from sklearn import neural_network
from sklearn.metrics import classification_report, accuracy_score, make_scorer
from sklearn.model_selection import ShuffleSplit, cross_val_score
from src.data import Data
def train_test(x, y):
nn = neural_network.MLPClassifier(verbose=1)
# shuffle = ShuffleSplit(train_size=.7, test_size=.2, n_splits=5)
scores = cross_val_score(nn, x, y, cv=3)
print("Cross validation scores:{}".format(scores))
print("Mean cross validation score:{:2f}".format(scores.mean()))
print("Finish training")
# def test():
#
# x_test_crop,y_test_crop=data.get_data(data.TEST_CROP_DIR)
# print("Start training")
# rfc = neural_network.MLPClassifier(verbose=1)
# target_names = ['class 1', 'class 2', 'class 3', 'class 4', 'class 5',
# 'class 6', 'class 7', 'class 8', 'class 9', 'class 10',
# 'class 11', 'class 12', 'class 13', 'class 14', 'class 15']
# rfc.fit(x,y)
# y_pred_crop=rfc.predict(x_test_crop)
# print(classification_report(y_test_crop, y_pred_crop, target_names=target_names))
def classification_report_with_accuracy_score(y_true, y_pred):
print(classification_report(y_true, y_pred)) # print classification report
return accuracy_score(y_true, y_pred) # return accuracy score
if __name__ == "__main__":
start = time.time()
# test()
data = Data()
x, y = data.get_data(data.CROP_DIR)
rfc = neural_network.MLPClassifier(verbose=1)
nested_score = cross_val_score(rfc, X=x, y=y, cv=3, scoring=make_scorer(classification_report_with_accuracy_score))
print(nested_score)
# train_test(x, y)
end = time.time()
print('Running time: %s Seconds' % (end - start))
|
py | b413780e7241e327b9ceb0f8a2689c55d8516630 | import unittest
import dolphindb as ddb
from numpy import repeat
from numpy.testing import assert_array_equal
from pandas.testing import assert_frame_equal
from setup import HOST, PORT, WORK_DIR
class DBInfo:
dfsDBName = 'dfs://testLoadTable'
diskDBName = WORK_DIR + '/testLoadTable'
table1 = 'tb1'
table2 = 'tb2'
def create_dfs_dimension_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db=database(dbPath,RANGE,1..10)
n=100000
tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10,n) as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
db.createTable(tdata,`{tb1}).append!(tdata)
db.createTable(tdata,`{tb2}).append!(tdata)
'''.format(db=DBInfo.dfsDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
def create_dfs_range_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db=database(dbPath,RANGE,0..10*10000+1)
n=100000
tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
db.createPartitionedTable(tdata,`{tb1},`id).append!(tdata)
db.createPartitionedTable(tdata,`{tb2},`id).append!(tdata)
'''.format(db=DBInfo.dfsDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
def create_dfs_hash_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db=database(dbPath,HASH,[INT,10])
n=100000
tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10,n) as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
db.createPartitionedTable(tdata,`{tb1},`id).append!(tdata)
db.createPartitionedTable(tdata,`{tb2},`id).append!(tdata)
'''.format(db=DBInfo.dfsDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
def create_dfs_value_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db=database(dbPath,VALUE,2010.01.01..2010.01.30)
n=100000
tdata=table(sort(take(2010.01.01..2010.01.30, n)) as date, take(1..10,n) as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
db.createPartitionedTable(tdata,`{tb1},`date).append!(tdata)
db.createPartitionedTable(tdata,`{tb2},`date).append!(tdata)
'''.format(db=DBInfo.dfsDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
def create_dfs_list_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db=database(dbPath,LIST,[`AMD`QWE`CES,`DOP`ASZ,`FSD`BBVC,`AWQ`DS])
n=100000
tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10,n) as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
db.createPartitionedTable(tdata,`{tb1},`sym).append!(tdata)
db.createPartitionedTable(tdata,`{tb2},`sym).append!(tdata)
'''.format(db=DBInfo.dfsDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
def create_dfs_compo_range_range_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db1=database('',RANGE,2010.01M+0..12)
db2=database('',RANGE,1 3 5 7 9 11)
db=database(dbPath,COMPO,[db1,db2])
n=100000
tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10,n) as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
db.createPartitionedTable(tdata,`{tb1},`date`id).append!(tdata)
db.createPartitionedTable(tdata,`{tb2},`date`id).append!(tdata)
'''.format(db=DBInfo.dfsDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
def create_dfs_compo_range_hash_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db1=database('',RANGE,2010.01M+0..12)
db2=database('',HASH,[INT,10])
db=database(dbPath,COMPO,[db1,db2])
n=100000
tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10,n) as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
db.createPartitionedTable(tdata,`{tb1},`date`id).append!(tdata)
db.createPartitionedTable(tdata,`{tb2},`date`id).append!(tdata)
'''.format(db=DBInfo.dfsDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
def create_dfs_compo_range_value_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db1=database('',RANGE,2010.01M+0..12)
db2=database('',VALUE,1..10)
db=database(dbPath,COMPO,[db1,db2])
n=100000
tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10,n) as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
db.createPartitionedTable(tdata,`{tb1},`date`id).append!(tdata)
db.createPartitionedTable(tdata,`{tb2},`date`id).append!(tdata)
'''.format(db=DBInfo.dfsDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
def create_dfs_compo_range_list_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db1=database('',RANGE,2010.01M+0..12)
db2=database('',LIST,[`AMD`QWE`CES,`DOP`ASZ,`FSD`BBVC,`AWQ`DS])
db=database(dbPath,COMPO,[db1,db2])
n=100000
tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10,n) as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
db.createPartitionedTable(tdata,`{tb1},`date`sym).append!(tdata)
db.createPartitionedTable(tdata,`{tb2},`date`sym).append!(tdata)
'''.format(db=DBInfo.dfsDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
def create_dfs_compo_range_hash_list_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db1=database('',RANGE,2010.01M+0..12)
db2=database('',HASH,[INT,10])
db3=database('',LIST,[`AMD`QWE`CES,`DOP`ASZ,`FSD`BBVC,`AWQ`DS])
db=database(dbPath,COMPO,[db1,db2,db3])
n=100000
tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10,n) as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
db.createPartitionedTable(tdata,`{tb1},`date`id`sym).append!(tdata)
db.createPartitionedTable(tdata,`{tb2},`date`id`sym).append!(tdata)
'''.format(db=DBInfo.dfsDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
def create_dfs_compo_range_value_list_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db1=database('',RANGE,2010.01M+0..12)
db2=database('',VALUE,1..10)
db3=database('',LIST,[`AMD`QWE`CES,`DOP`ASZ,`FSD`BBVC,`AWQ`DS])
db=database(dbPath,COMPO,[db1,db2,db3])
n=100000
tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10,n) as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
db.createPartitionedTable(tdata,`{tb1},`date`id`sym).append!(tdata)
db.createPartitionedTable(tdata,`{tb2},`date`id`sym).append!(tdata)
'''.format(db=DBInfo.dfsDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
def create_disk_unpartitioned_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(exists(dbPath))
dropDatabase(dbPath)
db=database(dbPath)
n=100000
tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
saveTable(db,tdata,`{tb1})
saveTable(db,tdata,`{tb2})
'''.format(db=DBInfo.diskDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
def create_disk_range_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db=database(dbPath,RANGE,0..10*10000+1)
n=100000
tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, 1..n as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
db.createPartitionedTable(tdata,`{tb1},`id).append!(tdata)
db.createPartitionedTable(tdata,`{tb2},`id).append!(tdata)
'''.format(db=DBInfo.diskDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
def create_disk_hash_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db=database(dbPath,HASH,[INT,10])
n=100000
tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10,n) as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
db.createPartitionedTable(tdata,`{tb1},`id).append!(tdata)
db.createPartitionedTable(tdata,`{tb2},`id).append!(tdata)
'''.format(db=DBInfo.diskDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
def create_disk_value_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db=database(dbPath,VALUE,2010.01.01..2010.01.30)
n=100000
tdata=table(sort(take(2010.01.01..2010.01.30, n)) as date, take(1..10,n) as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
db.createPartitionedTable(tdata,`{tb1},`date).append!(tdata)
db.createPartitionedTable(tdata,`{tb2},`date).append!(tdata)
'''.format(db=DBInfo.diskDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
def create_disk_list_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db=database(dbPath,LIST,[`AMD`QWE`CES,`DOP`ASZ,`FSD`BBVC,`AWQ`DS])
n=100000
tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10,n) as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
db.createPartitionedTable(tdata,`{tb1},`sym).append!(tdata)
db.createPartitionedTable(tdata,`{tb2},`sym).append!(tdata)
'''.format(db=DBInfo.diskDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
def create_disk_compo_range_range_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db1=database('',RANGE,2010.01M+0..12)
db2=database('',RANGE,1 3 5 7 9 11)
db=database(dbPath,COMPO,[db1,db2])
n=100000
tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10,n) as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
db.createPartitionedTable(tdata,`{tb1},`date`id).append!(tdata)
db.createPartitionedTable(tdata,`{tb2},`date`id).append!(tdata)
'''.format(db=DBInfo.diskDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
def create_disk_compo_range_hash_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db1=database('',RANGE,2010.01M+0..12)
db2=database('',HASH,[INT,10])
db=database(dbPath,COMPO,[db1,db2])
n=100000
tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10,n) as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
db.createPartitionedTable(tdata,`{tb1},`date`id).append!(tdata)
db.createPartitionedTable(tdata,`{tb2},`date`id).append!(tdata)
'''.format(db=DBInfo.diskDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
def create_disk_compo_range_value_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db1=database('',RANGE,2010.01M+0..12)
db2=database('',VALUE,1..10)
db=database(dbPath,COMPO,[db1,db2])
n=100000
tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10,n) as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
db.createPartitionedTable(tdata,`{tb1},`date`id).append!(tdata)
db.createPartitionedTable(tdata,`{tb2},`date`id).append!(tdata)
'''.format(db=DBInfo.diskDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
def create_disk_compo_range_list_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db1=database('',RANGE,2010.01M+0..12)
db2=database('',LIST,[`AMD`QWE`CES,`DOP`ASZ,`FSD`BBVC,`AWQ`DS])
db=database(dbPath,COMPO,[db1,db2])
n=100000
tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10,n) as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
db.createPartitionedTable(tdata,`{tb1},`date`sym).append!(tdata)
db.createPartitionedTable(tdata,`{tb2},`date`sym).append!(tdata)
'''.format(db=DBInfo.diskDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
def create_disk_compo_range_hash_list_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db1=database('',RANGE,2010.01M+0..12)
db2=database('',HASH,[INT,10])
db3=database('',LIST,[`AMD`QWE`CES,`DOP`ASZ,`FSD`BBVC,`AWQ`DS])
db=database(dbPath,COMPO,[db1,db2,db3])
n=100000
tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10,n) as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
db.createPartitionedTable(tdata,`{tb1},`date`id`sym).append!(tdata)
db.createPartitionedTable(tdata,`{tb2},`date`id`sym).append!(tdata)
'''.format(db=DBInfo.diskDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
def create_disk_compo_range_value_list_db():
s = ddb.session()
s.connect(HOST, PORT, "admin", "123456")
ddb_script = '''
login('admin','123456')
dbPath='{db}'
if(existsDatabase(dbPath))
dropDatabase(dbPath)
db1=database('',RANGE,2010.01M+0..12)
db2=database('',VALUE,1..10)
db3=database('',LIST,[`AMD`QWE`CES,`DOP`ASZ,`FSD`BBVC,`AWQ`DS])
db=database(dbPath,COMPO,[db1,db2,db3])
n=100000
tdata=table(sort(take(2010.01.01..2010.12.31, n)) as date, take(1..10,n) as id,take(`AMD`QWE`CES`DOP`ASZ`FSD`BBVC`AWQ`DS, n) as sym,rand(100,n) as val)
db.createPartitionedTable(tdata,`{tb1},`date`id`sym).append!(tdata)
db.createPartitionedTable(tdata,`{tb2},`date`id`sym).append!(tdata)
'''.format(db=DBInfo.diskDBName, tb1=DBInfo.table1, tb2=DBInfo.table2)
s.run(ddb_script)
s.close()
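# The create_* helpers above each rebuild a test database (DFS or on-disk) with the partition
# scheme named in the function and populate two tables with 100,000 rows of synthetic data.
# The tests below load those tables back through session.loadTable and compare the result
# against an equivalent server-side "select * from loadTable(...)" query.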
class LoadTableTest(unittest.TestCase):
@classmethod
def setUp(cls):
cls.s = ddb.session()
cls.s.connect(HOST, PORT, "admin", "123456")
dbPaths = [DBInfo.dfsDBName, DBInfo.diskDBName]
for dbPath in dbPaths:
script = """
if(existsDatabase('{dbPath}'))
dropDatabase('{dbPath}')
if(exists('{dbPath}'))
rmdir('{dbPath}', true)
""".format(dbPath=dbPath)
cls.s.run(script)
@classmethod
def tearDown(cls):
cls.s = ddb.session()
cls.s.connect(HOST, PORT, "admin", "123456")
dbPaths = [DBInfo.dfsDBName, DBInfo.diskDBName]
for dbPath in dbPaths:
script = """
if(existsDatabase('{dbPath}'))
dropDatabase('{dbPath}')
if(exists('{dbPath}'))
rmdir('{dbPath}', true)
""".format(dbPath=dbPath)
cls.s.run(script)
def test_loadTable_dfs_dimension(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_dimension_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_dfs_range(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_range_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_dfs_range_param_partitions(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_range_db()
self.assertRaises(RuntimeError, self.s.loadTable, tbName1, dbPath, [5000, 15000])
def test_loadTable_dfs_range_param_memoryMode(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_range_db()
with self.assertRaises(RuntimeError):
self.s.loadTable(tableName=tbName1, dbPath=dbPath, memoryMode=True)
def test_loadTable_dfs_hash(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_hash_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_dfs_hash_param_partitions(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_hash_db()
with self.assertRaises(RuntimeError):
self.s.loadTable(tableName=tbName1, dbPath=dbPath, partitions=[1, 2])
def test_loadTable_dfs_hash_param_memoryMode(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_hash_db()
with self.assertRaises(RuntimeError):
self.s.loadTable(tableName=tbName1, dbPath=dbPath, memoryMode=True)
def test_loadTable_dfs_value(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_value_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_dfs_value_param_partitions(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_value_db()
with self.assertRaises(RuntimeError):
self.s.loadTable(tableName=tbName1, dbPath=dbPath, partitions=["2010.01.01", "2010.01.30"])
def test_loadTable_dfs_value_param_memoryMode(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_value_db()
with self.assertRaises(RuntimeError):
self.s.loadTable(tableName=tbName1, dbPath=dbPath, memoryMode=True)
def test_loadTable_dfs_list(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_list_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_dfs_list_param_partitions(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_list_db()
with self.assertRaises(RuntimeError):
self.s.loadTable(tableName=tbName1, dbPath=dbPath, partitions=["`DOP", "`BBVC"])
def test_loadTable_dfs_list_param_memoryMode(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_list_db()
with self.assertRaises(RuntimeError):
self.s.loadTable(tableName=tbName1, dbPath=dbPath, memoryMode=True)
def test_loadTable_dfs_compo_range_range(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_compo_range_range_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_dfs_compo_range_range_param_partitions(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_compo_range_range_db()
with self.assertRaises(RuntimeError):
self.s.loadTable(tableName=tbName1, dbPath=dbPath, partitions=["2010.01.01", "2010.01.30"])
def test_loadTable_dfs_compo_range_range_param_memoryMode(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_compo_range_range_db()
with self.assertRaises(RuntimeError):
self.s.loadTable(tableName=tbName1, dbPath=dbPath, memoryMode=True)
def test_loadTable_dfs_compo_range_hash(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_compo_range_hash_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_dfs_compo_range_hash_param_partitions(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_compo_range_hash_db()
with self.assertRaises(RuntimeError):
self.s.loadTable(tableName=tbName1, dbPath=dbPath, partitions=["2010.01.01", "2010.01.30"])
def test_loadTable_dfs_compo_range_hash_param_memoryMode(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_compo_range_hash_db()
with self.assertRaises(RuntimeError):
self.s.loadTable(tableName=tbName1, dbPath=dbPath, memoryMode=True)
def test_loadTable_dfs_compo_range_value(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_compo_range_value_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_dfs_compo_range_value_param_partitions(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_compo_range_value_db()
with self.assertRaises(RuntimeError):
self.s.loadTable(tableName=tbName1, dbPath=dbPath, partitions=["2010.01.01", "2010.01.30"])
def test_loadTable_dfs_compo_range_value_param_memoryMode(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_compo_range_value_db()
with self.assertRaises(RuntimeError):
self.s.loadTable(tableName=tbName1, dbPath=dbPath, memoryMode=True)
def test_loadTable_dfs_compo_range_list(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_compo_range_list_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_dfs_compo_range_list_param_partitions(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_compo_range_list_db()
with self.assertRaises(RuntimeError):
self.s.loadTable(tableName=tbName1, dbPath=dbPath, partitions=["2010.01.01", "2010.01.30"])
def test_loadTable_dfs_compo_range_list_param_memoryMode(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_compo_range_list_db()
with self.assertRaises(RuntimeError):
self.s.loadTable(tableName=tbName1, dbPath=dbPath, memoryMode=True)
def test_loadTable_dfs_compo_range_hash_list(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_compo_range_hash_list_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_dfs_compo_range_hash_list_param_partitions(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_compo_range_hash_list_db()
with self.assertRaises(RuntimeError):
self.s.loadTable(tableName=tbName1, dbPath=dbPath, partitions=["2010.01.01", "2010.01.30"])
def test_loadTable_dfs_compo_range_hash_list_param_memoryMode(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_compo_range_hash_list_db()
with self.assertRaises(RuntimeError):
self.s.loadTable(tableName=tbName1, dbPath=dbPath, memoryMode=True)
def test_loadTable_dfs_compo_range_value_list(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_compo_range_value_list_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_dfs_compo_range_value_list_param_partitions(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_compo_range_value_list_db()
with self.assertRaises(RuntimeError):
self.s.loadTable(tableName=tbName1, dbPath=dbPath, partitions=["2010.01.01", "2010.01.30"])
def test_loadTable_dfs_compo_range_value_list_param_memoryMode(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_compo_range_value_list_db()
with self.assertRaises(RuntimeError):
self.s.loadTable(tableName=tbName1, dbPath=dbPath, memoryMode=True)
def test_loadTable_disk_unpartitioned(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_unpartitioned_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_disk_range(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_range_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_disk_range_param_partitions(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_range_db()
rs = self.s.run("select * from loadTable('{db}','{tb}') where id<20001".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath, partitions=[5000, 15000])
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_disk_range_param_memoryMode(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_range_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
before = list(self.s.run("exec memSize from getSessionMemoryStat()"))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath, memoryMode=True)
after = list(self.s.run("exec memSize from getSessionMemoryStat()"))
assert_frame_equal(tmp.toDF(), rs)
assert_array_equal(after >= before, repeat(True, 4))
def test_loadTable_disk_hash(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_hash_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_disk_hash_param_partitions(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_hash_db()
rs = self.s.run("select * from loadTable('{db}','{tb}') where id in [1,3,5]".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath, partitions=[1, 3, 5])
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_disk_hash_param_memoryMode(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_hash_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
before = list(self.s.run("exec memSize from getSessionMemoryStat()"))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath, memoryMode=True)
after = list(self.s.run("exec memSize from getSessionMemoryStat()"))
assert_frame_equal(tmp.toDF(), rs)
assert_array_equal(after >= before, repeat(True, 4))
def test_loadTable_disk_value(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_value_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_disk_value_param_partitions(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_value_db()
rs = self.s.run("select * from loadTable('{db}','{tb}') where date in [2010.01.01, 2010.01.30]".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath, partitions=["2010.01.01", "2010.01.30"])
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_disk_value_param_memoryMode(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_value_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
before = list(self.s.run("exec memSize from getSessionMemoryStat()"))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath, memoryMode=True)
after = list(self.s.run("exec memSize from getSessionMemoryStat()"))
assert_frame_equal(tmp.toDF(), rs)
assert_array_equal(after >= before, repeat(True, 4))
def test_loadTable_disk_list(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_list_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_disk_list_param_partitions(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_list_db()
rs = self.s.run("select * from loadTable('{db}','{tb}') where sym in `DOP`ASZ`FSD`BBVC`AWQ`DS".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath, partitions=["DOP", "FSD", "AWQ"])
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_disk_list_param_memoryMode(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_list_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
before = list(self.s.run("exec memSize from getSessionMemoryStat()"))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath, memoryMode=True)
after = list(self.s.run("exec memSize from getSessionMemoryStat()"))
assert_frame_equal(tmp.toDF(), rs)
assert_array_equal(after >= before, repeat(True, 4))
def test_loadTable_disk_compo_range_range(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_compo_range_range_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_disk_compo_range_range_param_partitions(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_compo_range_range_db()
rs = self.s.run("select * from loadTable('{db}','{tb}') where "
"date between 2010.01.01:2010.01.31 "
"or date between 2010.04.01:2010.04.30".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath, partitions=["2010.01.01", "2010.04.25"])
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_disk_compo_range_range_param_memoryMode(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_compo_range_range_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
before = list(self.s.run("exec memSize from getSessionMemoryStat()"))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath, memoryMode=True)
after = list(self.s.run("exec memSize from getSessionMemoryStat()"))
assert_frame_equal(tmp.toDF(), rs)
assert_array_equal(after >= before, repeat(True, 4))
def test_loadTable_disk_compo_range_hash(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_compo_range_hash_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_disk_compo_range_hash_param_partitions(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_compo_range_hash_db()
rs = self.s.run("select * from loadTable('{db}','{tb}') where "
"date between 2010.01.01:2010.01.31 "
"or date between 2010.04.01:2010.04.30".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath, partitions=["2010.01.01", "2010.04.25"])
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_disk_compo_range_hash_param_memoryMode(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_compo_range_hash_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
before = list(self.s.run("exec memSize from getSessionMemoryStat()"))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath, memoryMode=True)
after = list(self.s.run("exec memSize from getSessionMemoryStat()"))
assert_frame_equal(tmp.toDF(), rs)
assert_array_equal(after >= before, repeat(True, 4))
def test_loadTable_disk_compo_range_value(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_compo_range_value_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_disk_compo_range_value_param_partitions(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_compo_range_value_db()
rs = self.s.run("select * from loadTable('{db}','{tb}') where "
"date between 2010.01.01:2010.01.31 "
"or date between 2010.04.01:2010.04.30".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath, partitions=["2010.01.01", "2010.04.25"])
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_disk_compo_range_value_param_memoryMode(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_compo_range_value_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
before = list(self.s.run("exec memSize from getSessionMemoryStat()"))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath, memoryMode=True)
after = list(self.s.run("exec memSize from getSessionMemoryStat()"))
assert_frame_equal(tmp.toDF(), rs)
assert_array_equal(after >= before, repeat(True, 4))
def test_loadTable_disk_compo_range_list(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_compo_range_list_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_disk_compo_range_list_param_partitions(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_compo_range_list_db()
rs = self.s.run("select * from loadTable('{db}','{tb}') where "
"date between 2010.01.01:2010.01.31 "
"or date between 2010.04.01:2010.04.30".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath, partitions=["2010.01.01", "2010.04.25"])
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_disk_compo_range_list_param_memoryMode(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_compo_range_list_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
before = list(self.s.run("exec memSize from getSessionMemoryStat()"))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath, memoryMode=True)
after = list(self.s.run("exec memSize from getSessionMemoryStat()"))
assert_frame_equal(tmp.toDF(), rs)
assert_array_equal(after >= before, repeat(True, 4))
def test_loadTable_disk_compo_range_hash_list(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_compo_range_hash_list_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_disk_compo_range_hash_list_param_partitions(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_compo_range_hash_list_db()
rs = self.s.run("select * from loadTable('{db}','{tb}') where "
"date between 2010.01.01:2010.01.31 "
"or date between 2010.04.01:2010.04.30".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath, partitions=["2010.01.01", "2010.04.25"])
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_disk_compo_range_hash_list_param_memoryMode(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_compo_range_hash_list_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
before = list(self.s.run("exec memSize from getSessionMemoryStat()"))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath, memoryMode=True)
after = list(self.s.run("exec memSize from getSessionMemoryStat()"))
assert_frame_equal(tmp.toDF(), rs)
assert_array_equal(after >= before, repeat(True, 4))
def test_loadTable_disk_compo_range_value_list(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_compo_range_value_list_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath)
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_disk_compo_range_value_list_param_partitions(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_compo_range_value_list_db()
rs = self.s.run("select * from loadTable('{db}','{tb}') where "
"date between 2010.01.01:2010.01.31 "
"or date between 2010.04.01:2010.04.30".format(db=dbPath, tb=tbName1))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath, partitions=["2010.01.01", "2010.04.25"])
assert_frame_equal(tmp.toDF(), rs)
def test_loadTable_disk_compo_range_value_list_param_memoryMode(self):
dbPath = DBInfo.diskDBName
tbName1 = DBInfo.table1
create_disk_compo_range_value_list_db()
rs = self.s.run("select * from loadTable('{db}','{tb}')".format(db=dbPath, tb=tbName1))
before = list(self.s.run("exec memSize from getSessionMemoryStat()"))
tmp = self.s.loadTable(tableName=tbName1, dbPath=dbPath, memoryMode=True)
after = list(self.s.run("exec memSize from getSessionMemoryStat()"))
assert_frame_equal(tmp.toDF(), rs)
assert_array_equal(after >= before, repeat(True, 4))
def test_loadTable_disk_value_partition_string_scalar(self):
myDBName=WORK_DIR+"/db1"
script='''
login("admin","123456")
if(exists("{dbName}"))
dropDatabase("{dbName}")
db=database("{dbName}", VALUE, ["AAA", "BBB", "CCC"])
t=table(take(["AAA", "BBB", "CCC"], 1000) as sym, rand(100.0, 1000) as val)
db.createPartitionedTable(t, "pt", "sym").append!(t)
'''.format(dbName=myDBName)
self.s.run(script)
res=self.s.loadTable(tableName="pt", dbPath=myDBName, partitions="AAA", memoryMode=True).toDF()
expected=self.s.run("select * from loadTable('{dbName}', 'pt') where sym='AAA'".format(dbName=myDBName))
assert_frame_equal(res, expected)
def test_loadTable_disk_value_partition_string_vector(self):
myDBName=WORK_DIR+"/db1"
script='''
login("admin","123456")
if(exists("{dbName}"))
dropDatabase("{dbName}")
db=database("{dbName}", VALUE, ["AAA", "BBB", "CCC"])
t=table(take(["AAA", "BBB", "CCC"], 1000) as sym, rand(100.0, 1000) as val)
db.createPartitionedTable(t, "pt", "sym").append!(t)
'''.format(dbName=myDBName)
self.s.run(script)
res=self.s.loadTable(tableName="pt", dbPath=myDBName, partitions=["AAA", "BBB"], memoryMode=True).toDF()
expected=self.s.run("select * from loadTable('{dbName}', 'pt') where sym='AAA' or sym='BBB'".format(dbName=myDBName))
assert_frame_equal(res, expected)
def test_loadTable_parameter(self):
dbPath = DBInfo.dfsDBName
tbName1 = DBInfo.table1
create_dfs_compo_range_value_db()
with self.assertRaises(TypeError):
self.s.loadTable(tableName_ERROR=tbName1, dbPath=dbPath,partitions=None, memoryMode=False)
with self.assertRaises(TypeError):
self.s.loadTable(tableName=tbName1, dbPath_ERROR=dbPath,partitions=None, memoryMode=False)
with self.assertRaises(TypeError):
self.s.loadTable(tableName=tbName1, dbPath=dbPath,partitions_ERROR=None, memoryMode=False)
with self.assertRaises(TypeError):
self.s.loadTable(tableName=tbName1, dbPath=dbPath,partitions=None, memoryMode_ERROR=False)
self.s.loadTable(tableName=tbName1, dbPath=dbPath,partitions=None, memoryMode=False)
if __name__ == '__main__':
unittest.main()
|
py | b41379c17b6dc9aacc81e75897845760eec15346 | """
Mask R-CNN
Train on a Greppy Metaverse generated dataset and infer depth.
Copyright (c) 2018 Achille, Inc.
Licensed under the MIT License (see LICENSE for details)
Written by Matthew Moore
------------------------------------------------------------
Usage: import the module (see Jupyter notebooks for examples), or run from
the command line as such:
# Train a new model starting from pre-trained COCO weights
python3 greppy.py train --dataset=/path/to/greppymetaverse/dataset --weights=coco --traindepth --variantsnotcomponents
# Resume training a model that you had trained earlier
python3 greppy.py train --dataset=/path/to/greppymetaverse/dataset --weights=last --traindepth --variantsnotcomponents
# Train a new model starting from ImageNet weights
python3 greppy.py train --dataset=/path/to/greppymetaverse/dataset --weights=imagenet --traindepth --variantsnotcomponents
# Run inference on an image
python3 greppy.py infer --weights=/path/to/weights/file.h5 --image=<URL or path to file> --depth=<URL or path to file> --variantsnotcomponents
"""
import os
import fnmatch
import sys
import json
import datetime
import numpy as np
import skimage.draw
import OpenEXR, Imath
import random, shutil, glob
# Automatically splits raw datasets between validation and training. Out of 100.
DEFAULT_TRAINING_SPLIT = 80
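# e.g. the default of 80 moves roughly 80% of scenes into training/ and the rest into
# validation/ (see split_dataset_into_dirs below).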
# Root directory of the project
ROOT_DIR = os.path.abspath("../../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn.config import Config
from mrcnn import model as modellib, utils
# Path to trained weights file
COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Directory to save logs and model checkpoints, if not provided
# through the command line argument --logs
DEFAULT_LOGS_DIR = os.path.join(ROOT_DIR, "logs")
############################################################
# Configurations
############################################################
class GreppyConfig(Config):
"""Configuration for training on the Nespresso dataset.
Derives from the base Config class and overrides some values.
"""
# Give the configuration a recognizable name
NAME = "greppy"
# Subclass/override to turn this off
TRAINED_ON_VARIANTS_NOT_COMPONENTS = False
# Override if not using depth
MEAN_PIXEL = np.array([123.7, 116.8, 103.9, 0.0])
IMAGE_CHANNEL_COUNT = 4 # override to 3 for non-depth (see the illustrative RGB-only sketch just after this class)
# We use a GPU with 12GB memory, which can fit two images.
# Adjust down if you use a smaller GPU.
IMAGES_PER_GPU = 2
# number of gpu's to use
GPU_COUNT = 1
# Number of classes (including background)
NUM_CLASSES = 1 # Override from the _dataset.json file
# Number of training steps per epoch
STEPS_PER_EPOCH = 100
# Skip detections with < 90% confidence
DETECTION_MIN_CONFIDENCE = 0.9
# Equivalent of classnames, loaded from the _dataset.json file
COMPONENT_URIS = [] # Override from the _dataset.json file
VARIANT_URIS = [] # Override from the _dataset.json file
IS_STEREO_CAMERA = False # Override from the _dataset.json file
# def init_from_dataset_dir(self, dataset_dir):
# dataset_dict = json.load(open(os.path.join(dataset_dir, '_dataset.json')))
# self.__class__.COMPONENT_URIS = dataset_dict['component_uris']
# self.__class__.NUM_CLASSES = 1 + len(dataset_dict['component_uris'])
# self.__class__.IS_STEREO_CAMERA = dataset_dict['camera']['is_stereo_camera']
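# Illustrative sketch (not used anywhere in this script): the constants above are meant to be
# overridden by subclassing, e.g. an RGB-only configuration with no depth channel. The actual
# runtime overrides are the TrainingConfig/InferenceConfig classes built in __main__ below.
class ExampleRGBOnlyGreppyConfig(GreppyConfig):
    IMAGE_CHANNEL_COUNT = 3  # plain RGB input
    MEAN_PIXEL = np.array([123.7, 116.8, 103.9])  # no depth channel mean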
############################################################
# Dataset
############################################################
# Get a list of all possible scenes
def _scene_prefixes(dataset_dir):
# print(dataset_dir)
dataset_prefixes = []
for root, dirs, files in os.walk(dataset_dir):
# one mask json file per scene so we can get the prefixes from them
for filename in fnmatch.filter(files, '*.json'):
dataset_prefixes.append(filename[0:0-len('.json')])
dataset_prefixes.sort()
# print(dataset_prefixes)
return dataset_prefixes
class GreppyDataset(utils.Dataset):
# Subclass to turn this off
USE_DEPTH_CHANNEL = True
# Subclass to turn this off
SHOULD_TRAIN_VARIANTS_NOT_COMPONENTS = False
# Equivalent of classnames, loaded from the _dataset.json file
COMPONENT_URIS = []
VARIANT_URIS = []
COMPONENT_URIS_INITED = False
IS_STEREO_CAMERA = False
def init_from_dataset_dir(self, dataset_dir):
if not self.__class__.COMPONENT_URIS_INITED:
self.__class__.COMPONENT_URIS_INITED = True
dataset_dict = json.load(open(os.path.join(dataset_dir, '_dataset.json')))
self.__class__.COMPONENT_URIS = dataset_dict['component_uris']
self.__class__.VARIANT_URIS = dataset_dict['variant_uris']
self.__class__.IS_STEREO_CAMERA = dataset_dict['camera']['is_stereo_camera']
def load_image(self, image_id):
# If image is not from this dataset, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != GreppyConfig.NAME:
return super(self.__class__, self).load_image(image_id)
# Nothng special unless we're using the depth channel
if not self.__class__.USE_DEPTH_CHANNEL:
return super(self.__class__, self).load_image(image_id)
# Otherwise load as rgb & load depth, return a [H,W,4] Numpy array
# Load image
image = skimage.io.imread(image_info['path'])
# If has an alpha channel, remove it. We're going to add depth as 4th
if image.shape[-1] == 4:
image = image[..., :3]
filename_postfix = ''
if self.__class__.IS_STEREO_CAMERA:
filename_postfix = '-left'
depth_data = self.load_exr(
image_info['prefix_dir'],
image_info['prefix'],
"depth"+filename_postfix,
image_info['height'],
image_info['width']
)
# FIXME TODO what's the best way to handle inf depth values?
max_depth = np.nanmax(depth_data[depth_data != np.inf])
if np.isinf(depth_data).any():
print("[WARN]",image_info['prefix'],"depth image has some 'inf' values, setting to 0, which depth cameras seem to do", flush=True)
depth_data[depth_data == np.inf] = 0
channels = [image[..., 0].astype(np.float32), image[..., 1].astype(np.float32), image[..., 2].astype(np.float32), depth_data.astype(np.float32)]
depth_image = np.stack(channels, axis=-1)
return depth_image
# file_kind examples: "depth-left", "variant-mask-left", "component-mask-left" (without the "-left" suffix for non-stereo datasets)
def load_exr(self, prefix_dir, prefix, file_kind, expected_height, expected_width):
exr_file = OpenEXR.InputFile(os.path.join(prefix_dir,prefix+"-"+file_kind+".exr"))
cm_dw = exr_file.header()['dataWindow']
exr_data = np.fromstring(
exr_file.channel('R', Imath.PixelType(Imath.PixelType.HALF)),
dtype=np.float16
)
exr_data.shape = (cm_dw.max.y - cm_dw.min.y + 1, cm_dw.max.x - cm_dw.min.x + 1) # rows, cols
if exr_data.shape[0] != expected_height:
print("[ERROR] ", prefix, file_kind, " != expected image height", exr_data.shape[0], expected_height)
if exr_data.shape[1] != expected_width:
print("[ERROR] ", prefix, file_kind, " width != image width", exr_data.shape[1], expected_width)
return exr_data
def load_subset(self, dataset_dir, subset):
"""Load a subset of the generated dataset.
dataset_dir: Root directory of the dataset.
subset: Subset to load: 'training' or 'validation'
"""
self.init_from_dataset_dir(dataset_dir)
if self.__class__.SHOULD_TRAIN_VARIANTS_NOT_COMPONENTS:
# Add classes with their ids for all the variants
for i, variant_uri in enumerate(self.__class__.VARIANT_URIS):
self.add_class(GreppyConfig.NAME, i, variant_uri)
else:
# Add classes with their ids for all the components
for i, component_uri in enumerate(self.__class__.COMPONENT_URIS):
self.add_class(GreppyConfig.NAME, i, component_uri)
# Train or validation dataset?
assert subset in ["training", "validation"]
dataset_dir = os.path.join(dataset_dir, subset)
# TODO FIXME only doing the left images
filename_postfix = ''
if self.__class__.IS_STEREO_CAMERA:
filename_postfix = '-left'
print("Loading dataset ", dataset_dir)
dataset_prefixes = _scene_prefixes(dataset_dir)
assert len(dataset_prefixes) > 0
for prefix in dataset_prefixes:
image_filename = prefix+'-rgb'+filename_postfix+'.jpg'
image_path = os.path.join(dataset_dir, image_filename)
image = skimage.io.imread(image_path)
height, width = image.shape[:2]
self.add_image(
GreppyConfig.NAME,
image_id=image_filename, # use file name as a unique image id
path=image_path,
width=width,
height=height,
prefix=prefix,
prefix_dir=dataset_dir
)
def load_mask(self, image_id):
"""Generate instance masks for an image.
Returns:
masks: A bool array of shape [height, width, instance count] with
one mask per instance.
class_ids: a 1D array of class IDs of the instance masks.
"""
# If image is not from this dataset, delegate to parent class.
image_info = self.image_info[image_id]
if image_info["source"] != GreppyConfig.NAME:
return super(self.__class__, self).load_mask(image_id)
# the json file has the information about all the possible pixels
masks_json = json.load(open(os.path.join(image_info['prefix_dir'], image_info['prefix']+'.json')))
# TODO FIXME only doing the left images
filename_postfix = ''
if self.__class__.IS_STEREO_CAMERA:
filename_postfix = '-left'
variant_data = self.load_exr(
image_info['prefix_dir'],
image_info['prefix'],
"variant-mask"+filename_postfix,
image_info['height'],
image_info['width']
)
component_data = self.load_exr(
image_info['prefix_dir'],
image_info['prefix'],
"component-mask"+filename_postfix,
image_info['height'],
image_info['width']
)
# If training variants: fetch each variant mask separately and add its class.
# If training components: for each variant in the scene, intersect its mask with each of its
# component masks; a component may yield no instance if it is not in view for that variant,
# so loop over the pairs and keep only the intersections that actually contain pixels.
# (A standalone numpy sketch of this intersection appears after this class.)
class_ids = []
masks_bool = []
if self.__class__.SHOULD_TRAIN_VARIANTS_NOT_COMPONENTS:
for variant_pixel_val_str, instance in masks_json["variants"]["masks_and_poses_by_pixel_value"].items():
variant_pixel_val = float(int(variant_pixel_val_str))
variant_data_copy = np.copy(variant_data)
variant_data_copy[variant_data_copy != variant_pixel_val] = 0
variant_data_copy[variant_data_copy == variant_pixel_val] = 1
if np.any(variant_data_copy):
masks_bool.append(variant_data_copy.astype(np.bool))
variant_class_id = self.__class__.VARIANT_URIS.index(instance['variant_uri'])
class_ids.append(variant_class_id)
else:
for variant_pixel_val_str, instance in masks_json["variants"]["masks_and_poses_by_pixel_value"].items():
variant_pixel_val = float(int(variant_pixel_val_str))
variant_data_copy = np.copy(variant_data)
variant_data_copy[variant_data_copy != variant_pixel_val] = 0
variant_data_copy[variant_data_copy == variant_pixel_val] = 1
for component_pixel_val_str, component_mask in masks_json["component_masks"].items():
# Filter to only the pixel values where the variants line up
if component_mask['variant_uri'] == instance['variant_uri']:
# Run intersection on this variant with this component
component_pixel_val = float(int(component_pixel_val_str))
component_data_copy = np.copy(component_data)
component_data_copy_sum_test = (component_data_copy == 106).sum()
component_data_copy[component_data_copy != component_pixel_val] = 0
component_data_copy[component_data_copy == component_pixel_val] = 1
intersected_data = np.bitwise_and(variant_data_copy.astype(np.bool), component_data_copy.astype(np.bool))
# intersection actually exists on this one
if np.any(intersected_data):
masks_bool.append(intersected_data)
component_class_id = self.__class__.COMPONENT_URIS.index(component_mask['component_uri'])
class_ids.append(component_class_id)
# Convert generate bitmap masks of all components in the image
# shape" [height, width, instance_count]
mask = np.zeros([image_info["height"], image_info["width"], 0], dtype=np.bool)
if len(masks_bool) > 0:
mask = np.stack(masks_bool, axis=-1)
return mask, np.array(class_ids)
def image_reference(self, image_id):
"""Return the path of the image."""
info = self.image_info[image_id]
if info["source"] == GreppyConfig.NAME:
return info["path"]
else:
super(self.__class__, self).image_reference(image_id)
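# Standalone sketch (illustrative, never called by the pipeline) of the mask intersection done
# in GreppyDataset.load_mask above: each EXR mask stores one pixel value per instance, so an
# instance's boolean mask is "pixels equal to that value", and a component instance is the
# bitwise AND of a variant mask with a component mask. The pixel values here are made up.
def _example_mask_intersection():
    variant_data = np.array([[1, 1, 0], [1, 2, 2]], dtype=np.float16)    # variant-mask EXR data
    component_data = np.array([[7, 8, 0], [7, 7, 8]], dtype=np.float16)  # component-mask EXR data
    variant_mask = (variant_data == 1)        # pixels of variant instance 1
    component_mask = (component_data == 7)    # pixels of component 7
    # Only pixels belonging to both form this variant's component instance.
    return np.bitwise_and(variant_mask, component_mask)  # [[True, False, False], [True, False, False]]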
############################################################
# Training
############################################################
def split_dataset_into_dirs(dataset, dataset_split):
training_dir = os.path.join(dataset, "training")
validation_dir = os.path.join(dataset, "validation")
if not os.path.isdir(training_dir):
os.mkdir(training_dir)
if not os.path.isdir(validation_dir):
os.mkdir(validation_dir)
scene_prefixes = _scene_prefixes(dataset)
random.shuffle(scene_prefixes)
split_index = int(len(scene_prefixes) * dataset_split/100.00)
training_prefixes = scene_prefixes[0:split_index]
validation_prefixes = scene_prefixes[split_index:]
print("Moving", len(training_prefixes), "scenes into training, and", len(validation_prefixes), "into validation.")
for prefix in training_prefixes:
for scene_file in glob.glob(os.path.join(dataset, prefix+'-*')):
shutil.move(scene_file, training_dir)
for prefix in validation_prefixes:
for scene_file in glob.glob(os.path.join(dataset, prefix+'-*')):
shutil.move(scene_file, validation_dir)
def train(model, dataset, variants_not_components, dataset_split):
"""Train the model."""
class VariantsOrNotGreppyDataset(GreppyDataset):
SHOULD_TRAIN_VARIANTS_NOT_COMPONENTS = variants_not_components
# look for training and validation folders as signals for
# the dataset already being split. if not existant, split the dataset
# into the folders
if not os.path.isdir(os.path.join(dataset, "training")) or not os.path.isdir(os.path.join(dataset, "validation")):
split_dataset_into_dirs(dataset, dataset_split)
# Training dataset.
dataset_train = VariantsOrNotGreppyDataset()
dataset_train.load_subset(dataset, "training")
dataset_train.prepare()
# Validation dataset
dataset_val = VariantsOrNotGreppyDataset()
dataset_val.load_subset(dataset, "validation")
dataset_val.prepare()
# *** This training schedule is an example. Update to your needs ***
# Since we're using a very small dataset, and starting from
# COCO trained weights, we don't need to train too long. Also,
# no need to train all layers, just the heads should do it.
print("Training network")
model.train(dataset_train, dataset_val,
learning_rate=config.LEARNING_RATE,
epochs=30,
layers='all') # train all layers; heads-only fine-tuning isn't enough here since not every layer gets transferred weights (e.g. conv1 when a depth channel is added)
############################################################
# Inference
############################################################
def draw_objects_and_depth(image, mask):
"""Apply color splash effect.
image: RGB image [height, width, 3]
mask: instance segmentation mask [height, width, instance count]
Returns result image.
"""
# Make a grayscale copy of the image. The grayscale copy still
# has 3 RGB channels, though.
gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255
# Copy color pixels from the original color image where mask is set
if mask.shape[-1] > 0:
# We're treating all instances as one, so collapse the mask into one layer
mask = (np.sum(mask, -1, keepdims=True) >= 1)
splash = np.where(mask, image, gray).astype(np.uint8)
else:
splash = gray.astype(np.uint8)
return splash
def detect_and_infer_depth(model, dataset_dir, image_path=None, depth_path=None):
assert image_path and depth_path
# Run model detection and generate the color splash effect
print("Running on {} with dataset {}".format(image_path, dataset_dir))
dataset = GreppyDataset()
dataset.load_subset(dataset_dir, "validation")
dataset.prepare()
# Read image
image = skimage.io.imread(image_path)
# Detect objects
r = model.detect([image], verbose=1)[0]
# Color splash
# Save image with masks
# visualize.display_instances(
# image, r['rois'], r['masks'], r['class_ids'],
# dataset.class_names, r['scores'],
# show_bbox=True, show_mask=True,
# title="Predictions")
# annotated = draw_objects_and_depth(image, r['masks'])
# Save output
# file_name = "depth_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
# skimage.io.imsave(file_name, annotated)
# TODO FIXME update to also read depth
# print("Saved to ", file_name)
############################################################
# Main
############################################################
if __name__ == '__main__':
import argparse
# Parse command line arguments
parser = argparse.ArgumentParser(
description='Train Mask R-CNN to detect nespressos.')
parser.add_argument("command",
metavar="<command>",
help="'train' or 'infer'")
parser.add_argument('--dataset', required=False,
metavar="/path/to/generated/dataset/",
help='Directory of the generated dataset')
parser.add_argument('--traindepth', dest='train_depth', action='store_true',
help="Enable depth training (default: does not train depth)")
parser.add_argument('--no-traindepth', dest='train_depth', action='store_false',
help="Definitely don't do depth training (default: does not train depth)")
parser.set_defaults(train_depth=False)
parser.add_argument('--variantsnotcomponents', dest='variants_not_components', action='store_true',
help="Enable variants training rather than components (default: use components not variants)")
parser.add_argument('--componentsnotvariants', dest='variants_not_components', action='store_false',
help="Enable components training rather than variants (default: use components not variants)")
parser.set_defaults(variants_not_components=False)
parser.add_argument('--splittraining', required=False, type=int,
metavar="80", default=DEFAULT_TRAINING_SPLIT,
help='split off the training set from the validation at this percentage')
parser.add_argument('--weights', required=True,
metavar="/path/to/weights.h5",
help="Path to weights .h5 file or 'coco'")
parser.add_argument('--logs', required=False,
default=DEFAULT_LOGS_DIR,
metavar="/path/to/logs/",
help='Logs and checkpoints directory (default=logs/)')
parser.add_argument('--image', required=False,
metavar="path or URL to image",
help='Image to predict on')
parser.add_argument('--depth', required=False,
metavar="path or URL to depth image exr",
help='Accompanying depth file to predict on')
args = parser.parse_args()
# Validate arguments
if args.command == "train":
assert args.dataset, "Argument --dataset is required for training"
elif args.command == "infer":
assert args.image and args.depth,\
"Provide --image and --depth to run inference"
print("Weights: ", args.weights)
print("Dataset: ", args.dataset)
print("Logs: ", args.logs)
print("Variants rather than Components: ", args.variants_not_components)
if args.command == "train":
print("Train Depth:", args.train_depth)
# Configurations
dataset_dict = json.load(open(os.path.join(args.dataset, '_dataset.json')))
use_depth = True
if args.command == "train":
use_depth = args.train_depth
class TrainingConfig(GreppyConfig):
IMAGE_CHANNEL_COUNT = 4 if use_depth else 3 # depth or RGB
TRAINED_ON_VARIANTS_NOT_COMPONENTS = args.variants_not_components
MEAN_PIXEL = np.array([123.7, 116.8, 103.9, 0.0]) if use_depth else np.array([123.7, 116.8, 103.9])
VARIANT_URIS = dataset_dict['variant_uris']
COMPONENT_URIS = dataset_dict['component_uris']
NUM_CLASSES = (1 + len(dataset_dict['variant_uris'])) if args.variants_not_components else (1 + len(dataset_dict['component_uris']))
IS_STEREO_CAMERA = dataset_dict['camera']['is_stereo_camera']
config = TrainingConfig()
else:
use_depth = args.depth
class InferenceConfig(GreppyConfig):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
IMAGES_PER_GPU = 1
IMAGE_CHANNEL_COUNT = 4 if use_depth else 3 # depth or RGB
TRAINED_ON_VARIANTS_NOT_COMPONENTS = args.variants_not_components
MEAN_PIXEL = np.array([123.7, 116.8, 103.9, 0.0]) if use_depth else np.array([123.7, 116.8, 103.9])
VARIANT_URIS = dataset_dict['variant_uris']
COMPONENT_URIS = dataset_dict['component_uris']
NUM_CLASSES = (1 + len(dataset_dict['variant_uris'])) if args.variants_not_components else (1 + len(dataset_dict['component_uris']))
IS_STEREO_CAMERA = dataset_dict['camera']['is_stereo_camera']
config = InferenceConfig()
assert config.NUM_CLASSES == ((1 + len(dataset_dict['variant_uris'])) if args.variants_not_components else (1 + len(dataset_dict['component_uris'])))
config.display()
# Create model
if args.command == "train":
model = modellib.MaskRCNN(mode="training", config=config,
model_dir=args.logs)
else:
model = modellib.MaskRCNN(mode="inference", config=config,
model_dir=args.logs)
# Select weights file to load
if args.weights.lower() == "coco":
weights_path = COCO_WEIGHTS_PATH
# Download weights file
if not os.path.exists(weights_path):
utils.download_trained_weights(weights_path)
elif args.weights.lower() == "last":
# Find last trained weights
weights_path = model.find_last()
elif args.weights.lower() == "imagenet":
# Start from ImageNet trained weights
weights_path = model.get_imagenet_weights()
else:
weights_path = args.weights
# Load weights
print("Loading weights ", weights_path)
if args.weights.lower() == "coco":
# Exclude the last layers because they require a matching
# number of classes
if use_depth:
# Exclude the first layer too because we've changed the shape of the input:
# Since you're changing the shape of the input, the shape of the first Conv layer will change as well
model.load_weights(weights_path, by_name=True, exclude=[
"mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask", "conv1"])
else:
model.load_weights(weights_path, by_name=True, exclude=[
"mrcnn_class_logits", "mrcnn_bbox_fc",
"mrcnn_bbox", "mrcnn_mask"])
else:
model.load_weights(weights_path, by_name=True)
# Train or evaluate
if args.command == "train":
train(model, args.dataset, args.variants_not_components, args.splittraining)
elif args.command == "infer":
detect_and_infer_depth(model, args.dataset, image_path=args.image,
depth_path=args.depth)
else:
print("'{}' is not recognized. "
"Use 'train' or 'infer'".format(args.command))
|
py | b4137ab0c13ea34b68ded7d226e58c822462e629 | """
Top-level version information for sarif-tools.
"""
import importlib.metadata
def _read_package_version():
try:
return importlib.metadata.version("sarif-tools")
except importlib.metadata.PackageNotFoundError:
return "local"
__version__ = _read_package_version()
|
py | b4137c951c2132ad13b0c659d25deaeaf4975407 | #-
# Copyright (c) 2012 Ben Thorner
# Copyright (c) 2013 Colin Rothwell
# All rights reserved.
#
# This software was developed by Ben Thorner as part of his summer internship
# and Colin Rothwell as part of his final year undergraduate project.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import attr
from beritest_tools import BaseBERITestCase
class test_raw_fpu_mul_ps(BaseBERITestCase):
@attr('floatpaired')
def test_mul_paired(self):
'''Test we can multiply paired singles'''
self.assertRegisterInRange(self.MIPS.s2, 0x4140000043674C07, 0x4140000043674C08, "Failed paired single multiply.")
@attr('floatpaired')
@attr('float_multiply_rounding')
def test_mul_paired_rounding(self):
'''Test we can multiply paired singles, and check rounding'''
self.assertRegisterEqual(self.MIPS.s2, 0x4140000043674C08, "Failed paired single multiply (checking rounding).")
@attr('floatpaired')
def test_mul_paired_qnan(self):
'''Test paired single multiplication when one of the pair is QNaN'''
self.assertRegisterEqual(self.MIPS.s3, 0x7F81000040800000, "mul.ps failed to echo QNaN")
|
py | b4137c9b0c43d886b488768b2a03fbd099d4cec5 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from contextlib import contextmanager
class CustomTestCase(unittest.TestCase):
@contextmanager
def assertNotRaises(self, exc_type):
try:
yield None
except exc_type:
raise self.failureException('{} raised'.format(exc_type.__name__))
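# Usage sketch (illustrative addition, not part of the original helper): a test case derived
# from CustomTestCase can assert that a block of code does not raise a given exception type.
class ExampleAssertNotRaisesUsage(CustomTestCase):
    def test_parse_does_not_raise(self):
        with self.assertNotRaises(ValueError):
            int("42")  # parses cleanly, so assertNotRaises simply passes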
|
py | b4137cc2f91991e39baaa54e2aa2913a1c8b7082 | from typing import List
from data_set_info_data_class.data_class.data_set_info import DataSetInfo
from data_set_remover.classes.data_class.data_for_criteria_remove import DataForCriteriaRemove
class DataSetRemover(object):
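    # Interface stub: concrete removers are expected to implement manual removal of a data set
    # by name, criteria-based removal driven by DataForCriteriaRemove, and discovery of the
    # available criteria validator names.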
def remove_manually(self, data_sets_info: List[DataSetInfo], data_set_name: str):
pass
def remove_by_criteria(self, criteria_remover_data: DataForCriteriaRemove):
pass
def get_criteria_validator_names(self):
pass |
py | b4137d157e26f5d38acd6974e936e8e539f0e23b | # coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.3192
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class UpdateCalendarRequest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
required_map (dict): The key is attribute name
and the value is whether it is 'required' or 'optional'.
"""
openapi_types = {
'weekend_mask': 'WeekendMask',
'source_provider': 'str',
'properties': 'list[ModelProperty]'
}
attribute_map = {
'weekend_mask': 'weekendMask',
'source_provider': 'sourceProvider',
'properties': 'properties'
}
required_map = {
'weekend_mask': 'required',
'source_provider': 'required',
'properties': 'required'
}
def __init__(self, weekend_mask=None, source_provider=None, properties=None): # noqa: E501
"""
UpdateCalendarRequest - a model defined in OpenAPI
:param weekend_mask: (required)
:type weekend_mask: lusid.WeekendMask
:param source_provider: (required)
:type source_provider: str
:param properties: (required)
:type properties: list[lusid.ModelProperty]
""" # noqa: E501
self._weekend_mask = None
self._source_provider = None
self._properties = None
self.discriminator = None
self.weekend_mask = weekend_mask
self.source_provider = source_provider
self.properties = properties
@property
def weekend_mask(self):
"""Gets the weekend_mask of this UpdateCalendarRequest. # noqa: E501
:return: The weekend_mask of this UpdateCalendarRequest. # noqa: E501
:rtype: WeekendMask
"""
return self._weekend_mask
@weekend_mask.setter
def weekend_mask(self, weekend_mask):
"""Sets the weekend_mask of this UpdateCalendarRequest.
:param weekend_mask: The weekend_mask of this UpdateCalendarRequest. # noqa: E501
:type: WeekendMask
"""
if weekend_mask is None:
raise ValueError("Invalid value for `weekend_mask`, must not be `None`") # noqa: E501
self._weekend_mask = weekend_mask
@property
def source_provider(self):
"""Gets the source_provider of this UpdateCalendarRequest. # noqa: E501
:return: The source_provider of this UpdateCalendarRequest. # noqa: E501
:rtype: str
"""
return self._source_provider
@source_provider.setter
def source_provider(self, source_provider):
"""Sets the source_provider of this UpdateCalendarRequest.
:param source_provider: The source_provider of this UpdateCalendarRequest. # noqa: E501
:type: str
"""
if source_provider is None:
raise ValueError("Invalid value for `source_provider`, must not be `None`") # noqa: E501
if source_provider is not None and len(source_provider) > 256:
raise ValueError("Invalid value for `source_provider`, length must be less than or equal to `256`") # noqa: E501
if source_provider is not None and len(source_provider) < 1:
raise ValueError("Invalid value for `source_provider`, length must be greater than or equal to `1`") # noqa: E501
if (source_provider is not None and not re.search(r'^[a-zA-Z0-9\-_]+$', source_provider)): # noqa: E501
raise ValueError(r"Invalid value for `source_provider`, must be a follow pattern or equal to `/^[a-zA-Z0-9\-_]+$/`") # noqa: E501
self._source_provider = source_provider
@property
def properties(self):
"""Gets the properties of this UpdateCalendarRequest. # noqa: E501
:return: The properties of this UpdateCalendarRequest. # noqa: E501
:rtype: list[ModelProperty]
"""
return self._properties
@properties.setter
def properties(self, properties):
"""Sets the properties of this UpdateCalendarRequest.
:param properties: The properties of this UpdateCalendarRequest. # noqa: E501
:type: list[ModelProperty]
"""
if properties is None:
raise ValueError("Invalid value for `properties`, must not be `None`") # noqa: E501
self._properties = properties
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateCalendarRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
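# Illustrative sketch (assumption: the values below are placeholders rather
# than a real LUSID payload; weekend_mask normally takes a lusid.WeekendMask):
# construct a request and serialise it with the generated to_dict() helper.
if __name__ == '__main__':
    example = UpdateCalendarRequest(
        weekend_mask={'days': ['Saturday', 'Sunday'], 'timeZone': 'GMT'},  # stand-in for a WeekendMask
        source_provider='example-provider',
        properties=[],  # required, but may be an empty list
    )
    print(example.to_dict())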
|
py | b4137e4a74608e73c6ebf297a9cdbd76da2932d0 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Functions for working with Array dispatch."""
import functools
from types import FunctionType
from typing import Callable
from .array import Array
def wrap(func: Callable, wrap_return: bool = True, decorator: bool = False) -> Callable:
"""Wrap an array backend function to work with Arrays.
Args:
func: a function to wrap.
wrap_return: If ``True`` convert results that are registered array
backend types into Array objects (Default: True).
decorator: If ``True`` the wrapped decorator function ``func`` will
also wrap the decorated functions (Default: False).
Returns:
Callable: The wrapped function.
.. note::
Setting ``decorator=True`` requires that the signature of the
function being wrapped is ``func(f: Callable, ...) -> Callable``.
Using it is equivalent to nested wrapping
.. code-block:: python
f_wrapped = wrap(func, decorator=True)(f)
is equivalent to
.. code-block:: python
f_wrapped = wrap(wrap(func)(f))
"""
# pylint: disable=protected-access
@functools.wraps(func)
def wrapped_func(*args, **kwargs):
# Check if we are wrapping a decorator by checking that
# the first argument is of FunctionType
if decorator and args:
is_decorator = isinstance(args[0], FunctionType)
else:
is_decorator = False
args = tuple(_wrap_function(x) if isinstance(x, FunctionType) else x for x in args)
kwargs = dict(
(key, _wrap_function(val)) if isinstance(val, FunctionType) else (key, val)
for key, val in kwargs.items()
)
# Return the wrapped function
if not is_decorator:
# Evaluate unwrapped function
result = _wrap_function(func)(*args, **kwargs)
# Optional wrap array return types back to Arrays
if wrap_return:
result = Array._wrap(result)
return result
# Wrap the decorated function returned by the decorator
decorated = _wrap_function(func)(*args, **kwargs)
@functools.wraps(args[0])
def wrapped_decorated(*f_args, **f_kwargs):
f_args = tuple(_wrap_function(x) if isinstance(x, FunctionType) else x for x in f_args)
f_kwargs = dict(
(key, _wrap_function(val)) if isinstance(val, FunctionType) else (key, val)
for key, val in f_kwargs.items()
)
result = _wrap_function(decorated)(*f_args, **f_kwargs)
if wrap_return:
result = Array._wrap(result)
return result
return wrapped_decorated
return wrapped_func
def _wrap_function(func: callable) -> callable:
"""Wrap a function to handle Array-like inputs and returns"""
@functools.wraps(func)
def wrapped_function(*args, **kwargs):
# Unwrap inputs
args = tuple(
x.__qiskit_array__().data if hasattr(x, "__qiskit_array__") else x for x in args
)
kwargs = dict(
(key, val.__qiskit_array__().data) if hasattr(val, "__qiskit_array__") else (key, val)
for key, val in kwargs.items()
)
# Evaluate function with unwrapped inputs
result = func(*args, **kwargs)
# Unwrap result
if isinstance(result, tuple):
result = tuple(
x.__qiskit_array__().data if hasattr(x, "__qiskit_array__") else x for x in result
)
elif hasattr(result, "__qiskit_array__"):
result = result.__qiskit_array__().data
return result
return wrapped_function
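# Minimal usage sketch (assumptions: numpy is installed and numpy arrays are a
# registered array backend for Array, so wrap_return can re-wrap the result):
# wrap() lets a plain numpy function accept Array inputs transparently.
if __name__ == "__main__":
    import numpy as np
    wrapped_dot = wrap(np.dot)  # unwraps Array arguments, re-wraps the numpy result
    out = wrapped_dot(Array(np.eye(2)), Array(np.ones(2)))
    print(type(out), out)  # expected: an Array equivalent to array([1., 1.])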
|
py | b4137f22881fdebea2cd23883580fc481a133db8 | try:
import cloudstorage
except ImportError:
cloudstorage = None
from django.core.files.base import ContentFile
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import unittest
from djangoappengine.storage import AppEngineStorage, CloudStorageInfo, BLOBSTORE_SERVICE, CLOUD_STORAGE_SERVICE
from google.appengine.api import files
from google.appengine.ext.blobstore import BlobInfo, BlobKey
class AppEngineStorageBaseTest(object):
def test_file_accessed_time(self):
self.assertRaises(NotImplementedError, self.storage.accessed_time, self.file_key)
def test_file_created_time(self):
ctime = self.storage.created_time(self.file_key)
self.assertEqual(ctime, self.test_file_info.creation)
def test_file_modified_time(self):
self.assertRaises(NotImplementedError, self.storage.modified_time, self.file_key)
def test_file_exists(self):
self.assertTrue(self.storage.exists(self.file_key))
def test_file_does_not_exist(self):
self.assertFalse(self.storage.exists('abcdef'))
def test_listdir(self):
self.assertRaises(NotImplementedError, self.storage.listdir, '')
def test_file_save_without_name(self):
f = ContentFile('custom contents')
f.name = 'test.file'
storage_f_name = self.storage.save(None, f)
info = self.file_info(storage_f_name)
self.assertEqual(info.filename, f.name)
def test_file_save_with_path(self):
path = 'path/to/test.file'
storage_f_name = self.storage.save(path,
ContentFile('file saved with path'))
self.assertTrue(self.storage.exists(storage_f_name))
self.assertEqual(self.storage.open(storage_f_name).read(),
'file saved with path')
info = self.file_info(storage_f_name)
self.assertEqual(info.filename, path)
def test_file_path(self):
f = ContentFile('custom contents')
f_name = self.storage.save('test.file', f)
self.assertRaises(NotImplementedError, self.storage.path, f_name)
class BlobstoreStorageTest(AppEngineStorageBaseTest, TestCase):
def setUp(self):
super(BlobstoreStorageTest, self).setUp()
self.storage = AppEngineStorage(storage_service=BLOBSTORE_SERVICE)
file_name = files.blobstore.create()
with files.open(file_name, 'a') as f:
f.write('abcdef')
files.finalize(file_name)
self.blob_key = files.blobstore.get_blob_key(file_name)
self.file_key = str(self.blob_key)
self.test_file_info = self.file_info(self.file_key)
def file_info(self, name):
key = BlobKey(name.split('/', 1)[0])
return BlobInfo(key)
def test_file_url(self):
url = self.storage.url(self.file_key)
self.assertEqual(url, '/_ah/img/%s' % self.file_key)
@unittest.skipUnless(cloudstorage, 'cloudstorage not installed')
class GSStorageTest(AppEngineStorageBaseTest, TestCase):
def setUp(self):
super(GSStorageTest, self).setUp()
self.storage = AppEngineStorage(storage_service=CLOUD_STORAGE_SERVICE, cloud_storage_bucket='test_bucket')
file_name = '/test_bucket/file.test'
with cloudstorage.open(file_name, 'w') as f:
f.write('abcdef')
self.file_key = '/gs' + file_name
self.test_file_info = self.file_info(self.file_key)
def file_info(self, name):
return CloudStorageInfo(name)
def test_file_url(self):
url = self.storage.url(self.file_key)
self.assertTrue(url.startswith('/_ah/img/encoded_gs_file:'))
|
py | b413810415a6839bb6fee6010b1888f2b264309f | # coding=utf-8
"""
Classes for data files
These classes provide a somewhat thin API around various file formats that are expected to be used
for data files. Instances of them will be returned by the storage manager on request - you
do not need to instantiate these on your own.
As data files are expected to be modified often, these classes all provide mutable APIs. Instead of
providing complex access mechanisms, however, you may simply use them as a context manager for ease
of use. For example:
>>> x = ultros.core.storage_manager.get_data("test.yml")
>>> with x:
... x[1] = 2
... x["a"]["b"] = "c"
...
>>> x.reload()
>>> x[1]
2 # The file has been saved automatically upon exiting the context manager
>>>
Note that the file *will not be saved* if an exception is raised within the context manager. If this
isn't your intention, then remember to handle any exceptions yourself.
Submodules
==========
.. currentmodule:: ultros.core.storage.data
.. autosummary::
:toctree: data
base
ini
json
toml
yaml
"""
__author__ = "Gareth Coles"
|
py | b4138107f3e6285c6b22eefde883f392b368ecb8 | from src.map.room.Wall import *
from src.Constants import *
from src.characters.Character import *
from src.items.Item import *
from src.map.room.Wall import Wall
CONSTANT = Constants()
class Room:
def __init__(self):
self._north = Wall()
self._east = Wall()
self._south = Wall()
self._west = Wall()
self._player = Character
self._characters = []
self._items = []
def changeWall(self, dr, wl: Wall):
if dr == CONSTANT.NORTH:
self._north = wl
elif dr == CONSTANT.EAST:
self._east = wl
elif dr == CONSTANT.SOUTH:
self._south = wl
elif dr == CONSTANT.WEST:
self._west = wl
def addPlayer(self, pl: Character):
self._player = pl
def removePlayer(self):
self._player = EmptyCharacter()
def getPlayer(self) -> Character:
return self._player
def addCharacters(self, ch: Character):
self._characters.append(ch)
def addItem(self, it: Item):
self._items.append(it)
def movePlayer(self, dr: str):
if dr == CONSTANT.NORTH:
self.movePlayerDir(self._north)
elif dr == CONSTANT.EAST:
self.movePlayerDir(self._east)
elif dr == CONSTANT.SOUTH:
self.movePlayerDir(self._south)
elif dr == CONSTANT.WEST:
self.movePlayerDir(self._west)
def movePlayerDir(self, wl: Wall):
if type(wl) is Door:
wl.getPath().addPlayer(self._player)
self.removePlayer()
elif type(wl) is LockedDoor:
if not wl.isLocked():
wl.getPath().addPlayer(self._player)
self.removePlayer()
else:
print("Door Locked")
class EmptyRoom(Room):
def __init__(self):
super().__init__()
'''
room1 = Room()
room2 = Room()
room1.changeWall(CONSTANT.NORTH, Door(room2))
room2.changeWall(CONSTANT.SOUTH, Door(room1))
testing = Character(Elf(), Monk(), Stats(1, 1, 1, 1, 1, 1))
room1.addPlayer(testing)
room1.getPlayer().printCharacter()
room1.movePlayer(CONSTANT.NORTH)
room2.getPlayer().printCharacter()
room2.movePlayer(CONSTANT.SOUTH)
room1.getPlayer().printCharacter()
''' |
py | b413811641ba3bf84218e544e9290819ff2ef011 |
import sys
import os
import socket
import shutil
import argparse
import donkeycar as dk
from donkeycar.parts.datastore import Tub
from .tub import TubManager
PACKAGE_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
TEMPLATES_PATH = os.path.join(PACKAGE_PATH, 'templates')
def make_dir(path):
real_path = os.path.expanduser(path)
print('making dir ', real_path)
if not os.path.exists(real_path):
os.makedirs(real_path)
return real_path
def load_config(config_path):
'''
load a config from the given path
'''
conf = os.path.expanduser(config_path)
if not os.path.exists(conf):
print("No config file at location: %s. Add --config to specify\
location or run from dir containing config.py." % conf)
return None
try:
cfg = dk.load_config(conf)
except:
print("Exception while loading config from", conf)
return None
return cfg
class BaseCommand():
pass
class CreateCar(BaseCommand):
def parse_args(self, args):
parser = argparse.ArgumentParser(prog='createcar', usage='%(prog)s [options]')
parser.add_argument('--path', default=None, help='path where to create car folder')
parser.add_argument('--template', default=None, help='name of car template to use')
parser.add_argument('--overwrite', action='store_true', help='should replace existing files')
parsed_args = parser.parse_args(args)
return parsed_args
def run(self, args):
args = self.parse_args(args)
self.create_car(path=args.path, template=args.template, overwrite=args.overwrite)
def create_car(self, path, template='donkey2', overwrite=False):
"""
This script sets up the folder structure for donkey to work.
It must run without donkey installed so that people installing with
docker can build the folder structure for docker to mount to.
"""
# these are needed in case None is passed as path
path = path or '~/d2'
template = template or 'donkey2'
print("Creating car folder: {}".format(path))
path = make_dir(path)
print("Creating data & model folders.")
folders = ['models', 'data', 'logs']
folder_paths = [os.path.join(path, f) for f in folders]
for fp in folder_paths:
make_dir(fp)
#add car application and config files if they don't exist
app_template_path = os.path.join(TEMPLATES_PATH, template+'.py')
config_template_path = os.path.join(TEMPLATES_PATH, 'config_defaults.py')
car_app_path = os.path.join(path, 'manage.py')
car_config_path = os.path.join(path, 'config.py')
if os.path.exists(car_app_path) and not overwrite:
print('Car app already exists. Delete it and rerun createcar to replace.')
else:
print("Copying car application template: {}".format(template))
shutil.copyfile(app_template_path, car_app_path)
if os.path.exists(car_config_path) and not overwrite:
print('Car config already exists. Delete it and rerun createcar to replace.')
else:
print("Copying car config defaults. Adjust these before starting your car.")
shutil.copyfile(config_template_path, car_config_path)
print("Donkey setup complete.")
class UploadData(BaseCommand):
def parse_args(self, args):
parser = argparse.ArgumentParser(prog='uploaddata', usage='%(prog)s [options]')
parser.add_argument('--url', help='path where to create car folder')
parser.add_argument('--template', help='name of car template to use')
parsed_args = parser.parse_args(args)
return parsed_args
class FindCar(BaseCommand):
def parse_args(self, args):
pass
def run(self, args):
print('Looking up your computer IP address...')
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8",80))
ip = s.getsockname()[0]
print('Your IP address: %s ' % ip)
s.close()
print("Finding your car's IP address...")
cmd = "sudo nmap -sP " + ip + "/24 | awk '/^Nmap/{ip=$NF}/B8:27:EB/{print ip}'"
print("Your car's ip address is:" )
os.system(cmd)
class CalibrateCar(BaseCommand):
def parse_args(self, args):
parser = argparse.ArgumentParser(prog='calibrate', usage='%(prog)s [options]')
parser.add_argument('--channel', help="The channel you'd like to calibrate [0-15]")
parsed_args = parser.parse_args(args)
return parsed_args
def run(self, args):
from donkeycar.parts.actuator import PCA9685
args = self.parse_args(args)
channel = int(args.channel)
c = PCA9685(channel)
for i in range(10):
pwm = int(input('Enter a PWM setting to test (0-1500)'))
c.run(pwm)
class MakeMovie(BaseCommand):
def parse_args(self, args):
parser = argparse.ArgumentParser(prog='makemovie')
parser.add_argument('--tub', help='The tub to make movie from')
parser.add_argument('--out', default='tub_movie.mp4', help='The movie filename to create. default: tub_movie.mp4')
parser.add_argument('--config', default='./config.py', help='location of config file to use. default: ./config.py')
parsed_args = parser.parse_args(args)
return parsed_args, parser
def run(self, args):
'''
Load the images from a tub and create a movie from them.
Movie
'''
import moviepy.editor as mpy
args, parser = self.parse_args(args)
if args.tub is None:
parser.print_help()
return
conf = os.path.expanduser(args.config)
if not os.path.exists(conf):
print("No config file at location: %s. Add --config to specify\
location or run from dir containing config.py." % conf)
return
try:
cfg = dk.load_config(conf)
except:
print("Exception while loading config from", conf)
return
self.tub = Tub(args.tub)
self.num_rec = self.tub.get_num_records()
self.iRec = 0
print('making movie', args.out, 'from', self.num_rec, 'images')
clip = mpy.VideoClip(self.make_frame, duration=(self.num_rec//cfg.DRIVE_LOOP_HZ) - 1)
clip.write_videofile(args.out,fps=cfg.DRIVE_LOOP_HZ)
print('done')
def make_frame(self, t):
'''
Callback to return an image from our tub records.
This is called from the VideoClip as it references a time.
We don't use t to reference the frame, but instead increment
a frame counter. This assumes sequential access.
'''
self.iRec = self.iRec + 1
if self.iRec >= self.num_rec - 1:
return None
rec = self.tub.get_record(self.iRec)
image = rec['cam/image_array']
return image # returns an 8-bit RGB array
class Sim(BaseCommand):
'''
Start a websocket SocketIO server to talk to a donkey simulator
'''
def parse_args(self, args):
parser = argparse.ArgumentParser(prog='sim')
parser.add_argument('--model', help='the model to use for predictions')
parser.add_argument('--config', default='./config.py', help='location of config file to use. default: ./config.py')
parser.add_argument('--type', default='categorical', help='model type to use when loading. categorical|linear')
parser.add_argument('--top_speed', default='3', help='what is top speed to drive')
parsed_args = parser.parse_args(args)
return parsed_args, parser
def run(self, args):
'''
Start a websocket SocketIO server to talk to a donkey simulator
'''
import socketio
from donkeycar.parts.simulation import SteeringServer
from donkeycar.parts.keras import KerasCategorical, KerasLinear
args, parser = self.parse_args(args)
cfg = load_config(args.config)
if cfg is None:
return
#TODO: this logic should be in a pilot or model handler part.
if args.type == "categorical":
kl = KerasCategorical()
elif args.type == "linear":
kl = KerasLinear(num_outputs=2)
else:
print("didn't recognice type:", args.type)
return
#can provide an optional image filter part
img_stack = None
#load keras model
kl.load(args.model)
#start socket server framework
sio = socketio.Server()
top_speed = float(args.top_speed)
#start sim server handler
ss = SteeringServer(sio, kpart=kl, top_speed=top_speed, image_part=img_stack)
#register events and pass to server handlers
@sio.on('telemetry')
def telemetry(sid, data):
ss.telemetry(sid, data)
@sio.on('connect')
def connect(sid, environ):
ss.connect(sid, environ)
ss.go(('0.0.0.0', 9090))
class TubCheck(BaseCommand):
def parse_args(self, args):
parser = argparse.ArgumentParser(prog='tubcheck', usage='%(prog)s [options]')
parser.add_argument('tubs', nargs='+', help='paths to tubs')
parsed_args = parser.parse_args(args)
return parsed_args
def check(self, tub_paths, fix=False):
'''
Check for any problems. Looks at tubs and finds problems in any records or images that won't open.
If fix is True, then delete images and records that cause problems.
'''
tubs = [Tub(path) for path in tub_paths]
for tub in tubs:
tub.check(fix=fix)
def run(self, args):
args = self.parse_args(args)
self.check(args.tubs)
class ShowHistogram(BaseCommand):
def parse_args(self, args):
parser = argparse.ArgumentParser(prog='tubhist', usage='%(prog)s [options]')
parser.add_argument('tubs', nargs='+', help='paths to tubs')
parser.add_argument('--record', default=None, help='name of record to create histogram')
parsed_args = parser.parse_args(args)
return parsed_args
def show_histogram(self, tub_paths, record_name):
'''
Produce a histogram of record type frequency in the given tub
'''
from matplotlib import pyplot as plt
from donkeycar.parts.datastore import TubGroup
tg = TubGroup(tub_paths=tub_paths)
if record_name is not None:
tg.df[record_name].hist(bins=50)
else:
tg.df.hist(bins=50)
plt.show()
def run(self, args):
args = self.parse_args(args)
args.tubs = ','.join(args.tubs)
self.show_histogram(args.tubs, args.record)
class ShowPredictionPlots(BaseCommand):
def plot_predictions(self, cfg, tub_paths, model_path):
'''
Plot model predictions for angle and throttle against data from tubs.
'''
import matplotlib.pyplot as plt
import pandas as pd
from donkeycar.parts.datastore import TubGroup
from donkeycar.parts.keras import KerasCategorical
tg = TubGroup(tub_paths)
model_path = os.path.expanduser(model_path)
model = KerasCategorical()
model.load(model_path)
gen = tg.get_batch_gen(batch_size=len(tg.df),shuffle=False)
arr = next(gen)
"""
THIS WILL SHOW the output of a predicted model.
for tub in tubs:
num_records = tub.get_num_records()
for iRec in tub.get_index(shuffled=False):
record = tub.get_record(iRec)
img = record["cam/image_array"]
user_angle = float(record["user/angle"])
user_throttle = float(record["user/throttle"])
pilot_angle, pilot_throttle = model.run(img)
user_angles.append(user_angle)
user_throttles.append(user_throttle)
pilot_angles.append(pilot_angle)
pilot_throttles.append(pilot_throttle)
angles_df = pd.DataFrame({'user_angle': user_angles, 'pilot_angle': pilot_angles})
throttles_df = pd.DataFrame({'user_throttle': user_throttles, 'pilot_throttle': pilot_throttles})
fig = plt.figure()
title = "Model Predictions\nTubs: " + tub_names + "\nModel: " + model_name
fig.suptitle(title)
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
angles_df.plot(ax=ax1)
throttles_df.plot(ax=ax2)
ax1.legend(loc=4)
ax2.legend(loc=4)
plt.show()
"""
def execute_from_command_line():
"""
This is the function linked to the "donkey" terminal command.
"""
commands = {
'createcar': CreateCar,
'findcar': FindCar,
'calibrate': CalibrateCar,
'tubclean': TubManager,
'tubhist': ShowHistogram,
'tubplot': ShowPredictionPlots,
'tubcheck': TubCheck,
'makemovie': MakeMovie,
'sim': Sim,
}
args = sys.argv[:]
command_text = args[1]
if command_text in commands.keys():
command = commands[command_text]
c = command()
c.run(args[2:])
else:
print('The available commands are:')
print(list(commands.keys()))
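# Illustrative sketch (assumption: the module is run inside its package, e.g.
# via `python -m <package path to this module> createcar --path ~/mycar`,
# since the relative import above prevents running the file directly):
# execute_from_command_line() dispatches purely on sys.argv[1], the same way
# the installed "donkey" console script does.
if __name__ == '__main__':
    execute_from_command_line()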
|
py | b413812a1a30d7c6c9f1b3a0e98e3f84e618dd5a | #! -*- coding:utf-8 -*-
import math
class SumTree(object):
def __init__(self, max_size):
self.max_size = max_size
self.tree_level = int(math.ceil(math.log(max_size+1, 2))+1)
self.tree_size = int(2**self.tree_level-1)
self.tree = [0 for i in range(self.tree_size)]
self.data = [None for i in range(self.max_size)]
self.size = 0
self.cursor = 0
def add(self, contents, value):
index = self.cursor
self.cursor = (self.cursor+1)%self.max_size
self.size = min(self.size+1, self.max_size)
self.data[index] = contents
self.val_update(index, value)
def get_val(self, index):
tree_index = 2**(self.tree_level-1)-1+index
return self.tree[tree_index]
def val_update(self, index, value):
tree_index = 2**(self.tree_level-1)-1+index
diff = value-self.tree[tree_index]
self.reconstruct(tree_index, diff)
def reconstruct(self, tindex, diff):
self.tree[tindex] += diff
if not tindex == 0:
tindex = int((tindex-1)/2)
self.reconstruct(tindex, diff)
def find(self, value, norm=True):
if norm:
value *= self.tree[0]
return self._find(value, 0)
def _find(self, value, index):
if 2**(self.tree_level-1)-1 <= index:
return self.data[index-(2**(self.tree_level-1)-1)], self.tree[index], index-(2**(self.tree_level-1)-1)
left = self.tree[2*index+1]
if value <= left:
return self._find(value,2*index+1)
else:
return self._find(value-left,2*(index+1))
def print_tree(self):
for k in range(1, self.tree_level+1):
for j in range(2**(k-1)-1, 2**k-1):
print(self.tree[j], end=' ')
print()
def filled_size(self):
return self.size
if __name__ == '__main__':
s = SumTree(10)
for i in range(20):
s.add(2**i, i)
s.print_tree()
print(s.find(0.5))
|
py | b41381d63e69b5be6bbb8d7eb2f50afdc67d6815 | # To use this code, make sure you
#
# import json
#
# and then, to convert JSON from a string, do
#
# result = admin_barriers_create_response_from_dict(json.loads(json_string))
from dataclasses import dataclass
from typing import Optional, Any, List, TypeVar, Callable, Type, cast
T = TypeVar("T")
def from_str(x: Any) -> str:
assert isinstance(x, str)
return x
def from_none(x: Any) -> Any:
assert x is None
return x
def from_union(fs, x):
for f in fs:
try:
return f(x)
except:
pass
assert False
def from_list(f: Callable[[Any], T], x: Any) -> List[T]:
assert isinstance(x, list)
return [f(y) for y in x]
def from_int(x: Any) -> int:
assert isinstance(x, int) and not isinstance(x, bool)
return x
def to_class(c: Type[T], x: Any) -> dict:
assert isinstance(x, c)
return cast(Any, x).to_dict()
def from_bool(x: Any) -> bool:
assert isinstance(x, bool)
return x
@dataclass
class Usergroup:
id: Optional[str] = None
name: Optional[str] = None
@staticmethod
def from_dict(obj: Any) -> 'Usergroup':
assert isinstance(obj, dict)
id = from_union([from_str, from_none], obj.get("id"))
name = from_union([from_str, from_none], obj.get("name"))
return Usergroup(id, name)
def to_dict(self) -> dict:
result: dict = {}
result["id"] = from_union([from_str, from_none], self.id)
result["name"] = from_union([from_str, from_none], self.name)
return result
@dataclass
class Barrier:
id: Optional[str] = None
enterprise_id: Optional[str] = None
primary_usergroup: Optional[Usergroup] = None
barriered_from_usergroups: Optional[List[Usergroup]] = None
restricted_subjects: Optional[List[str]] = None
date_update: Optional[int] = None
@staticmethod
def from_dict(obj: Any) -> 'Barrier':
assert isinstance(obj, dict)
id = from_union([from_str, from_none], obj.get("id"))
enterprise_id = from_union([from_str, from_none], obj.get("enterprise_id"))
primary_usergroup = from_union([Usergroup.from_dict, from_none], obj.get("primary_usergroup"))
barriered_from_usergroups = from_union([lambda x: from_list(Usergroup.from_dict, x), from_none], obj.get("barriered_from_usergroups"))
restricted_subjects = from_union([lambda x: from_list(from_str, x), from_none], obj.get("restricted_subjects"))
date_update = from_union([from_int, from_none], obj.get("date_update"))
return Barrier(id, enterprise_id, primary_usergroup, barriered_from_usergroups, restricted_subjects, date_update)
def to_dict(self) -> dict:
result: dict = {}
result["id"] = from_union([from_str, from_none], self.id)
result["enterprise_id"] = from_union([from_str, from_none], self.enterprise_id)
result["primary_usergroup"] = from_union([lambda x: to_class(Usergroup, x), from_none], self.primary_usergroup)
result["barriered_from_usergroups"] = from_union([lambda x: from_list(lambda x: to_class(Usergroup, x), x), from_none], self.barriered_from_usergroups)
result["restricted_subjects"] = from_union([lambda x: from_list(from_str, x), from_none], self.restricted_subjects)
result["date_update"] = from_union([from_int, from_none], self.date_update)
return result
@dataclass
class ResponseMetadata:
messages: Optional[List[str]] = None
@staticmethod
def from_dict(obj: Any) -> 'ResponseMetadata':
assert isinstance(obj, dict)
messages = from_union([lambda x: from_list(from_str, x), from_none], obj.get("messages"))
return ResponseMetadata(messages)
def to_dict(self) -> dict:
result: dict = {}
result["messages"] = from_union([lambda x: from_list(from_str, x), from_none], self.messages)
return result
@dataclass
class AdminBarriersCreateResponse:
ok: Optional[bool] = None
error: Optional[str] = None
needed: Optional[str] = None
provided: Optional[str] = None
response_metadata: Optional[ResponseMetadata] = None
barrier: Optional[Barrier] = None
@staticmethod
def from_dict(obj: Any) -> 'AdminBarriersCreateResponse':
assert isinstance(obj, dict)
ok = from_union([from_bool, from_none], obj.get("ok"))
error = from_union([from_str, from_none], obj.get("error"))
needed = from_union([from_str, from_none], obj.get("needed"))
provided = from_union([from_str, from_none], obj.get("provided"))
response_metadata = from_union([ResponseMetadata.from_dict, from_none], obj.get("response_metadata"))
barrier = from_union([Barrier.from_dict, from_none], obj.get("barrier"))
return AdminBarriersCreateResponse(ok, error, needed, provided, response_metadata, barrier)
def to_dict(self) -> dict:
result: dict = {}
result["ok"] = from_union([from_bool, from_none], self.ok)
result["error"] = from_union([from_str, from_none], self.error)
result["needed"] = from_union([from_str, from_none], self.needed)
result["provided"] = from_union([from_str, from_none], self.provided)
result["response_metadata"] = from_union([lambda x: to_class(ResponseMetadata, x), from_none], self.response_metadata)
result["barrier"] = from_union([lambda x: to_class(Barrier, x), from_none], self.barrier)
return result
def admin_barriers_create_response_from_dict(s: Any) -> AdminBarriersCreateResponse:
return AdminBarriersCreateResponse.from_dict(s)
def admin_barriers_create_response_to_dict(x: AdminBarriersCreateResponse) -> Any:
return to_class(AdminBarriersCreateResponse, x)
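# Illustrative round-trip sketch (assumption: the payload below is a made-up,
# Slack-shaped example rather than captured API output): parse a dict into the
# dataclasses, read a nested field, and serialise back with the helper.
if __name__ == '__main__':
    sample = {
        'ok': True,
        'barrier': {
            'id': 'Ba12345',
            'primary_usergroup': {'id': 'S123', 'name': 'Engineering'},
            'barriered_from_usergroups': [{'id': 'S456', 'name': 'Sales'}],
            'restricted_subjects': ['im', 'mpim', 'call'],
            'date_update': 1611000000,
        },
    }
    parsed = admin_barriers_create_response_from_dict(sample)
    print(parsed.barrier.primary_usergroup.name)                 # -> Engineering
    print(admin_barriers_create_response_to_dict(parsed)['ok'])  # -> True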
|
py | b4138381d115a78f3203f6ef0017fd83a8df41d1 | # encoding: utf-8
from __future__ import unicode_literals
import os
import re
import sys
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..compat import (
compat_etree_fromstring,
compat_urllib_parse_unquote,
compat_urlparse,
compat_xml_parse_error,
)
from ..utils import (
determine_ext,
ExtractorError,
float_or_none,
HEADRequest,
is_html,
orderedSet,
sanitized_Request,
smuggle_url,
unescapeHTML,
unified_strdate,
unsmuggle_url,
UnsupportedError,
url_basename,
xpath_text,
)
from .brightcove import (
BrightcoveLegacyIE,
BrightcoveNewIE,
)
from .nbc import NBCSportsVPlayerIE
from .ooyala import OoyalaIE
from .rutv import RUTVIE
from .tvc import TVCIE
from .sportbox import SportBoxEmbedIE
from .smotri import SmotriIE
from .myvi import MyviIE
from .condenast import CondeNastIE
from .udn import UDNEmbedIE
from .senateisvp import SenateISVPIE
from .svt import SVTIE
from .pornhub import PornHubIE
from .xhamster import XHamsterEmbedIE
from .tnaflix import TNAFlixNetworkEmbedIE
from .vimeo import VimeoIE
from .dailymotion import DailymotionCloudIE
from .onionstudios import OnionStudiosIE
from .viewlift import ViewLiftEmbedIE
from .screenwavemedia import ScreenwaveMediaIE
from .mtv import MTVServicesEmbeddedIE
from .pladform import PladformIE
from .videomore import VideomoreIE
from .googledrive import GoogleDriveIE
from .jwplatform import JWPlatformIE
from .digiteka import DigitekaIE
from .instagram import InstagramIE
from .liveleak import LiveLeakIE
from .threeqsdn import ThreeQSDNIE
from .theplatform import ThePlatformIE
class GenericIE(InfoExtractor):
IE_DESC = 'Generic downloader that works on some sites'
_VALID_URL = r'.*'
IE_NAME = 'generic'
_TESTS = [
# Direct link to a video
{
'url': 'http://media.w3.org/2010/05/sintel/trailer.mp4',
'md5': '67d406c2bcb6af27fa886f31aa934bbe',
'info_dict': {
'id': 'trailer',
'ext': 'mp4',
'title': 'trailer',
'upload_date': '20100513',
}
},
# Direct link to media delivered compressed (until Accept-Encoding is *)
{
'url': 'http://calimero.tk/muzik/FictionJunction-Parallel_Hearts.flac',
'md5': '128c42e68b13950268b648275386fc74',
'info_dict': {
'id': 'FictionJunction-Parallel_Hearts',
'ext': 'flac',
'title': 'FictionJunction-Parallel_Hearts',
'upload_date': '20140522',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
]
},
# Direct download with broken HEAD
{
'url': 'http://ai-radio.org:8000/radio.opus',
'info_dict': {
'id': 'radio',
'ext': 'opus',
'title': 'radio',
},
'params': {
'skip_download': True, # infinite live stream
},
'expected_warnings': [
r'501.*Not Implemented',
r'400.*Bad Request',
],
},
# Direct link with incorrect MIME type
{
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'md5': '4ccbebe5f36706d85221f204d7eb5913',
'info_dict': {
'url': 'http://ftp.nluug.nl/video/nluug/2014-11-20_nj14/zaal-2/5_Lennart_Poettering_-_Systemd.webm',
'id': '5_Lennart_Poettering_-_Systemd',
'ext': 'webm',
'title': '5_Lennart_Poettering_-_Systemd',
'upload_date': '20141120',
},
'expected_warnings': [
'URL could be a direct video link, returning it as such.'
]
},
# RSS feed
{
'url': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'info_dict': {
'id': 'http://phihag.de/2014/youtube-dl/rss2.xml',
'title': 'Zero Punctuation',
'description': 're:.*groundbreaking video review series.*'
},
'playlist_mincount': 11,
},
# RSS feed with enclosure
{
'url': 'http://podcastfeeds.nbcnews.com/audio/podcast/MSNBC-MADDOW-NETCAST-M4V.xml',
'info_dict': {
'id': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
'ext': 'm4v',
'upload_date': '20150228',
'title': 'pdv_maddow_netcast_m4v-02-27-2015-201624',
}
},
# SMIL from http://videolectures.net/promogram_igor_mekjavic_eng
{
'url': 'http://videolectures.net/promogram_igor_mekjavic_eng/video/1/smil.xml',
'info_dict': {
'id': 'smil',
'ext': 'mp4',
'title': 'Automatics, robotics and biocybernetics',
'description': 'md5:815fc1deb6b3a2bff99de2d5325be482',
'upload_date': '20130627',
'formats': 'mincount:16',
'subtitles': 'mincount:1',
},
'params': {
'force_generic_extractor': True,
'skip_download': True,
},
},
# SMIL from http://www1.wdr.de/mediathek/video/livestream/index.html
{
'url': 'http://metafilegenerator.de/WDR/WDR_FS/hds/hds.smil',
'info_dict': {
'id': 'hds',
'ext': 'flv',
'title': 'hds',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
# SMIL from https://www.restudy.dk/video/play/id/1637
{
'url': 'https://www.restudy.dk/awsmedia/SmilDirectory/video_1637.xml',
'info_dict': {
'id': 'video_1637',
'ext': 'flv',
'title': 'video_1637',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
# SMIL from http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm
{
'url': 'http://services.media.howstuffworks.com/videos/450221/smil-service.smil',
'info_dict': {
'id': 'smil-service',
'ext': 'flv',
'title': 'smil-service',
'formats': 'mincount:1',
},
'params': {
'skip_download': True,
},
},
# SMIL from http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370
{
'url': 'http://api.new.livestream.com/accounts/1570303/events/1585861/videos/4719370.smil',
'info_dict': {
'id': '4719370',
'ext': 'mp4',
'title': '571de1fd-47bc-48db-abf9-238872a58d1f',
'formats': 'mincount:3',
},
'params': {
'skip_download': True,
},
},
# XSPF playlist from http://www.telegraaf.nl/tv/nieuws/binnenland/24353229/__Tikibad_ontruimd_wegens_brand__.html
{
'url': 'http://www.telegraaf.nl/xml/playlist/2015/8/7/mZlp2ctYIUEB.xspf',
'info_dict': {
'id': 'mZlp2ctYIUEB',
'ext': 'mp4',
'title': 'Tikibad ontruimd wegens brand',
'description': 'md5:05ca046ff47b931f9b04855015e163a4',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 33,
},
'params': {
'skip_download': True,
},
},
# MPD from http://dash-mse-test.appspot.com/media.html
{
'url': 'http://yt-dash-mse-test.commondatastorage.googleapis.com/media/car-20120827-manifest.mpd',
'md5': '4b57baab2e30d6eb3a6a09f0ba57ef53',
'info_dict': {
'id': 'car-20120827-manifest',
'ext': 'mp4',
'title': 'car-20120827-manifest',
'formats': 'mincount:9',
'upload_date': '20130904',
},
'params': {
'format': 'bestvideo',
},
},
# m3u8 served with Content-Type: audio/x-mpegURL; charset=utf-8
{
'url': 'http://once.unicornmedia.com/now/master/playlist/bb0b18ba-64f5-4b1b-a29f-0ac252f06b68/77a785f3-5188-4806-b788-0893a61634ed/93677179-2d99-4ef4-9e17-fe70d49abfbf/content.m3u8',
'info_dict': {
'id': 'content',
'ext': 'mp4',
'title': 'content',
'formats': 'mincount:8',
},
'params': {
# m3u8 downloads
'skip_download': True,
}
},
# m3u8 served with Content-Type: text/plain
{
'url': 'http://www.nacentapps.com/m3u8/index.m3u8',
'info_dict': {
'id': 'index',
'ext': 'mp4',
'title': 'index',
'upload_date': '20140720',
'formats': 'mincount:11',
},
'params': {
# m3u8 downloads
'skip_download': True,
}
},
# google redirect
{
'url': 'http://www.google.com/url?sa=t&rct=j&q=&esrc=s&source=web&cd=1&cad=rja&ved=0CCUQtwIwAA&url=http%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcmQHVoWB5FY&ei=F-sNU-LLCaXk4QT52ICQBQ&usg=AFQjCNEw4hL29zgOohLXvpJ-Bdh2bils1Q&bvm=bv.61965928,d.bGE',
'info_dict': {
'id': 'cmQHVoWB5FY',
'ext': 'mp4',
'upload_date': '20130224',
'uploader_id': 'TheVerge',
'description': 're:^Chris Ziegler takes a look at the\.*',
'uploader': 'The Verge',
'title': 'First Firefox OS phones side-by-side',
},
'params': {
'skip_download': False,
}
},
{
# redirect in Refresh HTTP header
'url': 'https://www.facebook.com/l.php?u=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DpO8h3EaFRdo&h=TAQHsoToz&enc=AZN16h-b6o4Zq9pZkCCdOLNKMN96BbGMNtcFwHSaazus4JHT_MFYkAA-WARTX2kvsCIdlAIyHZjl6d33ILIJU7Jzwk_K3mcenAXoAzBNoZDI_Q7EXGDJnIhrGkLXo_LJ_pAa2Jzbx17UHMd3jAs--6j2zaeto5w9RTn8T_1kKg3fdC5WPX9Dbb18vzH7YFX0eSJmoa6SP114rvlkw6pkS1-T&s=1',
'info_dict': {
'id': 'pO8h3EaFRdo',
'ext': 'mp4',
'title': 'Tripeo Boiler Room x Dekmantel Festival DJ Set',
'description': 'md5:6294cc1af09c4049e0652b51a2df10d5',
'upload_date': '20150917',
'uploader_id': 'brtvofficial',
'uploader': 'Boiler Room',
},
'params': {
'skip_download': False,
},
},
{
'url': 'http://www.hodiho.fr/2013/02/regis-plante-sa-jeep.html',
'md5': '85b90ccc9d73b4acd9138d3af4c27f89',
'info_dict': {
'id': '13601338388002',
'ext': 'mp4',
'uploader': 'www.hodiho.fr',
'title': 'R\u00e9gis plante sa Jeep',
}
},
# bandcamp page with custom domain
{
'add_ie': ['Bandcamp'],
'url': 'http://bronyrock.com/track/the-pony-mash',
'info_dict': {
'id': '3235767654',
'ext': 'mp3',
'title': 'The Pony Mash',
'uploader': 'M_Pallante',
},
'skip': 'There is a limit of 200 free downloads / month for the test song',
},
# embedded brightcove video
# it also tests brightcove videos that need to set the 'Referer' in the
# http requests
{
'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.bfmtv.com/video/bfmbusiness/cours-bourse/cours-bourse-l-analyse-technique-154522/',
'info_dict': {
'id': '2765128793001',
'ext': 'mp4',
'title': 'Le cours de bourse : l’analyse technique',
'description': 'md5:7e9ad046e968cb2d1114004aba466fd9',
'uploader': 'BFM BUSINESS',
},
'params': {
'skip_download': True,
},
},
{
# https://github.com/rg3/youtube-dl/issues/2253
'url': 'http://bcove.me/i6nfkrc3',
'md5': '0ba9446db037002366bab3b3eb30c88c',
'info_dict': {
'id': '3101154703001',
'ext': 'mp4',
'title': 'Still no power',
'uploader': 'thestar.com',
'description': 'Mississauga resident David Farmer is still out of power as a result of the ice storm a month ago. To keep the house warm, Farmer cuts wood from his property for a wood burning stove downstairs.',
},
'add_ie': ['BrightcoveLegacy'],
},
{
'url': 'http://www.championat.com/video/football/v/87/87499.html',
'md5': 'fb973ecf6e4a78a67453647444222983',
'info_dict': {
'id': '3414141473001',
'ext': 'mp4',
'title': 'Видео. Удаление Дзагоева (ЦСКА)',
'description': 'Онлайн-трансляция матча ЦСКА - "Волга"',
'uploader': 'Championat',
},
},
{
# https://github.com/rg3/youtube-dl/issues/3541
'add_ie': ['BrightcoveLegacy'],
'url': 'http://www.kijk.nl/sbs6/leermijvrouwenkennen/videos/jqMiXKAYan2S/aflevering-1',
'info_dict': {
'id': '3866516442001',
'ext': 'mp4',
'title': 'Leer mij vrouwen kennen: Aflevering 1',
'description': 'Leer mij vrouwen kennen: Aflevering 1',
'uploader': 'SBS Broadcasting',
},
'skip': 'Restricted to Netherlands',
'params': {
'skip_download': True, # m3u8 download
},
},
# ooyala video
{
'url': 'http://www.rollingstone.com/music/videos/norwegian-dj-cashmere-cat-goes-spartan-on-with-me-premiere-20131219',
'md5': '166dd577b433b4d4ebfee10b0824d8ff',
'info_dict': {
'id': 'BwY2RxaTrTkslxOfcan0UCf0YqyvWysJ',
'ext': 'mp4',
'title': '2cc213299525360.mov', # that's what we get
'duration': 238.231,
},
'add_ie': ['Ooyala'],
},
{
# ooyala video embedded with http://player.ooyala.com/iframe.js
'url': 'http://www.macrumors.com/2015/07/24/steve-jobs-the-man-in-the-machine-first-trailer/',
'info_dict': {
'id': 'p0MGJndjoG5SOKqO_hZJuZFPB-Tr5VgB',
'ext': 'mp4',
'title': '"Steve Jobs: Man in the Machine" trailer',
'description': 'The first trailer for the Alex Gibney documentary "Steve Jobs: Man in the Machine."',
'duration': 135.427,
},
'params': {
'skip_download': True,
},
},
# embed.ly video
{
'url': 'http://www.tested.com/science/weird/460206-tested-grinding-coffee-2000-frames-second/',
'info_dict': {
'id': '9ODmcdjQcHQ',
'ext': 'mp4',
'title': 'Tested: Grinding Coffee at 2000 Frames Per Second',
'upload_date': '20140225',
'description': 'md5:06a40fbf30b220468f1e0957c0f558ff',
'uploader': 'Tested',
'uploader_id': 'testedcom',
},
# No need to test YoutubeIE here
'params': {
'skip_download': True,
},
},
# funnyordie embed
{
'url': 'http://www.theguardian.com/world/2014/mar/11/obama-zach-galifianakis-between-two-ferns',
'info_dict': {
'id': '18e820ec3f',
'ext': 'mp4',
'title': 'Between Two Ferns with Zach Galifianakis: President Barack Obama',
'description': 'Episode 18: President Barack Obama sits down with Zach Galifianakis for his most memorable interview yet.',
},
},
# RUTV embed
{
'url': 'http://www.rg.ru/2014/03/15/reg-dfo/anklav-anons.html',
'info_dict': {
'id': '776940',
'ext': 'mp4',
'title': 'Охотское море стало целиком российским',
'description': 'md5:5ed62483b14663e2a95ebbe115eb8f43',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
# TVC embed
{
'url': 'http://sch1298sz.mskobr.ru/dou_edu/karamel_ki/filial_galleries/video/iframe_src_http_tvc_ru_video_iframe_id_55304_isplay_false_acc_video_id_channel_brand_id_11_show_episodes_episode_id_32307_frameb/',
'info_dict': {
'id': '55304',
'ext': 'mp4',
'title': 'Дошкольное воспитание',
},
},
# SportBox embed
{
'url': 'http://www.vestifinance.ru/articles/25753',
'info_dict': {
'id': '25753',
'title': 'Вести Экономика ― Прямые трансляции с Форума-выставки "Госзаказ-2013"',
},
'playlist': [{
'info_dict': {
'id': '370908',
'title': 'Госзаказ. День 3',
'ext': 'mp4',
}
}, {
'info_dict': {
'id': '370905',
'title': 'Госзаказ. День 2',
'ext': 'mp4',
}
}, {
'info_dict': {
'id': '370902',
'title': 'Госзаказ. День 1',
'ext': 'mp4',
}
}],
'params': {
# m3u8 download
'skip_download': True,
},
},
# Myvi.ru embed
{
'url': 'http://www.kinomyvi.tv/news/detail/Pervij-dublirovannij-trejler--Uzhastikov-_nOw1',
'info_dict': {
'id': 'f4dafcad-ff21-423d-89b5-146cfd89fa1e',
'ext': 'mp4',
'title': 'Ужастики, русский трейлер (2015)',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 153,
}
},
# XHamster embed
{
'url': 'http://www.numisc.com/forum/showthread.php?11696-FM15-which-pumiscer-was-this-%28-vid-%29-%28-alfa-as-fuck-srx-%29&s=711f5db534502e22260dec8c5e2d66d8',
'info_dict': {
'id': 'showthread',
'title': '[NSFL] [FM15] which pumiscer was this ( vid ) ( alfa as fuck srx )',
},
'playlist_mincount': 7,
},
# Embedded TED video
{
'url': 'http://en.support.wordpress.com/videos/ted-talks/',
'md5': '65fdff94098e4a607385a60c5177c638',
'info_dict': {
'id': '1969',
'ext': 'mp4',
'title': 'Hidden miracles of the natural world',
'uploader': 'Louie Schwartzberg',
'description': 'md5:8145d19d320ff3e52f28401f4c4283b9',
}
},
# Embedded Ustream video
{
'url': 'http://www.american.edu/spa/pti/nsa-privacy-janus-2014.cfm',
'md5': '27b99cdb639c9b12a79bca876a073417',
'info_dict': {
'id': '45734260',
'ext': 'flv',
'uploader': 'AU SPA: The NSA and Privacy',
'title': 'NSA and Privacy Forum Debate featuring General Hayden and Barton Gellman'
}
},
# nowvideo embed hidden behind percent encoding
{
'url': 'http://www.waoanime.tv/the-super-dimension-fortress-macross-episode-1/',
'md5': '2baf4ddd70f697d94b1c18cf796d5107',
'info_dict': {
'id': '06e53103ca9aa',
'ext': 'flv',
'title': 'Macross Episode 001 Watch Macross Episode 001 onl',
'description': 'No description',
},
},
# arte embed
{
'url': 'http://www.tv-replay.fr/redirection/20-03-14/x-enius-arte-10753389.html',
'md5': '7653032cbb25bf6c80d80f217055fa43',
'info_dict': {
'id': '048195-004_PLUS7-F',
'ext': 'flv',
'title': 'X:enius',
'description': 'md5:d5fdf32ef6613cdbfd516ae658abf168',
'upload_date': '20140320',
},
'params': {
'skip_download': 'Requires rtmpdump'
}
},
# francetv embed
{
'url': 'http://www.tsprod.com/replay-du-concert-alcaline-de-calogero',
'info_dict': {
'id': 'EV_30231',
'ext': 'mp4',
'title': 'Alcaline, le concert avec Calogero',
'description': 'md5:61f08036dcc8f47e9cfc33aed08ffaff',
'upload_date': '20150226',
'timestamp': 1424989860,
'duration': 5400,
},
'params': {
# m3u8 downloads
'skip_download': True,
},
'expected_warnings': [
'Forbidden'
]
},
# Condé Nast embed
{
'url': 'http://www.wired.com/2014/04/honda-asimo/',
'md5': 'ba0dfe966fa007657bd1443ee672db0f',
'info_dict': {
'id': '53501be369702d3275860000',
'ext': 'mp4',
'title': 'Honda’s New Asimo Robot Is More Human Than Ever',
}
},
# Dailymotion embed
{
'url': 'http://www.spi0n.com/zap-spi0n-com-n216/',
'md5': '441aeeb82eb72c422c7f14ec533999cd',
'info_dict': {
'id': 'k2mm4bCdJ6CQ2i7c8o2',
'ext': 'mp4',
'title': 'Le Zap de Spi0n n°216 - Zapping du Web',
'description': 'md5:faf028e48a461b8b7fad38f1e104b119',
'uploader': 'Spi0n',
'uploader_id': 'xgditw',
'upload_date': '20140425',
'timestamp': 1398441542,
},
'add_ie': ['Dailymotion'],
},
# YouTube embed
{
'url': 'http://www.badzine.de/ansicht/datum/2014/06/09/so-funktioniert-die-neue-englische-badminton-liga.html',
'info_dict': {
'id': 'FXRb4ykk4S0',
'ext': 'mp4',
'title': 'The NBL Auction 2014',
'uploader': 'BADMINTON England',
'uploader_id': 'BADMINTONEvents',
'upload_date': '20140603',
'description': 'md5:9ef128a69f1e262a700ed83edb163a73',
},
'add_ie': ['Youtube'],
'params': {
'skip_download': True,
}
},
# MTVSercices embed
{
'url': 'http://www.gametrailers.com/news-post/76093/north-america-europe-is-getting-that-mario-kart-8-mercedes-dlc-too',
'md5': '35727f82f58c76d996fc188f9755b0d5',
'info_dict': {
'id': '0306a69b-8adf-4fb5-aace-75f8e8cbfca9',
'ext': 'mp4',
'title': 'Review',
'description': 'Mario\'s life in the fast lane has never looked so good.',
},
},
# YouTube embed via <data-embed-url="">
{
'url': 'https://play.google.com/store/apps/details?id=com.gameloft.android.ANMP.GloftA8HM',
'info_dict': {
'id': '4vAffPZIT44',
'ext': 'mp4',
'title': 'Asphalt 8: Airborne - Update - Welcome to Dubai!',
'uploader': 'Gameloft',
'uploader_id': 'gameloft',
'upload_date': '20140828',
'description': 'md5:c80da9ed3d83ae6d1876c834de03e1c4',
},
'params': {
'skip_download': True,
}
},
# Camtasia studio
{
'url': 'http://www.ll.mit.edu/workshops/education/videocourses/antennas/lecture1/video/',
'playlist': [{
'md5': '0c5e352edabf715d762b0ad4e6d9ee67',
'info_dict': {
'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - video1',
'ext': 'flv',
'duration': 2235.90,
}
}, {
'md5': '10e4bb3aaca9fd630e273ff92d9f3c63',
'info_dict': {
'id': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final_PIP',
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final - pip',
'ext': 'flv',
'duration': 2235.93,
}
}],
'info_dict': {
'title': 'Fenn-AA_PA_Radar_Course_Lecture_1c_Final',
}
},
# Flowplayer
{
'url': 'http://www.handjobhub.com/video/busty-blonde-siri-tit-fuck-while-wank-6313.html',
'md5': '9d65602bf31c6e20014319c7d07fba27',
'info_dict': {
'id': '5123ea6d5e5a7',
'ext': 'mp4',
'age_limit': 18,
'uploader': 'www.handjobhub.com',
'title': 'Busty Blonde Siri Tit Fuck While Wank at HandjobHub.com',
}
},
# Multiple brightcove videos
# https://github.com/rg3/youtube-dl/issues/2283
{
'url': 'http://www.newyorker.com/online/blogs/newsdesk/2014/01/always-never-nuclear-command-and-control.html',
'info_dict': {
'id': 'always-never',
'title': 'Always / Never - The New Yorker',
},
'playlist_count': 3,
'params': {
'extract_flat': False,
'skip_download': True,
}
},
# MLB embed
{
'url': 'http://umpire-empire.com/index.php/topic/58125-laz-decides-no-thats-low/',
'md5': '96f09a37e44da40dd083e12d9a683327',
'info_dict': {
'id': '33322633',
'ext': 'mp4',
'title': 'Ump changes call to ball',
'description': 'md5:71c11215384298a172a6dcb4c2e20685',
'duration': 48,
'timestamp': 1401537900,
'upload_date': '20140531',
'thumbnail': 're:^https?://.*\.jpg$',
},
},
# Wistia embed
{
'url': 'http://study.com/academy/lesson/north-american-exploration-failed-colonies-of-spain-france-england.html#lesson',
'md5': '1953f3a698ab51cfc948ed3992a0b7ff',
'info_dict': {
'id': '6e2wtrbdaf',
'ext': 'mov',
'title': 'paywall_north-american-exploration-failed-colonies-of-spain-france-england',
'description': 'a Paywall Videos video from Remilon',
'duration': 644.072,
'uploader': 'study.com',
'timestamp': 1459678540,
'upload_date': '20160403',
'filesize': 24687186,
},
},
{
'url': 'http://thoughtworks.wistia.com/medias/uxjb0lwrcz',
'md5': 'baf49c2baa8a7de5f3fc145a8506dcd4',
'info_dict': {
'id': 'uxjb0lwrcz',
'ext': 'mp4',
'title': 'Conversation about Hexagonal Rails Part 1',
'description': 'a Martin Fowler video from ThoughtWorks',
'duration': 1715.0,
'uploader': 'thoughtworks.wistia.com',
'timestamp': 1401832161,
'upload_date': '20140603',
},
},
# Wistia standard embed (async)
{
'url': 'https://www.getdrip.com/university/brennan-dunn-drip-workshop/',
'info_dict': {
'id': '807fafadvk',
'ext': 'mp4',
'title': 'Drip Brennan Dunn Workshop',
'description': 'a JV Webinars video from getdrip-1',
'duration': 4986.95,
'timestamp': 1463607249,
'upload_date': '20160518',
},
'params': {
'skip_download': True,
}
},
# Soundcloud embed
{
'url': 'http://nakedsecurity.sophos.com/2014/10/29/sscc-171-are-you-sure-that-1234-is-a-bad-password-podcast/',
'info_dict': {
'id': '174391317',
'ext': 'mp3',
'description': 'md5:ff867d6b555488ad3c52572bb33d432c',
'uploader': 'Sophos Security',
'title': 'Chet Chat 171 - Oct 29, 2014',
'upload_date': '20141029',
}
},
# Livestream embed
{
'url': 'http://www.esa.int/Our_Activities/Space_Science/Rosetta/Philae_comet_touch-down_webcast',
'info_dict': {
'id': '67864563',
'ext': 'flv',
'upload_date': '20141112',
'title': 'Rosetta #CometLanding webcast HL 10',
}
},
# Another Livestream embed, without 'new.' in URL
{
'url': 'https://www.freespeech.org/',
'info_dict': {
'id': '123537347',
'ext': 'mp4',
'title': 're:^FSTV [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
},
'params': {
# Live stream
'skip_download': True,
},
},
# LazyYT
{
'url': 'http://discourse.ubuntu.com/t/unity-8-desktop-mode-windows-on-mir/1986',
'info_dict': {
'id': '1986',
'title': 'Unity 8 desktop-mode windows on Mir! - Ubuntu Discourse',
},
'playlist_mincount': 2,
},
# Cinchcast embed
{
'url': 'http://undergroundwellness.com/podcasts/306-5-steps-to-permanent-gut-healing/',
'info_dict': {
'id': '7141703',
'ext': 'mp3',
'upload_date': '20141126',
'title': 'Jack Tips: 5 Steps to Permanent Gut Healing',
}
},
# Cinerama player
{
'url': 'http://www.abc.net.au/7.30/content/2015/s4164797.htm',
'info_dict': {
'id': '730m_DandD_1901_512k',
'ext': 'mp4',
'uploader': 'www.abc.net.au',
'title': 'Game of Thrones with dice - Dungeons and Dragons fantasy role-playing game gets new life - 19/01/2015',
}
},
# embedded viddler video
{
'url': 'http://deadspin.com/i-cant-stop-watching-john-wall-chop-the-nuggets-with-th-1681801597',
'info_dict': {
'id': '4d03aad9',
'ext': 'mp4',
'uploader': 'deadspin',
'title': 'WALL-TO-GORTAT',
'timestamp': 1422285291,
'upload_date': '20150126',
},
'add_ie': ['Viddler'],
},
# Libsyn embed
{
'url': 'http://thedailyshow.cc.com/podcast/episodetwelve',
'info_dict': {
'id': '3377616',
'ext': 'mp3',
'title': "The Daily Show Podcast without Jon Stewart - Episode 12: Bassem Youssef: Egypt's Jon Stewart",
'description': 'md5:601cb790edd05908957dae8aaa866465',
'upload_date': '20150220',
},
},
# jwplayer YouTube
{
'url': 'http://media.nationalarchives.gov.uk/index.php/webinar-using-discovery-national-archives-online-catalogue/',
'info_dict': {
'id': 'Mrj4DVp2zeA',
'ext': 'mp4',
'upload_date': '20150212',
'uploader': 'The National Archives UK',
'description': 'md5:a236581cd2449dd2df4f93412f3f01c6',
'uploader_id': 'NationalArchives08',
'title': 'Webinar: Using Discovery, The National Archives’ online catalogue',
},
},
# rtl.nl embed
{
'url': 'http://www.rtlnieuws.nl/nieuws/buitenland/aanslagen-kopenhagen',
'playlist_mincount': 5,
'info_dict': {
'id': 'aanslagen-kopenhagen',
'title': 'Aanslagen Kopenhagen | RTL Nieuws',
}
},
# Zapiks embed
{
'url': 'http://www.skipass.com/news/116090-bon-appetit-s5ep3-baqueira-mi-cor.html',
'info_dict': {
'id': '118046',
'ext': 'mp4',
'title': 'EP3S5 - Bon Appétit - Baqueira Mi Corazon !',
}
},
# Kaltura embed (different embed code)
{
'url': 'http://www.premierchristianradio.com/Shows/Saturday/Unbelievable/Conference-Videos/Os-Guinness-Is-It-Fools-Talk-Unbelievable-Conference-2014',
'info_dict': {
'id': '1_a52wc67y',
'ext': 'flv',
'upload_date': '20150127',
'uploader_id': 'PremierMedia',
'timestamp': int,
'title': 'Os Guinness // Is It Fools Talk? // Unbelievable? Conference 2014',
},
},
# Kaltura embed protected with referrer
{
'url': 'http://www.disney.nl/disney-channel/filmpjes/achter-de-schermen#/videoId/violetta-achter-de-schermen-ruggero',
'info_dict': {
'id': '1_g4fbemnq',
'ext': 'mp4',
'title': 'Violetta - Achter De Schermen - Ruggero',
'description': 'Achter de schermen met Ruggero',
'timestamp': 1435133761,
'upload_date': '20150624',
'uploader_id': 'echojecka',
},
},
# Kaltura embed with single quotes
{
'url': 'http://fod.infobase.com/p_ViewPlaylist.aspx?AssignmentID=NUN8ZY',
'info_dict': {
'id': '0_izeg5utt',
'ext': 'mp4',
'title': '35871',
'timestamp': 1355743100,
'upload_date': '20121217',
'uploader_id': 'batchUser',
},
'add_ie': ['Kaltura'],
},
# Eagle.Platform embed (generic URL)
{
'url': 'http://lenta.ru/news/2015/03/06/navalny/',
# Not checking MD5 as sometimes the direct HTTP link results in 404 and HLS is used
'info_dict': {
'id': '227304',
'ext': 'mp4',
'title': 'Навальный вышел на свободу',
'description': 'md5:d97861ac9ae77377f3f20eaf9d04b4f5',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 87,
'view_count': int,
'age_limit': 0,
},
},
# ClipYou (Eagle.Platform) embed (custom URL)
{
'url': 'http://muz-tv.ru/play/7129/',
# Not checking MD5 as sometimes the direct HTTP link results in 404 and HLS is used
'info_dict': {
'id': '12820',
'ext': 'mp4',
'title': "'O Sole Mio",
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 216,
'view_count': int,
},
},
# Pladform embed
{
'url': 'http://muz-tv.ru/kinozal/view/7400/',
'info_dict': {
'id': '100183293',
'ext': 'mp4',
'title': 'Тайны перевала Дятлова • 1 серия 2 часть',
'description': 'Документальный сериал-расследование одной из самых жутких тайн ХХ века',
'thumbnail': 're:^https?://.*\.jpg$',
'duration': 694,
'age_limit': 0,
},
},
# Playwire embed
{
'url': 'http://www.cinemablend.com/new/First-Joe-Dirt-2-Trailer-Teaser-Stupid-Greatness-70874.html',
'info_dict': {
'id': '3519514',
'ext': 'mp4',
'title': 'Joe Dirt 2 Beautiful Loser Teaser Trailer',
                'thumbnail': r're:^https?://.*\.png$',
'duration': 45.115,
},
},
# 5min embed
{
'url': 'http://techcrunch.com/video/facebook-creates-on-this-day-crunch-report/518726732/',
'md5': '4c6f127a30736b59b3e2c19234ee2bf7',
'info_dict': {
'id': '518726732',
'ext': 'mp4',
'title': 'Facebook Creates "On This Day" | Crunch Report',
},
},
# SVT embed
{
'url': 'http://www.svt.se/sport/ishockey/jagr-tacklar-giroux-under-intervjun',
'info_dict': {
'id': '2900353',
'ext': 'flv',
'title': 'Här trycker Jagr till Giroux (under SVT-intervjun)',
'duration': 27,
'age_limit': 0,
},
},
# Crooks and Liars embed
{
'url': 'http://crooksandliars.com/2015/04/fox-friends-says-protecting-atheists',
'info_dict': {
'id': '8RUoRhRi',
'ext': 'mp4',
'title': "Fox & Friends Says Protecting Atheists From Discrimination Is Anti-Christian!",
'description': 'md5:e1a46ad1650e3a5ec7196d432799127f',
'timestamp': 1428207000,
'upload_date': '20150405',
'uploader': 'Heather',
},
},
# Crooks and Liars external embed
{
'url': 'http://theothermccain.com/2010/02/02/video-proves-that-bill-kristol-has-been-watching-glenn-beck/comment-page-1/',
'info_dict': {
'id': 'MTE3MjUtMzQ2MzA',
'ext': 'mp4',
'title': 'md5:5e3662a81a4014d24c250d76d41a08d5',
'description': 'md5:9b8e9542d6c3c5de42d6451b7d780cec',
'timestamp': 1265032391,
'upload_date': '20100201',
'uploader': 'Heather',
},
},
# NBC Sports vplayer embed
{
'url': 'http://www.riderfans.com/forum/showthread.php?121827-Freeman&s=e98fa1ea6dc08e886b1678d35212494a',
'info_dict': {
'id': 'ln7x1qSThw4k',
'ext': 'flv',
'title': "PFT Live: New leader in the 'new-look' defense",
'description': 'md5:65a19b4bbfb3b0c0c5768bed1dfad74e',
'uploader': 'NBCU-SPORTS',
'upload_date': '20140107',
'timestamp': 1389118457,
},
},
# UDN embed
{
'url': 'https://video.udn.com/news/300346',
'md5': 'fd2060e988c326991037b9aff9df21a6',
'info_dict': {
'id': '300346',
'ext': 'mp4',
'title': '中一中男師變性 全校師生力挺',
                'thumbnail': r're:^https?://.*\.jpg$',
},
'params': {
# m3u8 download
'skip_download': True,
},
},
# Ooyala embed
{
'url': 'http://www.businessinsider.com/excel-index-match-vlookup-video-how-to-2015-2?IR=T',
'info_dict': {
'id': '50YnY4czr4ms1vJ7yz3xzq0excz_pUMs',
'ext': 'mp4',
'description': 'VIDEO: INDEX/MATCH versus VLOOKUP.',
'title': 'This is what separates the Excel masters from the wannabes',
'duration': 191.933,
},
'params': {
# m3u8 downloads
'skip_download': True,
}
},
# Contains a SMIL manifest
{
'url': 'http://www.telewebion.com/fa/1263668/%D9%82%D8%B1%D8%B9%D9%87%E2%80%8C%DA%A9%D8%B4%DB%8C-%D9%84%DB%8C%DA%AF-%D9%82%D9%87%D8%B1%D9%85%D8%A7%D9%86%D8%A7%D9%86-%D8%A7%D8%B1%D9%88%D9%BE%D8%A7/%2B-%D9%81%D9%88%D8%AA%D8%A8%D8%A7%D9%84.html',
'info_dict': {
'id': 'file',
'ext': 'flv',
'title': '+ Football: Lottery Champions League Europe',
'uploader': 'www.telewebion.com',
},
'params': {
# rtmpe downloads
'skip_download': True,
}
},
# Brightcove URL in single quotes
{
'url': 'http://www.sportsnet.ca/baseball/mlb/sn-presents-russell-martin-world-citizen/',
'md5': '4ae374f1f8b91c889c4b9203c8c752af',
'info_dict': {
'id': '4255764656001',
'ext': 'mp4',
'title': 'SN Presents: Russell Martin, World Citizen',
'description': 'To understand why he was the Toronto Blue Jays’ top off-season priority is to appreciate his background and upbringing in Montreal, where he first developed his baseball skills. Written and narrated by Stephen Brunt.',
'uploader': 'Rogers Sportsnet',
'uploader_id': '1704050871',
'upload_date': '20150525',
'timestamp': 1432570283,
},
},
# Dailymotion Cloud video
{
'url': 'http://replay.publicsenat.fr/vod/le-debat/florent-kolandjian,dominique-cena,axel-decourtye,laurence-abeille,bruno-parmentier/175910',
'md5': '49444254273501a64675a7e68c502681',
'info_dict': {
'id': '5585de919473990de4bee11b',
'ext': 'mp4',
'title': 'Le débat',
                'thumbnail': r're:^https?://.*\.jpe?g$',
}
},
# OnionStudios embed
{
'url': 'http://www.clickhole.com/video/dont-understand-bitcoin-man-will-mumble-explanatio-2537',
'info_dict': {
'id': '2855',
'ext': 'mp4',
'title': 'Don’t Understand Bitcoin? This Man Will Mumble An Explanation At You',
                'thumbnail': r're:^https?://.*\.jpe?g$',
'uploader': 'ClickHole',
'uploader_id': 'clickhole',
}
},
# SnagFilms embed
{
'url': 'http://whilewewatch.blogspot.ru/2012/06/whilewewatch-whilewewatch-gripping.html',
'info_dict': {
'id': '74849a00-85a9-11e1-9660-123139220831',
'ext': 'mp4',
'title': '#whilewewatch',
}
},
# AdobeTVVideo embed
{
'url': 'https://helpx.adobe.com/acrobat/how-to/new-experience-acrobat-dc.html?set=acrobat--get-started--essential-beginners',
'md5': '43662b577c018ad707a63766462b1e87',
'info_dict': {
'id': '2456',
'ext': 'mp4',
'title': 'New experience with Acrobat DC',
'description': 'New experience with Acrobat DC',
'duration': 248.667,
},
},
# ScreenwaveMedia embed
{
'url': 'http://www.thecinemasnob.com/the-cinema-snob/a-nightmare-on-elm-street-2-freddys-revenge1',
'md5': '24ace5baba0d35d55c6810b51f34e9e0',
'info_dict': {
'id': 'cinemasnob-55d26273809dd',
'ext': 'mp4',
'title': 'cinemasnob',
},
},
# BrightcoveInPageEmbed embed
{
'url': 'http://www.geekandsundry.com/tabletop-bonus-wils-final-thoughts-on-dread/',
'info_dict': {
'id': '4238694884001',
'ext': 'flv',
'title': 'Tabletop: Dread, Last Thoughts',
'description': 'Tabletop: Dread, Last Thoughts',
'duration': 51690,
},
},
# JWPlayer with M3U8
{
'url': 'http://ren.tv/novosti/2015-09-25/sluchaynyy-prohozhiy-poymal-avtougonshchika-v-murmanske-video',
'info_dict': {
'id': 'playlist',
'ext': 'mp4',
'title': 'Случайный прохожий поймал автоугонщика в Мурманске. ВИДЕО | РЕН ТВ',
'uploader': 'ren.tv',
},
'params': {
# m3u8 downloads
'skip_download': True,
}
},
# Brightcove embed, with no valid 'renditions' but valid 'IOSRenditions'
        # This video can't be played in browsers if Flash is disabled and the UA is set to iPhone, which is actually a false alarm
{
'url': 'https://dl.dropboxusercontent.com/u/29092637/interview.html',
'info_dict': {
'id': '4785848093001',
'ext': 'mp4',
'title': 'The Cardinal Pell Interview',
'description': 'Sky News Contributor Andrew Bolt interviews George Pell in Rome, following the Cardinal\'s evidence before the Royal Commission into Child Abuse. ',
'uploader': 'GlobeCast Australia - GlobeStream',
'uploader_id': '2733773828001',
'upload_date': '20160304',
'timestamp': 1457083087,
},
'params': {
# m3u8 downloads
'skip_download': True,
},
},
# Another form of arte.tv embed
{
'url': 'http://www.tv-replay.fr/redirection/09-04-16/arte-reportage-arte-11508975.html',
'md5': '850bfe45417ddf221288c88a0cffe2e2',
'info_dict': {
'id': '030273-562_PLUS7-F',
'ext': 'mp4',
'title': 'ARTE Reportage - Nulle part, en France',
'description': 'md5:e3a0e8868ed7303ed509b9e3af2b870d',
'upload_date': '20160409',
},
},
# LiveLeak embed
{
'url': 'http://www.wykop.pl/link/3088787/',
'md5': 'ace83b9ed19b21f68e1b50e844fdf95d',
'info_dict': {
'id': '874_1459135191',
'ext': 'mp4',
'title': 'Man shows poor quality of new apartment building',
'description': 'The wall is like a sand pile.',
'uploader': 'Lake8737',
}
},
# Duplicated embedded video URLs
{
'url': 'http://www.hudl.com/athlete/2538180/highlights/149298443',
'info_dict': {
'id': '149298443_480_16c25b74_2',
'ext': 'mp4',
'title': 'vs. Blue Orange Spring Game',
'uploader': 'www.hudl.com',
},
},
]
def report_following_redirect(self, new_url):
"""Report information extraction."""
self._downloader.to_screen('[redirect] Following redirect to %s' % new_url)
def _extract_rss(self, url, video_id, doc):
playlist_title = doc.find('./channel/title').text
playlist_desc_el = doc.find('./channel/description')
playlist_desc = None if playlist_desc_el is None else playlist_desc_el.text
entries = []
for it in doc.findall('./channel/item'):
next_url = xpath_text(it, 'link', fatal=False)
if not next_url:
enclosure_nodes = it.findall('./enclosure')
for e in enclosure_nodes:
next_url = e.attrib.get('url')
if next_url:
break
if not next_url:
continue
entries.append({
'_type': 'url',
'url': next_url,
'title': it.find('title').text,
})
return {
'_type': 'playlist',
'id': url,
'title': playlist_title,
'description': playlist_desc,
'entries': entries,
}
def _extract_camtasia(self, url, video_id, webpage):
""" Returns None if no camtasia video can be found. """
camtasia_cfg = self._search_regex(
r'fo\.addVariable\(\s*"csConfigFile",\s*"([^"]+)"\s*\);',
webpage, 'camtasia configuration file', default=None)
if camtasia_cfg is None:
return None
title = self._html_search_meta('DC.title', webpage, fatal=True)
camtasia_url = compat_urlparse.urljoin(url, camtasia_cfg)
camtasia_cfg = self._download_xml(
camtasia_url, video_id,
note='Downloading camtasia configuration',
errnote='Failed to download camtasia configuration')
fileset_node = camtasia_cfg.find('./playlist/array/fileset')
entries = []
for n in fileset_node.getchildren():
url_n = n.find('./uri')
if url_n is None:
continue
entries.append({
'id': os.path.splitext(url_n.text.rpartition('/')[2])[0],
'title': '%s - %s' % (title, n.tag),
'url': compat_urlparse.urljoin(url, url_n.text),
'duration': float_or_none(n.find('./duration').text),
})
return {
'_type': 'playlist',
'entries': entries,
'title': title,
}
def _real_extract(self, url):
if url.startswith('//'):
return {
'_type': 'url',
'url': self.http_scheme() + url,
}
parsed_url = compat_urlparse.urlparse(url)
if not parsed_url.scheme:
default_search = self._downloader.params.get('default_search')
if default_search is None:
default_search = 'fixup_error'
if default_search in ('auto', 'auto_warning', 'fixup_error'):
if '/' in url:
self._downloader.report_warning('The url doesn\'t specify the protocol, trying with http')
return self.url_result('http://' + url)
elif default_search != 'fixup_error':
if default_search == 'auto_warning':
if re.match(r'^(?:url|URL)$', url):
raise ExtractorError(
'Invalid URL: %r . Call youtube-dl like this: youtube-dl -v "https://www.youtube.com/watch?v=BaW_jenozKc" ' % url,
expected=True)
else:
self._downloader.report_warning(
'Falling back to youtube search for %s . Set --default-search "auto" to suppress this warning.' % url)
return self.url_result('ytsearch:' + url)
if default_search in ('error', 'fixup_error'):
raise ExtractorError(
'%r is not a valid URL. '
'Set --default-search "ytsearch" (or run youtube-dl "ytsearch:%s" ) to search YouTube'
% (url, url), expected=True)
else:
if ':' not in default_search:
default_search += ':'
return self.url_result(default_search + url)
url, smuggled_data = unsmuggle_url(url)
force_videoid = None
is_intentional = smuggled_data and smuggled_data.get('to_generic')
if smuggled_data and 'force_videoid' in smuggled_data:
force_videoid = smuggled_data['force_videoid']
video_id = force_videoid
else:
video_id = compat_urllib_parse_unquote(os.path.splitext(url.rstrip('/').split('/')[-1])[0])
self.to_screen('%s: Requesting header' % video_id)
head_req = HEADRequest(url)
head_response = self._request_webpage(
head_req, video_id,
note=False, errnote='Could not send HEAD request to %s' % url,
fatal=False)
if head_response is not False:
# Check for redirect
new_url = head_response.geturl()
if url != new_url:
self.report_following_redirect(new_url)
if force_videoid:
new_url = smuggle_url(
new_url, {'force_videoid': force_videoid})
return self.url_result(new_url)
full_response = None
if head_response is False:
request = sanitized_Request(url)
request.add_header('Accept-Encoding', '*')
full_response = self._request_webpage(request, video_id)
head_response = full_response
info_dict = {
'id': video_id,
'title': compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0]),
'upload_date': unified_strdate(head_response.headers.get('Last-Modified'))
}
# Check for direct link to a video
content_type = head_response.headers.get('Content-Type', '').lower()
m = re.match(r'^(?P<type>audio|video|application(?=/(?:ogg$|(?:vnd\.apple\.|x-)?mpegurl)))/(?P<format_id>[^;\s]+)', content_type)
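        # Illustrative note (comment added for clarity, not in the original source):
        # this pattern accepts Content-Type values such as 'video/mp4', 'audio/mpeg',
        # 'application/ogg', 'application/vnd.apple.mpegurl' and 'application/x-mpegurl';
        # the part after the slash becomes format_id below.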
if m:
format_id = m.group('format_id')
if format_id.endswith('mpegurl'):
formats = self._extract_m3u8_formats(url, video_id, 'mp4')
elif format_id == 'f4m':
formats = self._extract_f4m_formats(url, video_id)
else:
formats = [{
'format_id': m.group('format_id'),
'url': url,
'vcodec': 'none' if m.group('type') == 'audio' else None
}]
info_dict['direct'] = True
self._sort_formats(formats)
info_dict['formats'] = formats
return info_dict
if not self._downloader.params.get('test', False) and not is_intentional:
force = self._downloader.params.get('force_generic_extractor', False)
self._downloader.report_warning(
'%s on generic information extractor.' % ('Forcing' if force else 'Falling back'))
if not full_response:
request = sanitized_Request(url)
            # Some webservers may serve compressed content of rather big size (e.g. gzipped flac),
            # making it impossible to download only a chunk of the file (yet we need only 512kB to
            # test whether it's HTML or not). With youtube-dl's default Accept-Encoding
            # that would always result in downloading the whole file, which is not desirable.
            # Therefore, for the extraction pass we override Accept-Encoding to any in order
            # to accept raw bytes and be able to download only a chunk.
            # It would probably be better to solve this by checking Content-Type for
            # application/octet-stream after the HEAD request finishes, but it is not clear
            # whether we can rely on that.
request.add_header('Accept-Encoding', '*')
full_response = self._request_webpage(request, video_id)
first_bytes = full_response.read(512)
# Is it an M3U playlist?
if first_bytes.startswith(b'#EXTM3U'):
info_dict['formats'] = self._extract_m3u8_formats(url, video_id, 'mp4')
self._sort_formats(info_dict['formats'])
return info_dict
# Maybe it's a direct link to a video?
# Be careful not to download the whole thing!
if not is_html(first_bytes):
self._downloader.report_warning(
'URL could be a direct video link, returning it as such.')
info_dict.update({
'direct': True,
'url': url,
})
return info_dict
webpage = self._webpage_read_content(
full_response, url, video_id, prefix=first_bytes)
self.report_extraction(video_id)
# Is it an RSS feed, a SMIL file, an XSPF playlist or a MPD manifest?
try:
doc = compat_etree_fromstring(webpage.encode('utf-8'))
if doc.tag == 'rss':
return self._extract_rss(url, video_id, doc)
elif re.match(r'^(?:{[^}]+})?smil$', doc.tag):
smil = self._parse_smil(doc, url, video_id)
self._sort_formats(smil['formats'])
return smil
elif doc.tag == '{http://xspf.org/ns/0/}playlist':
return self.playlist_result(self._parse_xspf(doc, video_id), video_id)
elif re.match(r'(?i)^(?:{[^}]+})?MPD$', doc.tag):
info_dict['formats'] = self._parse_mpd_formats(
doc, video_id, mpd_base_url=url.rpartition('/')[0])
self._sort_formats(info_dict['formats'])
return info_dict
elif re.match(r'^{http://ns\.adobe\.com/f4m/[12]\.0}manifest$', doc.tag):
info_dict['formats'] = self._parse_f4m_formats(doc, url, video_id)
self._sort_formats(info_dict['formats'])
return info_dict
except compat_xml_parse_error:
pass
# Is it a Camtasia project?
camtasia_res = self._extract_camtasia(url, video_id, webpage)
if camtasia_res is not None:
return camtasia_res
        # Sometimes the embedded video player is hidden behind percent encoding
        # (e.g. https://github.com/rg3/youtube-dl/issues/2448)
        # Unescaping the whole page makes it possible to handle those cases in a generic way
webpage = compat_urllib_parse_unquote(webpage)
# it's tempting to parse this further, but you would
# have to take into account all the variations like
# Video Title - Site Name
# Site Name | Video Title
# Video Title - Tagline | Site Name
# and so on and so forth; it's just not practical
video_title = self._og_search_title(
webpage, default=None) or self._html_search_regex(
r'(?s)<title>(.*?)</title>', webpage, 'video title',
default='video')
# Try to detect age limit automatically
age_limit = self._rta_search(webpage)
# And then there are the jokers who advertise that they use RTA,
# but actually don't.
AGE_LIMIT_MARKERS = [
r'Proudly Labeled <a href="http://www.rtalabel.org/" title="Restricted to Adults">RTA</a>',
]
if any(re.search(marker, webpage) for marker in AGE_LIMIT_MARKERS):
age_limit = 18
# video uploader is domain name
video_uploader = self._search_regex(
r'^(?:https?://)?([^/]*)/.*', url, 'video uploader')
video_description = self._og_search_description(webpage, default=None)
video_thumbnail = self._og_search_thumbnail(webpage, default=None)
# Helper method
def _playlist_from_matches(matches, getter=None, ie=None):
urlrs = orderedSet(
self.url_result(self._proto_relative_url(getter(m) if getter else m), ie)
for m in matches)
return self.playlist_result(
urlrs, playlist_id=video_id, playlist_title=video_title)
# Look for Brightcove Legacy Studio embeds
bc_urls = BrightcoveLegacyIE._extract_brightcove_urls(webpage)
if bc_urls:
self.to_screen('Brightcove video detected.')
entries = [{
'_type': 'url',
'url': smuggle_url(bc_url, {'Referer': url}),
'ie_key': 'BrightcoveLegacy'
} for bc_url in bc_urls]
return {
'_type': 'playlist',
'title': video_title,
'id': video_id,
'entries': entries,
}
# Look for Brightcove New Studio embeds
bc_urls = BrightcoveNewIE._extract_urls(webpage)
if bc_urls:
return _playlist_from_matches(bc_urls, ie='BrightcoveNew')
# Look for ThePlatform embeds
tp_urls = ThePlatformIE._extract_urls(webpage)
if tp_urls:
return _playlist_from_matches(tp_urls, ie='ThePlatform')
# Look for embedded rtl.nl player
matches = re.findall(
r'<iframe[^>]+?src="((?:https?:)?//(?:www\.)?rtl\.nl/system/videoplayer/[^"]+(?:video_)?embed[^"]+)"',
webpage)
if matches:
return _playlist_from_matches(matches, ie='RtlNl')
vimeo_url = VimeoIE._extract_vimeo_url(url, webpage)
if vimeo_url is not None:
return self.url_result(vimeo_url)
vid_me_embed_url = self._search_regex(
r'src=[\'"](https?://vid\.me/[^\'"]+)[\'"]',
webpage, 'vid.me embed', default=None)
if vid_me_embed_url is not None:
return self.url_result(vid_me_embed_url, 'Vidme')
# Look for embedded YouTube player
matches = re.findall(r'''(?x)
(?:
<iframe[^>]+?src=|
data-video-url=|
<embed[^>]+?src=|
embedSWF\(?:\s*|
new\s+SWFObject\(
)
(["\'])
(?P<url>(?:https?:)?//(?:www\.)?youtube(?:-nocookie)?\.com/
(?:embed|v|p)/.+?)
\1''', webpage)
if matches:
return _playlist_from_matches(
matches, lambda m: unescapeHTML(m[1]))
# Look for lazyYT YouTube embed
matches = re.findall(
r'class="lazyYT" data-youtube-id="([^"]+)"', webpage)
if matches:
return _playlist_from_matches(matches, lambda m: unescapeHTML(m))
# Look for embedded Dailymotion player
matches = re.findall(
r'<(?:(?:embed|iframe)[^>]+?src=|input[^>]+id=[\'"]dmcloudUrlEmissionSelect[\'"][^>]+value=)(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.com/(?:embed|swf)/video/.+?)\1', webpage)
if matches:
return _playlist_from_matches(
matches, lambda m: unescapeHTML(m[1]))
# Look for embedded Dailymotion playlist player (#3822)
m = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?dailymotion\.[a-z]{2,3}/widget/jukebox\?.+?)\1', webpage)
if m:
playlists = re.findall(
r'list\[\]=/playlist/([^/]+)/', unescapeHTML(m.group('url')))
if playlists:
return _playlist_from_matches(
playlists, lambda p: '//dailymotion.com/playlist/%s' % p)
# Look for embedded Wistia player
match = re.search(
r'<(?:meta[^>]+?content|iframe[^>]+?src)=(["\'])(?P<url>(?:https?:)?//(?:fast\.)?wistia\.net/embed/iframe/.+?)\1', webpage)
if match:
embed_url = self._proto_relative_url(
unescapeHTML(match.group('url')))
return {
'_type': 'url_transparent',
'url': embed_url,
'ie_key': 'Wistia',
'uploader': video_uploader,
}
match = re.search(r'(?:id=["\']wistia_|data-wistia-?id=["\']|Wistia\.embed\(["\'])(?P<id>[^"\']+)', webpage)
if match:
return {
'_type': 'url_transparent',
'url': 'wistia:%s' % match.group('id'),
'ie_key': 'Wistia',
'uploader': video_uploader,
}
match = re.search(
r'''(?sx)
<script[^>]+src=(["'])(?:https?:)?//fast\.wistia\.com/assets/external/E-v1\.js\1[^>]*>.*?
<div[^>]+class=(["']).*?\bwistia_async_(?P<id>[a-z0-9]+)\b.*?\2
''', webpage)
if match:
return self.url_result(self._proto_relative_url(
'wistia:%s' % match.group('id')), 'Wistia')
# Look for SVT player
svt_url = SVTIE._extract_url(webpage)
if svt_url:
return self.url_result(svt_url, 'SVT')
# Look for embedded condenast player
matches = re.findall(
r'<iframe\s+(?:[a-zA-Z-]+="[^"]+"\s+)*?src="(https?://player\.cnevids\.com/embed/[^"]+")',
webpage)
if matches:
return {
'_type': 'playlist',
'entries': [{
'_type': 'url',
'ie_key': 'CondeNast',
'url': ma,
} for ma in matches],
'title': video_title,
'id': video_id,
}
# Look for Bandcamp pages with custom domain
mobj = re.search(r'<meta property="og:url"[^>]*?content="(.*?bandcamp\.com.*?)"', webpage)
if mobj is not None:
burl = unescapeHTML(mobj.group(1))
# Don't set the extractor because it can be a track url or an album
return self.url_result(burl)
# Look for embedded Vevo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//(?:cache\.)?vevo\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded Viddler player
mobj = re.search(
r'<(?:iframe[^>]+?src|param[^>]+?value)=(["\'])(?P<url>(?:https?:)?//(?:www\.)?viddler\.com/(?:embed|player)/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for NYTimes player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//graphics8\.nytimes\.com/bcvideo/[^/]+/iframe/embed\.html.+?)\1>',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for Libsyn player
mobj = re.search(
r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//html5-player\.libsyn\.com/embed/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for Ooyala videos
mobj = (re.search(r'player\.ooyala\.com/[^"?]+[?#][^"]*?(?:embedCode|ec)=(?P<ec>[^"&]+)', webpage) or
re.search(r'OO\.Player\.create\([\'"].*?[\'"],\s*[\'"](?P<ec>.{32})[\'"]', webpage) or
re.search(r'SBN\.VideoLinkset\.ooyala\([\'"](?P<ec>.{32})[\'"]\)', webpage) or
re.search(r'data-ooyala-video-id\s*=\s*[\'"](?P<ec>.{32})[\'"]', webpage))
if mobj is not None:
return OoyalaIE._build_url_result(smuggle_url(mobj.group('ec'), {'domain': url}))
# Look for multiple Ooyala embeds on SBN network websites
mobj = re.search(r'SBN\.VideoLinkset\.entryGroup\((\[.*?\])', webpage)
if mobj is not None:
embeds = self._parse_json(mobj.group(1), video_id, fatal=False)
if embeds:
return _playlist_from_matches(
embeds, getter=lambda v: OoyalaIE._url_for_embed_code(smuggle_url(v['provider_video_id'], {'domain': url})), ie='Ooyala')
# Look for Aparat videos
mobj = re.search(r'<iframe .*?src="(http://www\.aparat\.com/video/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), 'Aparat')
# Look for MPORA videos
mobj = re.search(r'<iframe .*?src="(http://mpora\.(?:com|de)/videos/[^"]+)"', webpage)
if mobj is not None:
return self.url_result(mobj.group(1), 'Mpora')
# Look for embedded NovaMov-based player
mobj = re.search(
r'''(?x)<(?:pagespeed_)?iframe[^>]+?src=(["\'])
(?P<url>http://(?:(?:embed|www)\.)?
(?:novamov\.com|
nowvideo\.(?:ch|sx|eu|at|ag|co)|
videoweed\.(?:es|com)|
movshare\.(?:net|sx|ag)|
divxstage\.(?:eu|net|ch|co|at|ag))
/embed\.php.+?)\1''', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded Facebook player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https://www\.facebook\.com/video/embed.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Facebook')
# Look for embedded VK player
mobj = re.search(r'<iframe[^>]+?src=(["\'])(?P<url>https?://vk\.com/video_ext\.php.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'VK')
# Look for embedded Odnoklassniki player
mobj = re.search(r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:odnoklassniki|ok)\.ru/videoembed/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Odnoklassniki')
# Look for embedded ivi player
mobj = re.search(r'<embed[^>]+?src=(["\'])(?P<url>https?://(?:www\.)?ivi\.ru/video/player.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Ivi')
# Look for embedded Huffington Post player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed\.live\.huffingtonpost\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'HuffPost')
# Look for embed.ly
mobj = re.search(r'class=["\']embedly-card["\'][^>]href=["\'](?P<url>[^"\']+)', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
mobj = re.search(r'class=["\']embedly-embed["\'][^>]src=["\'][^"\']*url=(?P<url>[^&]+)', webpage)
if mobj is not None:
return self.url_result(compat_urllib_parse_unquote(mobj.group('url')))
# Look for funnyordie embed
matches = re.findall(r'<iframe[^>]+?src="(https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"', webpage)
if matches:
return _playlist_from_matches(
matches, getter=unescapeHTML, ie='FunnyOrDie')
# Look for BBC iPlayer embed
matches = re.findall(r'setPlaylist\("(https?://www\.bbc\.co\.uk/iplayer/[^/]+/[\da-z]{8})"\)', webpage)
if matches:
return _playlist_from_matches(matches, ie='BBCCoUk')
# Look for embedded RUTV player
rutv_url = RUTVIE._extract_url(webpage)
if rutv_url:
return self.url_result(rutv_url, 'RUTV')
# Look for embedded TVC player
tvc_url = TVCIE._extract_url(webpage)
if tvc_url:
return self.url_result(tvc_url, 'TVC')
# Look for embedded SportBox player
sportbox_urls = SportBoxEmbedIE._extract_urls(webpage)
if sportbox_urls:
return _playlist_from_matches(sportbox_urls, ie='SportBoxEmbed')
# Look for embedded PornHub player
pornhub_url = PornHubIE._extract_url(webpage)
if pornhub_url:
return self.url_result(pornhub_url, 'PornHub')
# Look for embedded XHamster player
xhamster_urls = XHamsterEmbedIE._extract_urls(webpage)
if xhamster_urls:
return _playlist_from_matches(xhamster_urls, ie='XHamsterEmbed')
# Look for embedded TNAFlixNetwork player
tnaflix_urls = TNAFlixNetworkEmbedIE._extract_urls(webpage)
if tnaflix_urls:
return _playlist_from_matches(tnaflix_urls, ie=TNAFlixNetworkEmbedIE.ie_key())
# Look for embedded Tvigle player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//cloud\.tvigle\.ru/video/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Tvigle')
# Look for embedded TED player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://embed(?:-ssl)?\.ted\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'TED')
# Look for embedded Ustream videos
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>http://www\.ustream\.tv/embed/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Ustream')
# Look for embedded arte.tv player
mobj = re.search(
r'<(?:script|iframe) [^>]*?src="(?P<url>http://www\.arte\.tv/(?:playerv2/embed|arte_vp/index)[^"]+)"',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'ArteTVEmbed')
# Look for embedded francetv player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?://)?embed\.francetv\.fr/\?ue=.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for embedded smotri.com player
smotri_url = SmotriIE._extract_url(webpage)
if smotri_url:
return self.url_result(smotri_url, 'Smotri')
# Look for embedded Myvi.ru player
myvi_url = MyviIE._extract_url(webpage)
if myvi_url:
return self.url_result(myvi_url)
# Look for embedded soundcloud player
mobj = re.search(
r'<iframe\s+(?:[a-zA-Z0-9_-]+="[^"]+"\s+)*src="(?P<url>https?://(?:w\.)?soundcloud\.com/player[^"]+)"',
webpage)
if mobj is not None:
url = unescapeHTML(mobj.group('url'))
return self.url_result(url)
# Look for embedded vulture.com player
mobj = re.search(
r'<iframe src="(?P<url>https?://video\.vulture\.com/[^"]+)"',
webpage)
if mobj is not None:
url = unescapeHTML(mobj.group('url'))
return self.url_result(url, ie='Vulture')
# Look for embedded mtvservices player
mtvservices_url = MTVServicesEmbeddedIE._extract_url(webpage)
if mtvservices_url:
return self.url_result(mtvservices_url, ie='MTVServicesEmbedded')
# Look for embedded yahoo player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://(?:screen|movies)\.yahoo\.com/.+?\.html\?format=embed)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Yahoo')
# Look for embedded sbs.com.au player
mobj = re.search(
r'''(?x)
(?:
<meta\s+property="og:video"\s+content=|
<iframe[^>]+?src=
)
(["\'])(?P<url>https?://(?:www\.)?sbs\.com\.au/ondemand/video/.+?)\1''',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'SBS')
# Look for embedded Cinchcast player
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://player\.cinchcast\.com/.+?)\1',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Cinchcast')
mobj = re.search(
r'<iframe[^>]+?src=(["\'])(?P<url>https?://m(?:lb)?\.mlb\.com/shared/video/embed/embed\.html\?.+?)\1',
webpage)
if not mobj:
mobj = re.search(
r'data-video-link=["\'](?P<url>http://m.mlb.com/video/[^"\']+)',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'MLB')
mobj = re.search(
r'<(?:iframe|script)[^>]+?src=(["\'])(?P<url>%s)\1' % CondeNastIE.EMBED_URL,
webpage)
if mobj is not None:
return self.url_result(self._proto_relative_url(mobj.group('url'), scheme='http:'), 'CondeNast')
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:new\.)?livestream\.com/[^"]+/player[^"]+)"',
webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Livestream')
# Look for Zapiks embed
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://(?:www\.)?zapiks\.fr/index\.php\?.+?)"', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'Zapiks')
# Look for Kaltura embeds
mobj = (re.search(r"(?s)kWidget\.(?:thumb)?[Ee]mbed\(\{.*?(?P<q1>['\"])wid(?P=q1)\s*:\s*(?P<q2>['\"])_?(?P<partner_id>[^'\"]+)(?P=q2),.*?(?P<q3>['\"])entry_?[Ii]d(?P=q3)\s*:\s*(?P<q4>['\"])(?P<id>[^'\"]+)(?P=q4),", webpage) or
re.search(r'(?s)(?P<q1>["\'])(?:https?:)?//cdnapi(?:sec)?\.kaltura\.com/.*?(?:p|partner_id)/(?P<partner_id>\d+).*?(?P=q1).*?entry_?[Ii]d\s*:\s*(?P<q2>["\'])(?P<id>.+?)(?P=q2)', webpage))
if mobj is not None:
return self.url_result(smuggle_url(
'kaltura:%(partner_id)s:%(id)s' % mobj.groupdict(),
{'source_url': url}), 'Kaltura')
# Look for Eagle.Platform embeds
mobj = re.search(
r'<iframe[^>]+src="(?P<url>https?://.+?\.media\.eagleplatform\.com/index/player\?.+?)"', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'), 'EaglePlatform')
# Look for ClipYou (uses Eagle.Platform) embeds
mobj = re.search(
r'<iframe[^>]+src="https?://(?P<host>media\.clipyou\.ru)/index/player\?.*\brecord_id=(?P<id>\d+).*"', webpage)
if mobj is not None:
return self.url_result('eagleplatform:%(host)s:%(id)s' % mobj.groupdict(), 'EaglePlatform')
# Look for Pladform embeds
pladform_url = PladformIE._extract_url(webpage)
if pladform_url:
return self.url_result(pladform_url)
# Look for Videomore embeds
videomore_url = VideomoreIE._extract_url(webpage)
if videomore_url:
return self.url_result(videomore_url)
# Look for Playwire embeds
mobj = re.search(
r'<script[^>]+data-config=(["\'])(?P<url>(?:https?:)?//config\.playwire\.com/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for 5min embeds
mobj = re.search(
r'<meta[^>]+property="og:video"[^>]+content="https?://embed\.5min\.com/(?P<id>[0-9]+)/?', webpage)
if mobj is not None:
return self.url_result('5min:%s' % mobj.group('id'), 'FiveMin')
# Look for Crooks and Liars embeds
mobj = re.search(
r'<(?:iframe[^>]+src|param[^>]+value)=(["\'])(?P<url>(?:https?:)?//embed\.crooksandliars\.com/(?:embed|v)/.+?)\1', webpage)
if mobj is not None:
return self.url_result(mobj.group('url'))
# Look for NBC Sports VPlayer embeds
nbc_sports_url = NBCSportsVPlayerIE._extract_url(webpage)
if nbc_sports_url:
return self.url_result(nbc_sports_url, 'NBCSportsVPlayer')
# Look for Google Drive embeds
google_drive_url = GoogleDriveIE._extract_url(webpage)
if google_drive_url:
return self.url_result(google_drive_url, 'GoogleDrive')
# Look for UDN embeds
mobj = re.search(
r'<iframe[^>]+src="(?P<url>%s)"' % UDNEmbedIE._PROTOCOL_RELATIVE_VALID_URL, webpage)
if mobj is not None:
return self.url_result(
compat_urlparse.urljoin(url, mobj.group('url')), 'UDNEmbed')
# Look for Senate ISVP iframe
senate_isvp_url = SenateISVPIE._search_iframe_url(webpage)
if senate_isvp_url:
return self.url_result(senate_isvp_url, 'SenateISVP')
# Look for Dailymotion Cloud videos
dmcloud_url = DailymotionCloudIE._extract_dmcloud_url(webpage)
if dmcloud_url:
return self.url_result(dmcloud_url, 'DailymotionCloud')
# Look for OnionStudios embeds
onionstudios_url = OnionStudiosIE._extract_url(webpage)
if onionstudios_url:
return self.url_result(onionstudios_url)
# Look for ViewLift embeds
viewlift_url = ViewLiftEmbedIE._extract_url(webpage)
if viewlift_url:
return self.url_result(viewlift_url)
# Look for JWPlatform embeds
jwplatform_url = JWPlatformIE._extract_url(webpage)
if jwplatform_url:
return self.url_result(jwplatform_url, 'JWPlatform')
# Look for ScreenwaveMedia embeds
mobj = re.search(ScreenwaveMediaIE.EMBED_PATTERN, webpage)
if mobj is not None:
return self.url_result(unescapeHTML(mobj.group('url')), 'ScreenwaveMedia')
# Look for Digiteka embeds
digiteka_url = DigitekaIE._extract_url(webpage)
if digiteka_url:
return self.url_result(self._proto_relative_url(digiteka_url), DigitekaIE.ie_key())
# Look for Limelight embeds
mobj = re.search(r'LimelightPlayer\.doLoad(Media|Channel|ChannelList)\(["\'](?P<id>[a-z0-9]{32})', webpage)
if mobj:
lm = {
'Media': 'media',
'Channel': 'channel',
'ChannelList': 'channel_list',
}
return self.url_result('limelight:%s:%s' % (
lm[mobj.group(1)], mobj.group(2)), 'Limelight%s' % mobj.group(1), mobj.group(2))
# Look for AdobeTVVideo embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))),
'AdobeTVVideo')
# Look for Vine embeds
mobj = re.search(
r'<iframe[^>]+src=[\'"]((?:https?:)?//(?:www\.)?vine\.co/v/[^/]+/embed/(?:simple|postcard))',
webpage)
if mobj is not None:
return self.url_result(
self._proto_relative_url(unescapeHTML(mobj.group(1))), 'Vine')
# Look for Instagram embeds
instagram_embed_url = InstagramIE._extract_embed_url(webpage)
if instagram_embed_url is not None:
return self.url_result(
self._proto_relative_url(instagram_embed_url), InstagramIE.ie_key())
# Look for LiveLeak embeds
liveleak_url = LiveLeakIE._extract_url(webpage)
if liveleak_url:
return self.url_result(liveleak_url, 'LiveLeak')
# Look for 3Q SDN embeds
threeqsdn_url = ThreeQSDNIE._extract_url(webpage)
if threeqsdn_url:
return {
'_type': 'url_transparent',
'ie_key': ThreeQSDNIE.ie_key(),
'url': self._proto_relative_url(threeqsdn_url),
'title': video_title,
'description': video_description,
'thumbnail': video_thumbnail,
'uploader': video_uploader,
}
def check_video(vurl):
if YoutubeIE.suitable(vurl):
return True
vpath = compat_urlparse.urlparse(vurl).path
vext = determine_ext(vpath)
return '.' in vpath and vext not in ('swf', 'png', 'jpg', 'srt', 'sbv', 'sub', 'vtt', 'ttml')
def filter_video(urls):
return list(filter(check_video, urls))
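        # Illustrative note (comment added, not in the original source): with the
        # helpers above, 'http://example.com/clip.mp4' passes, a YouTube watch URL
        # passes regardless of extension, while 'http://example.com/poster.jpg' or
        # an extensionless path such as '/watch' is filtered out.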
# Start with something easy: JW Player in SWFObject
found = filter_video(re.findall(r'flashvars: [\'"](?:.*&)?file=(http[^\'"&]*)', webpage))
if not found:
# Look for gorilla-vid style embedding
found = filter_video(re.findall(r'''(?sx)
(?:
jw_plugins|
JWPlayerOptions|
jwplayer\s*\(\s*["'][^'"]+["']\s*\)\s*\.setup
)
.*?
['"]?file['"]?\s*:\s*["\'](.*?)["\']''', webpage))
if not found:
# Broaden the search a little bit
found = filter_video(re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage))
if not found:
# Broaden the findall a little bit: JWPlayer JS loader
found = filter_video(re.findall(
r'[^A-Za-z0-9]?(?:file|video_url)["\']?:\s*["\'](http(?![^\'"]+\.[0-9]+[\'"])[^\'"]+)["\']', webpage))
if not found:
# Flow player
found = filter_video(re.findall(r'''(?xs)
flowplayer\("[^"]+",\s*
\{[^}]+?\}\s*,
\s*\{[^}]+? ["']?clip["']?\s*:\s*\{\s*
["']?url["']?\s*:\s*["']([^"']+)["']
''', webpage))
if not found:
# Cinerama player
found = re.findall(
r"cinerama\.embedPlayer\(\s*\'[^']+\',\s*'([^']+)'", webpage)
if not found:
# Try to find twitter cards info
found = filter_video(re.findall(
r'<meta (?:property|name)="twitter:player:stream" (?:content|value)="(.+?)"', webpage))
if not found:
            # We look for Open Graph info:
            # We have to match any number of spaces between elements; some sites try to align them (e.g. statigr.am)
            m_video_type = re.findall(r'<meta.*?property="og:video:type".*?content="video/(.*?)"', webpage)
            # We only look in og:video if the MIME type is a video; don't try if it's a Flash player:
if m_video_type is not None:
found = filter_video(re.findall(r'<meta.*?property="og:video".*?content="(.*?)"', webpage))
if not found:
# HTML5 video
found = re.findall(r'(?s)<(?:video|audio)[^<]*(?:>.*?<source[^>]*)?\s+src=["\'](.*?)["\']', webpage)
if not found:
REDIRECT_REGEX = r'[0-9]{,2};\s*(?:URL|url)=\'?([^\'"]+)'
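            # Illustrative note (comment added, not in the original source): together
            # with the search below, this matches a meta refresh such as
            # <meta http-equiv="refresh" content="0; url=http://example.com/video">,
            # capturing http://example.com/video as group 1.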
found = re.search(
r'(?i)<meta\s+(?=(?:[a-z-]+="[^"]+"\s+)*http-equiv="refresh")'
r'(?:[a-z-]+="[^"]+"\s+)*?content="%s' % REDIRECT_REGEX,
webpage)
if not found:
# Look also in Refresh HTTP header
refresh_header = head_response.headers.get('Refresh')
if refresh_header:
                # In Python 2, response HTTP headers are bytestrings
if sys.version_info < (3, 0) and isinstance(refresh_header, str):
refresh_header = refresh_header.decode('iso-8859-1')
found = re.search(REDIRECT_REGEX, refresh_header)
if found:
new_url = compat_urlparse.urljoin(url, unescapeHTML(found.group(1)))
self.report_following_redirect(new_url)
return {
'_type': 'url',
'url': new_url,
}
if not found:
raise UnsupportedError(url)
entries = []
for video_url in orderedSet(found):
video_url = unescapeHTML(video_url)
video_url = video_url.replace('\\/', '/')
video_url = compat_urlparse.urljoin(url, video_url)
video_id = compat_urllib_parse_unquote(os.path.basename(video_url))
# Sometimes, jwplayer extraction will result in a YouTube URL
if YoutubeIE.suitable(video_url):
entries.append(self.url_result(video_url, 'Youtube'))
continue
# here's a fun little line of code for you:
video_id = os.path.splitext(video_id)[0]
entry_info_dict = {
'id': video_id,
'uploader': video_uploader,
'title': video_title,
'age_limit': age_limit,
}
ext = determine_ext(video_url)
if ext == 'smil':
entry_info_dict['formats'] = self._extract_smil_formats(video_url, video_id)
elif ext == 'xspf':
return self.playlist_result(self._extract_xspf_playlist(video_url, video_id), video_id)
elif ext == 'm3u8':
entry_info_dict['formats'] = self._extract_m3u8_formats(video_url, video_id, ext='mp4')
elif ext == 'mpd':
entry_info_dict['formats'] = self._extract_mpd_formats(video_url, video_id)
elif ext == 'f4m':
entry_info_dict['formats'] = self._extract_f4m_formats(video_url, video_id)
else:
entry_info_dict['url'] = video_url
if entry_info_dict.get('formats'):
self._sort_formats(entry_info_dict['formats'])
entries.append(entry_info_dict)
if len(entries) == 1:
return entries[0]
else:
for num, e in enumerate(entries, start=1):
# 'url' results don't have a title
if e.get('title') is not None:
e['title'] = '%s (%d)' % (e['title'], num)
return {
'_type': 'playlist',
'entries': entries,
}
|
py | b41384c1f1b208c038d74e25c7cddaacac49b715 | import unittest
from circle import circle_area
from math import pi
class TestCircleArea(unittest.TestCase):
def test_area(self):
#Test area when radius >= 0
self.assertAlmostEqual(circle_area(1), pi)
self.assertAlmostEqual(circle_area(0), 0)
self.assertAlmostEqual(circle_area(2.1), pi * 2.1**2)
def test_values(self):
        # Make sure value errors are raised when necessary
self.assertRaises(ValueError, circle_area, -2)
def test_type(self):
        # Make sure type errors are raised when necessary
self.assertRaises(TypeError, circle_area, 3+5j)
self.assertRaises(TypeError, circle_area, True)
self.assertRaises(TypeError, circle_area, "text")
|
py | b41384e577f50b9211d7f29bb6310b8d4ea9e2d4 | import os
import argparse
from scipy import misc
import scipy.io as sio
import cv2
import time
import numpy as np
import tensorflow.python.framework.dtypes
from warpgan import WarpGAN
from align.detect_align import detect_align
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--model_dir", help="The path to the pretrained model",
type=str, default="./warpgan_pretrained/warpgan_pretrained")
parser.add_argument("--input", help="The path to the aligned image",
type=str, default="./data/oriImgs")
parser.add_argument("--output", help="The prefix path to the output file, subfix will be added for different styles.",
type=str, default="./data/result")
parser.add_argument("--num_styles", help="The number of images to generate with different styles",
type=int, default=5)
parser.add_argument("--scale", help="The path to the input directory",
type=float, default=1.0)
parser.add_argument("--aligned", help="Set true if the input face is already normalized",
action='store_true', default=False)
args = parser.parse_args()
print("args ", args)
if __name__ == '__main__':
network = WarpGAN()
network.load_model(args.model_dir)
for name in os.listdir(args.input):
imgfile = os.path.join(args.input, name)
img = misc.imread(imgfile, mode='RGB')
if not args.aligned:
s = time.time()
img = detect_align(img)
e = time.time()
print("detect time cost ", e - s, " s")
if img is None:
print("detect failed *********** ", imgfile)
continue
cv2.imshow("img ", img)
# cv2.waitKey(0)
img = (img - 127.5) / 128.0
images = np.tile(img[None], [args.num_styles, 1, 1, 1])
scales = args.scale * np.ones((args.num_styles))
styles = np.random.normal(0., 1., (args.num_styles, network.input_style.shape[1].value))
start = time.time()
output = network.generate_BA(images, scales, 16, styles=styles)
output = 0.5*output + 0.5
end = time.time()
print("generate caricatue time cost: ", end - start, " s.")
for i in range(args.num_styles):
outdir = os.path.join(args.output, name[:-4])
misc.imsave(outdir + '_{}.jpg'.format(i), output[i])
cv2.imshow("img ", output[i])
# cv2.waitKey(0)
break
|
py | b4138592e239f7a5600a3e18fdef463de50c1cdb | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('update-mac', views.updateMac, name='updateMac'),
path('add-device', views.addDevice, name='addDevice'),
]
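# Illustrative note (comment added, not in the original file): when this URLconf is
# included at the site root, '' resolves to views.index, 'update-mac' to
# views.updateMac (URL name 'updateMac'), and 'add-device' to views.addDevice.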
|
py | b41386d4f045ead5ebc30bc329768f1e2b1ad8b4 | """
Oracle database backend for Django.
Requires cx_Oracle: http://cx-oracle.sourceforge.net/
"""
from __future__ import unicode_literals
import datetime
import decimal
import re
import platform
import sys
import uuid
import warnings
def _setup_environment(environ):
# Cygwin requires some special voodoo to set the environment variables
# properly so that Oracle will see them.
if platform.system().upper().startswith('CYGWIN'):
try:
import ctypes
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading ctypes: %s; "
"the Oracle backend requires ctypes to "
"operate correctly under Cygwin." % e)
kernel32 = ctypes.CDLL('kernel32')
for name, value in environ:
kernel32.SetEnvironmentVariableA(name, value)
else:
import os
os.environ.update(environ)
_setup_environment([
# Oracle takes client-side character set encoding from the environment.
('NLS_LANG', '.UTF8'),
# This prevents unicode from getting mangled by getting encoded into the
# potentially non-unicode database character set.
('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'),
])
try:
import cx_Oracle as Database
except ImportError as e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
try:
import pytz
except ImportError:
pytz = None
from django.conf import settings
from django.db import utils
from django.db.backends import (BaseDatabaseFeatures, BaseDatabaseOperations,
BaseDatabaseWrapper, BaseDatabaseValidation, utils as backend_utils)
from django.db.backends.oracle.client import DatabaseClient
from django.db.backends.oracle.creation import DatabaseCreation
from django.db.backends.oracle.introspection import DatabaseIntrospection
from django.db.backends.oracle.schema import DatabaseSchemaEditor
from django.db.utils import InterfaceError
from django.utils import six, timezone
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import cached_property
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
# Check whether cx_Oracle was compiled with the WITH_UNICODE option if cx_Oracle is pre-5.1. This will
# also be True for cx_Oracle 5.1 and in Python 3.0. See #19606
if int(Database.version.split('.', 1)[0]) >= 5 and \
(int(Database.version.split('.', 2)[1]) >= 1 or
not hasattr(Database, 'UNICODE')):
convert_unicode = force_text
else:
convert_unicode = force_bytes
class Oracle_datetime(datetime.datetime):
"""
A datetime object, with an additional class attribute
to tell cx_Oracle to save the microseconds too.
"""
input_size = Database.TIMESTAMP
@classmethod
def from_datetime(cls, dt):
return Oracle_datetime(dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second, dt.microsecond)
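    # Illustrative usage (comment added, not in Django's source):
    #   Oracle_datetime.from_datetime(datetime.datetime(2015, 3, 6, 12, 30, 0, 250000))
    # keeps the 250000 microseconds, because the input_size attribute above makes the
    # value be bound as a TIMESTAMP rather than a plain DATE.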
class DatabaseFeatures(BaseDatabaseFeatures):
empty_fetchmany_value = ()
needs_datetime_string_cast = False
interprets_empty_strings_as_nulls = True
uses_savepoints = True
has_select_for_update = True
has_select_for_update_nowait = True
can_return_id_from_insert = True
allow_sliced_subqueries = False
supports_subqueries_in_group_by = False
supports_transactions = True
supports_timezones = False
has_zoneinfo_database = pytz is not None
supports_bitwise_or = False
can_defer_constraint_checks = True
supports_partially_nullable_unique_constraints = False
truncates_names = True
has_bulk_insert = True
supports_tablespaces = True
supports_sequence_reset = False
can_introspect_max_length = False
can_introspect_time_field = False
atomic_transactions = False
supports_combined_alters = False
nulls_order_largest = True
requires_literal_defaults = True
connection_persists_old_columns = True
closed_cursor_error_class = InterfaceError
bare_select_suffix = " FROM DUAL"
uppercases_column_names = True
# select for update with limit can be achieved on Oracle, but not with the current backend.
supports_select_for_update_with_limit = False
def introspected_boolean_field_type(self, field=None, created_separately=False):
"""
Some versions of Oracle -- we've seen this on 11.2.0.1 and suspect
it goes back -- have a weird bug where, when an integer column is
added to an existing table with a default, its precision is later
reported on introspection as 0, regardless of the real precision.
For Django introspection, this means that such columns are reported
as IntegerField even if they are really BigIntegerField or BooleanField.
The bug is solved in Oracle 11.2.0.2 and up.
"""
if self.connection.oracle_full_version < '11.2.0.2' and field and field.has_default() and created_separately:
return 'IntegerField'
return super(DatabaseFeatures, self).introspected_boolean_field_type(field, created_separately)
class DatabaseOperations(BaseDatabaseOperations):
compiler_module = "django.db.backends.oracle.compiler"
# Oracle uses NUMBER(11) and NUMBER(19) for integer fields.
integer_field_ranges = {
'SmallIntegerField': (-99999999999, 99999999999),
'IntegerField': (-99999999999, 99999999999),
'BigIntegerField': (-9999999999999999999, 9999999999999999999),
'PositiveSmallIntegerField': (0, 99999999999),
'PositiveIntegerField': (0, 99999999999),
}
def autoinc_sql(self, table, column):
# To simulate auto-incrementing primary keys in Oracle, we have to
# create a sequence and a trigger.
sq_name = self._get_sequence_name(table)
tr_name = self._get_trigger_name(table)
tbl_name = self.quote_name(table)
col_name = self.quote_name(column)
sequence_sql = """
DECLARE
i INTEGER;
BEGIN
SELECT COUNT(*) INTO i FROM USER_CATALOG
WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE';
IF i = 0 THEN
EXECUTE IMMEDIATE 'CREATE SEQUENCE "%(sq_name)s"';
END IF;
END;
/""" % locals()
trigger_sql = """
CREATE OR REPLACE TRIGGER "%(tr_name)s"
BEFORE INSERT ON %(tbl_name)s
FOR EACH ROW
WHEN (new.%(col_name)s IS NULL)
BEGIN
SELECT "%(sq_name)s".nextval
INTO :new.%(col_name)s FROM dual;
END;
/""" % locals()
return sequence_sql, trigger_sql
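    # Illustrative note (comment added, not in Django's source): for a table such as
    # 'blog_entry' this returns a (sequence_sql, trigger_sql) pair that creates a
    # sequence plus a BEFORE INSERT trigger filling the primary key; the exact
    # sequence/trigger names are derived by _get_sequence_name()/_get_trigger_name().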
def cache_key_culling_sql(self):
return """
SELECT cache_key
FROM (SELECT cache_key, rank() OVER (ORDER BY cache_key) AS rank FROM %s)
WHERE rank = %%s + 1
"""
def date_extract_sql(self, lookup_type, field_name):
if lookup_type == 'week_day':
# TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
return "TO_CHAR(%s, 'D')" % field_name
else:
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions050.htm
return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
def date_interval_sql(self, sql, connector, timedelta):
"""
        Implements the interval functionality for expressions.
        The format for Oracle is:
(datefield + INTERVAL '3 00:03:20.000000' DAY(1) TO SECOND(6))
"""
minutes, seconds = divmod(timedelta.seconds, 60)
hours, minutes = divmod(minutes, 60)
days = str(timedelta.days)
day_precision = len(days)
fmt = "(%s %s INTERVAL '%s %02d:%02d:%02d.%06d' DAY(%d) TO SECOND(6))"
return fmt % (sql, connector, days, hours, minutes, seconds,
timedelta.microseconds, day_precision)
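    # Worked example (comment added, not in Django's source): for connector '+' and
    # datetime.timedelta(days=3, seconds=200), the method above yields exactly the
    # docstring example: (datefield + INTERVAL '3 00:03:20.000000' DAY(1) TO SECOND(6)).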
def date_trunc_sql(self, lookup_type, field_name):
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions230.htm#i1002084
if lookup_type in ('year', 'month'):
return "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
else:
return "TRUNC(%s)" % field_name
# Oracle crashes with "ORA-03113: end-of-file on communication channel"
# if the time zone name is passed in parameter. Use interpolation instead.
# https://groups.google.com/forum/#!msg/django-developers/zwQju7hbG78/9l934yelwfsJ
# This regexp matches all time zone names from the zoneinfo database.
_tzname_re = re.compile(r'^[\w/:+-]+$')
def _convert_field_to_tz(self, field_name, tzname):
if not self._tzname_re.match(tzname):
raise ValueError("Invalid time zone name: %s" % tzname)
# Convert from UTC to local time, returning TIMESTAMP WITH TIME ZONE.
result = "(FROM_TZ(%s, '0:00') AT TIME ZONE '%s')" % (field_name, tzname)
        # Extracting from a TIMESTAMP WITH TIME ZONE ignores the time zone.
# Convert to a DATETIME, which is called DATE by Oracle. There's no
# built-in function to do that; the easiest is to go through a string.
result = "TO_CHAR(%s, 'YYYY-MM-DD HH24:MI:SS')" % result
result = "TO_DATE(%s, 'YYYY-MM-DD HH24:MI:SS')" % result
# Re-convert to a TIMESTAMP because EXTRACT only handles the date part
# on DATE values, even though they actually store the time part.
return "CAST(%s AS TIMESTAMP)" % result
def datetime_extract_sql(self, lookup_type, field_name, tzname):
if settings.USE_TZ:
field_name = self._convert_field_to_tz(field_name, tzname)
if lookup_type == 'week_day':
# TO_CHAR(field, 'D') returns an integer from 1-7, where 1=Sunday.
sql = "TO_CHAR(%s, 'D')" % field_name
else:
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions050.htm
sql = "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
return sql, []
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
if settings.USE_TZ:
field_name = self._convert_field_to_tz(field_name, tzname)
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions230.htm#i1002084
if lookup_type in ('year', 'month'):
sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
elif lookup_type == 'day':
sql = "TRUNC(%s)" % field_name
elif lookup_type == 'hour':
sql = "TRUNC(%s, 'HH24')" % field_name
elif lookup_type == 'minute':
sql = "TRUNC(%s, 'MI')" % field_name
else:
sql = field_name # Cast to DATE removes sub-second precision.
return sql, []
def get_db_converters(self, internal_type):
converters = super(DatabaseOperations, self).get_db_converters(internal_type)
if internal_type == 'TextField':
converters.append(self.convert_textfield_value)
elif internal_type == 'BinaryField':
converters.append(self.convert_binaryfield_value)
elif internal_type in ['BooleanField', 'NullBooleanField']:
converters.append(self.convert_booleanfield_value)
elif internal_type == 'DecimalField':
converters.append(self.convert_decimalfield_value)
elif internal_type == 'DateField':
converters.append(self.convert_datefield_value)
elif internal_type == 'TimeField':
converters.append(self.convert_timefield_value)
elif internal_type == 'UUIDField':
converters.append(self.convert_uuidfield_value)
converters.append(self.convert_empty_values)
return converters
def convert_empty_values(self, value, field):
# Oracle stores empty strings as null. We need to undo this in
# order to adhere to the Django convention of using the empty
# string instead of null, but only if the field accepts the
# empty string.
if value is None and field.empty_strings_allowed:
value = ''
if field.get_internal_type() == 'BinaryField':
value = b''
return value
def convert_textfield_value(self, value, field):
if isinstance(value, Database.LOB):
value = force_text(value.read())
return value
def convert_binaryfield_value(self, value, field):
if isinstance(value, Database.LOB):
value = force_bytes(value.read())
return value
def convert_booleanfield_value(self, value, field):
if value in (1, 0):
value = bool(value)
return value
def convert_decimalfield_value(self, value, field):
if value is not None:
value = backend_utils.typecast_decimal(field.format_number(value))
return value
# cx_Oracle always returns datetime.datetime objects for
# DATE and TIMESTAMP columns, but Django wants to see a
# python datetime.date, .time, or .datetime.
def convert_datefield_value(self, value, field):
if isinstance(value, Database.Timestamp):
return value.date()
def convert_timefield_value(self, value, field):
if isinstance(value, Database.Timestamp):
value = value.time()
return value
def convert_uuidfield_value(self, value, field):
if value is not None:
value = uuid.UUID(value)
return value
def deferrable_sql(self):
return " DEFERRABLE INITIALLY DEFERRED"
def drop_sequence_sql(self, table):
return "DROP SEQUENCE %s;" % self.quote_name(self._get_sequence_name(table))
def fetch_returned_insert_id(self, cursor):
return int(cursor._insert_id_var.getvalue())
def field_cast_sql(self, db_type, internal_type):
if db_type and db_type.endswith('LOB'):
return "DBMS_LOB.SUBSTR(%s)"
else:
return "%s"
def last_executed_query(self, cursor, sql, params):
# http://cx-oracle.sourceforge.net/html/cursor.html#Cursor.statement
# The DB API definition does not define this attribute.
statement = cursor.statement
if statement and six.PY2 and not isinstance(statement, unicode):
statement = statement.decode('utf-8')
        # Unlike Psycopg's `query` and MySQLdb's `_last_executed`, CxOracle's
# `statement` doesn't contain the query parameters. refs #20010.
return super(DatabaseOperations, self).last_executed_query(cursor, statement, params)
def last_insert_id(self, cursor, table_name, pk_name):
sq_name = self._get_sequence_name(table_name)
cursor.execute('SELECT "%s".currval FROM dual' % sq_name)
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_in_list_size(self):
return 1000
def max_name_length(self):
return 30
def prep_for_iexact_query(self, x):
return x
def process_clob(self, value):
if value is None:
return ''
return force_text(value.read())
def quote_name(self, name):
# SQL92 requires delimited (quoted) names to be case-sensitive. When
# not quoted, Oracle has case-insensitive behavior for identifiers, but
# always defaults to uppercase.
# We simplify things by making Oracle identifiers always uppercase.
if not name.startswith('"') and not name.endswith('"'):
name = '"%s"' % backend_utils.truncate_name(name.upper(),
self.max_name_length())
# Oracle puts the query text into a (query % args) construct, so % signs
# in names need to be escaped. The '%%' will be collapsed back to '%' at
# that stage so we aren't really making the name longer here.
name = name.replace('%', '%%')
return name.upper()
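    # Illustrative note (comment added, not in Django's source): an unquoted name such
    # as 'auth_user' comes back as '"AUTH_USER"', while an already-quoted '"Auth_User"'
    # is only uppercased, not re-quoted or truncated.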
def random_function_sql(self):
return "DBMS_RANDOM.RANDOM"
def regex_lookup(self, lookup_type):
if lookup_type == 'regex':
match_option = "'c'"
else:
match_option = "'i'"
return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option
def return_insert_id(self):
return "RETURNING %s INTO %%s", (InsertIdVar(),)
def savepoint_create_sql(self, sid):
return convert_unicode("SAVEPOINT " + self.quote_name(sid))
def savepoint_rollback_sql(self, sid):
return convert_unicode("ROLLBACK TO SAVEPOINT " + self.quote_name(sid))
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# Return a list of 'TRUNCATE x;', 'TRUNCATE y;',
# 'TRUNCATE z;'... style SQL statements
if tables:
# Oracle does support TRUNCATE, but it seems to get us into
# FK referential trouble, whereas DELETE FROM table works.
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Since we've just deleted all the rows, running our sequence
# ALTER code will reset the sequence to 0.
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
def sequence_reset_by_name_sql(self, style, sequences):
sql = []
for sequence_info in sequences:
sequence_name = self._get_sequence_name(sequence_info['table'])
table_name = self.quote_name(sequence_info['table'])
column_name = self.quote_name(sequence_info['column'] or 'id')
query = _get_sequence_reset_sql() % {
'sequence': sequence_name,
'table': table_name,
'column': column_name,
}
sql.append(query)
return sql
def sequence_reset_sql(self, style, model_list):
from django.db import models
output = []
query = _get_sequence_reset_sql()
for model in model_list:
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
table_name = self.quote_name(model._meta.db_table)
sequence_name = self._get_sequence_name(model._meta.db_table)
column_name = self.quote_name(f.column)
output.append(query % {'sequence': sequence_name,
'table': table_name,
'column': column_name})
# Only one AutoField is allowed per model, so don't
# continue to loop
break
for f in model._meta.many_to_many:
if not f.rel.through:
table_name = self.quote_name(f.m2m_db_table())
sequence_name = self._get_sequence_name(f.m2m_db_table())
column_name = self.quote_name('id')
output.append(query % {'sequence': sequence_name,
'table': table_name,
'column': column_name})
return output
def start_transaction_sql(self):
return ''
def tablespace_sql(self, tablespace, inline=False):
if inline:
return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
else:
return "TABLESPACE %s" % self.quote_name(tablespace)
def value_to_db_date(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
The default implementation transforms the date to text, but that is not
necessary for Oracle.
"""
return value
def value_to_db_datetime(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
If naive datetime is passed assumes that is in UTC. Normally Django
models.DateTimeField makes sure that if USE_TZ is True passed datetime
is timezone aware.
"""
if value is None:
return None
# cx_Oracle doesn't support tz-aware datetimes
if timezone.is_aware(value):
if settings.USE_TZ:
value = value.astimezone(timezone.utc).replace(tzinfo=None)
else:
raise ValueError("Oracle backend does not support timezone-aware datetimes when USE_TZ is False.")
return Oracle_datetime.from_datetime(value)
def value_to_db_time(self, value):
if value is None:
return None
if isinstance(value, six.string_types):
return datetime.datetime.strptime(value, '%H:%M:%S')
# Oracle doesn't support tz-aware times
if timezone.is_aware(value):
raise ValueError("Oracle backend does not support timezone-aware times.")
return Oracle_datetime(1900, 1, 1, value.hour, value.minute,
value.second, value.microsecond)
def year_lookup_bounds_for_date_field(self, value):
# Create bounds as real date values
first = datetime.date(value, 1, 1)
last = datetime.date(value, 12, 31)
return [first, last]
def year_lookup_bounds_for_datetime_field(self, value):
# cx_Oracle doesn't support tz-aware datetimes
bounds = super(DatabaseOperations, self).year_lookup_bounds_for_datetime_field(value)
if settings.USE_TZ:
bounds = [b.astimezone(timezone.utc) for b in bounds]
return [Oracle_datetime.from_datetime(b) for b in bounds]
def combine_expression(self, connector, sub_expressions):
"Oracle requires special cases for %% and & operators in query expressions"
if connector == '%%':
return 'MOD(%s)' % ','.join(sub_expressions)
elif connector == '&':
return 'BITAND(%s)' % ','.join(sub_expressions)
elif connector == '|':
raise NotImplementedError("Bit-wise or is not supported in Oracle.")
elif connector == '^':
return 'POWER(%s)' % ','.join(sub_expressions)
return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
def _get_sequence_name(self, table):
name_length = self.max_name_length() - 3
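        # e.g. a hypothetical table BLOG_ENTRY maps to the sequence name BLOG_ENTRY_SQ,
        # truncated so the result stays within Oracle's 30-character identifier limit.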
return '%s_SQ' % backend_utils.truncate_name(table, name_length).upper()
def _get_trigger_name(self, table):
name_length = self.max_name_length() - 3
return '%s_TR' % backend_utils.truncate_name(table, name_length).upper()
def bulk_insert_sql(self, fields, num_values):
items_sql = "SELECT %s FROM DUAL" % ", ".join(["%s"] * len(fields))
return " UNION ALL ".join([items_sql] * num_values)
class _UninitializedOperatorsDescriptor(object):
def __get__(self, instance, owner):
# If connection.operators is looked up before a connection has been
# created, transparently initialize connection.operators to avert an
# AttributeError.
if instance is None:
raise AttributeError("operators not available as class attribute")
# Creating a cursor will initialize the operators.
instance.cursor().close()
return instance.__dict__['operators']
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'oracle'
operators = _UninitializedOperatorsDescriptor()
_standard_operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
}
_likec_operators = _standard_operators.copy()
_likec_operators.update({
'contains': "LIKEC %s ESCAPE '\\'",
'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
'startswith': "LIKEC %s ESCAPE '\\'",
'endswith': "LIKEC %s ESCAPE '\\'",
'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
})
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True)
self.features.can_return_id_from_insert = use_returning_into
self.ops = DatabaseOperations(self)
self.client = DatabaseClient(self)
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation(self)
def _connect_string(self):
settings_dict = self.settings_dict
if not settings_dict['HOST'].strip():
settings_dict['HOST'] = 'localhost'
if settings_dict['PORT'].strip():
dsn = Database.makedsn(settings_dict['HOST'],
int(settings_dict['PORT']),
settings_dict['NAME'])
else:
dsn = settings_dict['NAME']
return "%s/%s@%s" % (settings_dict['USER'],
settings_dict['PASSWORD'], dsn)
def get_connection_params(self):
conn_params = self.settings_dict['OPTIONS'].copy()
if 'use_returning_into' in conn_params:
del conn_params['use_returning_into']
return conn_params
def get_new_connection(self, conn_params):
conn_string = convert_unicode(self._connect_string())
return Database.connect(conn_string, **conn_params)
def init_connection_state(self):
cursor = self.create_cursor()
# Set the territory first. The territory overrides NLS_DATE_FORMAT
# and NLS_TIMESTAMP_FORMAT to the territory default. When all of
        # these are set in a single statement it isn't clear what is supposed
# to happen.
cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'")
# Set Oracle date to ANSI date format. This only needs to execute
# once when we create a new connection. We also set the Territory
# to 'AMERICA' which forces Sunday to evaluate to a '1' in
# TO_CHAR().
cursor.execute(
"ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'"
" NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'"
+ (" TIME_ZONE = 'UTC'" if settings.USE_TZ else ''))
cursor.close()
if 'operators' not in self.__dict__:
# Ticket #14149: Check whether our LIKE implementation will
# work for this connection or we need to fall back on LIKEC.
# This check is performed only once per DatabaseWrapper
# instance per thread, since subsequent connections will use
# the same settings.
cursor = self.create_cursor()
try:
cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s"
% self._standard_operators['contains'],
['X'])
except DatabaseError:
self.operators = self._likec_operators
else:
self.operators = self._standard_operators
cursor.close()
try:
self.connection.stmtcachesize = 20
except AttributeError:
# Django docs specify cx_Oracle version 4.3.1 or higher, but
# stmtcachesize is available only in 4.3.2 and up.
pass
# Ensure all changes are preserved even when AUTOCOMMIT is False.
if not self.get_autocommit():
self.commit()
def create_cursor(self):
return FormatStylePlaceholderCursor(self.connection)
def _commit(self):
if self.connection is not None:
try:
return self.connection.commit()
except Database.DatabaseError as e:
# cx_Oracle 5.0.4 raises a cx_Oracle.DatabaseError exception
# with the following attributes and values:
# code = 2091
# message = 'ORA-02091: transaction rolled back
# 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
# _C00102056) violated - parent key not found'
# We convert that particular case to our IntegrityError exception
x = e.args[0]
if hasattr(x, 'code') and hasattr(x, 'message') \
and x.code == 2091 and 'ORA-02291' in x.message:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
# Oracle doesn't support releasing savepoints. But we fake them when query
# logging is enabled to keep query counts consistent with other backends.
def _savepoint_commit(self, sid):
if self.queries_logged:
self.queries_log.append({
'sql': '-- RELEASE SAVEPOINT %s (faked)' % self.ops.quote_name(sid),
'time': '0.000',
})
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
        To check constraints, we set constraints to immediate. Then, when we're done, we must ensure they
        are returned to deferred.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
self.connection.ping()
except Database.Error:
return False
else:
return True
@cached_property
def oracle_full_version(self):
with self.temporary_connection():
return self.connection.version
@cached_property
def oracle_version(self):
try:
return int(self.oracle_full_version.split('.')[0])
except ValueError:
return None
class OracleParam(object):
"""
Wrapper object for formatting parameters for Oracle. If the string
representation of the value is large enough (greater than 4000 characters)
the input size needs to be set as CLOB. Alternatively, if the parameter
has an `input_size` attribute, then the value of the `input_size` attribute
will be used instead. Otherwise, no input size will be set for the
parameter when executing the query.
"""
def __init__(self, param, cursor, strings_only=False):
# With raw SQL queries, datetimes can reach this function
# without being converted by DateTimeField.get_db_prep_value.
if settings.USE_TZ and (isinstance(param, datetime.datetime) and
not isinstance(param, Oracle_datetime)):
if timezone.is_naive(param):
warnings.warn("Oracle received a naive datetime (%s)"
" while time zone support is active." % param,
RuntimeWarning)
default_timezone = timezone.get_default_timezone()
param = timezone.make_aware(param, default_timezone)
param = Oracle_datetime.from_datetime(param.astimezone(timezone.utc))
string_size = 0
# Oracle doesn't recognize True and False correctly in Python 3.
# The conversion done below works both in 2 and 3.
if param is True:
param = 1
elif param is False:
param = 0
if hasattr(param, 'bind_parameter'):
self.force_bytes = param.bind_parameter(cursor)
elif isinstance(param, Database.Binary):
self.force_bytes = param
else:
# To transmit to the database, we need Unicode if supported
# To get size right, we must consider bytes.
self.force_bytes = convert_unicode(param, cursor.charset,
strings_only)
if isinstance(self.force_bytes, six.string_types):
# We could optimize by only converting up to 4000 bytes here
string_size = len(force_bytes(param, cursor.charset, strings_only))
if hasattr(param, 'input_size'):
# If parameter has `input_size` attribute, use that.
self.input_size = param.input_size
elif string_size > 4000:
# Mark any string param greater than 4000 characters as a CLOB.
self.input_size = Database.CLOB
else:
self.input_size = None
class VariableWrapper(object):
"""
An adapter class for cursor variables that prevents the wrapped object
from being converted into a string when used to instantiate an OracleParam.
This can be used generally for any other object that should be passed into
Cursor.execute as-is.
"""
def __init__(self, var):
self.var = var
def bind_parameter(self, cursor):
return self.var
def __getattr__(self, key):
return getattr(self.var, key)
def __setattr__(self, key, value):
if key == 'var':
self.__dict__[key] = value
else:
setattr(self.var, key, value)
class InsertIdVar(object):
"""
A late-binding cursor variable that can be passed to Cursor.execute
as a parameter, in order to receive the id of the row created by an
insert statement.
"""
def bind_parameter(self, cursor):
param = cursor.cursor.var(Database.NUMBER)
cursor._insert_id_var = param
return param
class FormatStylePlaceholderCursor(object):
"""
Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
style. This fixes it -- but note that if you want to use a literal "%s" in
a query, you'll need to use "%%s".
We also do automatic conversion between Unicode on the Python side and
UTF-8 -- for talking to Oracle -- in here.
"""
charset = 'utf-8'
def __init__(self, connection):
self.cursor = connection.cursor()
# Necessary to retrieve decimal values without rounding error.
self.cursor.numbersAsStrings = True
# Default arraysize of 1 is highly sub-optimal.
self.cursor.arraysize = 100
def _format_params(self, params):
try:
return dict((k, OracleParam(v, self, True)) for k, v in params.items())
except AttributeError:
return tuple(OracleParam(p, self, True) for p in params)
def _guess_input_sizes(self, params_list):
# Try dict handling; if that fails, treat as sequence
if hasattr(params_list[0], 'keys'):
sizes = {}
for params in params_list:
for k, value in params.items():
if value.input_size:
sizes[k] = value.input_size
self.setinputsizes(**sizes)
else:
# It's not a list of dicts; it's a list of sequences
sizes = [None] * len(params_list[0])
for params in params_list:
for i, value in enumerate(params):
if value.input_size:
sizes[i] = value.input_size
self.setinputsizes(*sizes)
def _param_generator(self, params):
# Try dict handling; if that fails, treat as sequence
if hasattr(params, 'items'):
return dict((k, v.force_bytes) for k, v in params.items())
else:
return [p.force_bytes for p in params]
def _fix_for_params(self, query, params):
# cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
        # does want a trailing ';' but not a trailing '/'. However, these
# characters must be included in the original query in case the query
# is being passed to SQL*Plus.
if query.endswith(';') or query.endswith('/'):
query = query[:-1]
if params is None:
params = []
query = convert_unicode(query, self.charset)
elif hasattr(params, 'keys'):
# Handle params as dict
args = dict((k, ":%s" % k) for k in params.keys())
query = convert_unicode(query % args, self.charset)
else:
# Handle params as sequence
args = [(':arg%d' % i) for i in range(len(params))]
query = convert_unicode(query % tuple(args), self.charset)
return query, self._format_params(params)
def execute(self, query, params=None):
query, params = self._fix_for_params(query, params)
self._guess_input_sizes([params])
try:
return self.cursor.execute(query, self._param_generator(params))
except Database.DatabaseError as e:
# cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def executemany(self, query, params=None):
if not params:
# No params given, nothing to do
return None
# uniform treatment for sequences and iterables
params_iter = iter(params)
query, firstparams = self._fix_for_params(query, next(params_iter))
# we build a list of formatted params; as we're going to traverse it
# more than once, we can't make it lazy by using a generator
formatted = [firstparams] + [self._format_params(p) for p in params_iter]
self._guess_input_sizes(formatted)
try:
return self.cursor.executemany(query,
[self._param_generator(p) for p in formatted])
except Database.DatabaseError as e:
# cx_Oracle <= 4.4.0 wrongly raises a DatabaseError for ORA-01400.
if hasattr(e.args[0], 'code') and e.args[0].code == 1400 and not isinstance(e, IntegrityError):
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def fetchone(self):
row = self.cursor.fetchone()
if row is None:
return row
return _rowfactory(row, self.cursor)
def fetchmany(self, size=None):
if size is None:
size = self.arraysize
return tuple(_rowfactory(r, self.cursor) for r in self.cursor.fetchmany(size))
def fetchall(self):
return tuple(_rowfactory(r, self.cursor) for r in self.cursor.fetchall())
def close(self):
try:
self.cursor.close()
except Database.InterfaceError:
# already closed
pass
def var(self, *args):
return VariableWrapper(self.cursor.var(*args))
def arrayvar(self, *args):
return VariableWrapper(self.cursor.arrayvar(*args))
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return CursorIterator(self.cursor)
class CursorIterator(six.Iterator):
"""Cursor iterator wrapper that invokes our custom row factory."""
def __init__(self, cursor):
self.cursor = cursor
self.iter = iter(cursor)
def __iter__(self):
return self
def __next__(self):
return _rowfactory(next(self.iter), self.cursor)
def _rowfactory(row, cursor):
# Cast numeric values as the appropriate Python type based upon the
# cursor description, and convert strings to unicode.
casted = []
for value, desc in zip(row, cursor.description):
if value is not None and desc[1] is Database.NUMBER:
precision, scale = desc[4:6]
if scale == -127:
if precision == 0:
# NUMBER column: decimal-precision floating point
# This will normally be an integer from a sequence,
# but it could be a decimal value.
if '.' in value:
value = decimal.Decimal(value)
else:
value = int(value)
else:
# FLOAT column: binary-precision floating point.
# This comes from FloatField columns.
value = float(value)
elif precision > 0:
# NUMBER(p,s) column: decimal-precision fixed point.
# This comes from IntField and DecimalField columns.
if scale == 0:
value = int(value)
else:
value = decimal.Decimal(value)
elif '.' in value:
# No type information. This normally comes from a
# mathematical expression in the SELECT list. Guess int
# or Decimal based on whether it has a decimal point.
value = decimal.Decimal(value)
else:
value = int(value)
# datetimes are returned as TIMESTAMP, except the results
# of "dates" queries, which are returned as DATETIME.
elif desc[1] in (Database.TIMESTAMP, Database.DATETIME):
# Confirm that dt is naive before overwriting its tzinfo.
if settings.USE_TZ and value is not None and timezone.is_naive(value):
value = value.replace(tzinfo=timezone.utc)
elif desc[1] in (Database.STRING, Database.FIXED_CHAR,
Database.LONG_STRING):
value = to_unicode(value)
casted.append(value)
return tuple(casted)
def to_unicode(s):
"""
Convert strings to Unicode objects (and return all other data types
unchanged).
"""
if isinstance(s, six.string_types):
return force_text(s)
return s
def _get_sequence_reset_sql():
# TODO: colorize this SQL code with style.SQL_KEYWORD(), etc.
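    # The PL/SQL block below reads MAX(<column>) from the table and then calls
    # NEXTVAL on the sequence until its value has caught up with that maximum.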
return """
DECLARE
table_value integer;
seq_value integer;
BEGIN
SELECT NVL(MAX(%(column)s), 0) INTO table_value FROM %(table)s;
SELECT NVL(last_number - cache_size, 0) INTO seq_value FROM user_sequences
WHERE sequence_name = '%(sequence)s';
WHILE table_value > seq_value LOOP
SELECT "%(sequence)s".nextval INTO seq_value FROM dual;
END LOOP;
END;
/"""
|
py | b413886c1c74f7be928a406aa0753809c0469a9e | from triangler.color import ColorMethod
from triangler.edges import EdgeMethod
from triangler.mod import Triangler
from triangler.sampling import SampleMethod
|
py | b41388e38ee57455ce83cb612d118364936ae85e | #
# test_GitHub3Helper.py
#
import os
import unittest
from oompa.tracking.github.GitHub3Helper import GitHub3Helper
class GitHub3HelperTests(unittest.TestCase):
def testextractBlurb(self):
# XXX not moved to correct home yet
from oompa.tracking.github.GitHub3Helper import extractBlurb
cases = [
( None,
None,
),
( "",
"",
),
( "Long enough first paragraph - \nmultiple lines\n\nParagraph 2\n\nParagraph 2",
"Long enough first paragraph - \nmultiple lines\n...",
),
( "Three Paragraphs\n\nParagraph 2\n\nParagraph 3",
"Three Paragraphs\n\nParagraph 2\n...",
),
( "One Short Paragraph",
"One Short Paragraph",
),
]
for case in cases:
if case is None:
print("breaking early")
break
content = case[0]
expectedBlurb = case[1]
blurb = extractBlurb(content)
if blurb != expectedBlurb:
print("XXX mismatch:")
print(" content: %r" % content)
print(" expectedBlurb: %r" % expectedBlurb)
print(" blurb: %r" % blurb)
pass
assert blurb == expectedBlurb
pass
return
pass
|
py | b4138918ac2a8c1d5d41a70a39c07df841dc90c4 | # Copyright (c) 2021 Qualcomm Technologies, Inc.
# All rights reserved.
import torch
from torch_scatter import scatter_sum
def three_matrix_to_so2_irreps(m):
r"""
Decompose (rho_1 + rho_0)^{otimes 2} into (3 rho_0 + 2 rho_1 + rho_2)
:param m: [B, 3, 3]
:return: ([B, 1], [B, 1], [B, 1], [B, 2], [B, 2], [B, 2])
"""
v_0_0 = -0.5 * m[:, 0, 1] + 0.5 * m[:, 1, 0]
v_0_1 = 0.5 * m[:, 0, 0] + 0.5 * m[:, 1, 1]
v_0_2 = m[:, 2, 2]
v_1_0 = m[:, 2, :2]
v_1_1 = m[:, :2, 2]
v_2_0 = 0.5 * torch.stack([m[:, 0, 1] + m[:, 1, 0], -m[:, 0, 0] + m[:, 1, 1]], 1)
return v_0_0[:, None], v_0_1[:, None], v_0_2[:, None], v_1_0, v_1_1, v_2_0
def three_sym_matrix_to_so2_features(m):
"""
Express symmetric (rho_1+rho_0)^{otimes 2} matrix in 2 (rho_0+rho_1+rho_2) SO2 features.
:param m: [B, 3, 3]
:return: [B, 2, 5]
"""
_, v_0_1, v_0_2, v_1_0, _, v_2_0 = three_matrix_to_so2_irreps(m)
zero = torch.zeros_like(v_1_0)
return torch.stack(
[
torch.cat([v_0_1, v_1_0, v_2_0], 1),
torch.cat([v_0_2, zero, zero], 1),
],
1,
)
def three_matrix_to_so2_features(m):
"""
Express (rho_1+rho_0)^{otimes 2} matrix in 3 (rho_0+rho_1+rho_2) SO2 features.
:param m: [B, 3, 3]
:return: [B, 3, 5]
"""
v_0_0, v_0_1, v_0_2, v_1_0, v_1_1, v_2_0 = three_matrix_to_so2_irreps(m)
zero = torch.zeros_like(v_1_0)
return torch.stack(
[
torch.cat([v_0_0, v_1_0, v_2_0], 1),
torch.cat([v_0_1, v_1_1, zero], 1),
torch.cat([v_0_2, zero, zero], 1),
],
1,
)
def vector_vector_feature(v_a, v_b, weight, p_idx, frames, symmetric):
"""
Taking outer product, create matrix feature per pair, average, express in SO2 feature.
:param v_a: [E, 3]
:param v_b: [E, 3]
:param weight: [E]
:param p_idx: [E] index [0, V)
:param frames: [V, 3, 3] per vertex, rows are (X, Y, normal) vectors.
:param symmetric: bool
:return: [V, 2/3, 5] (2 channels if symmetric)
"""
m_pair = torch.einsum("ni,nj,n->nij", v_a, v_b, weight)
m_p = scatter_sum(m_pair, p_idx, dim=0) / scatter_sum(weight, p_idx)[:, None, None]
m_p_gauge = frames @ m_p @ frames.transpose(1, 2)
return (three_sym_matrix_to_so2_features if symmetric else three_matrix_to_so2_features)(
m_p_gauge
)
def matrix_features(edge_index, pos, frames, weight=None):
"""
Compute feature based on outer product of position difference between neighbouring vertices.
:param edge_index: [2, M] (indices of neighbours for M pairs)
:param weight: [M] (weight of each pair)
:param pos: [N, 3]
:param frames: [N, 3, 3] for each point, rows are X, Y, normal vectors
:param max_radius: float
:return: [N, 7, 5] 7 (rho_0+rho_1+rho_2) features
"""
p, q = edge_index
weight = torch.ones(len(p), device=pos.device, dtype=pos.dtype) if weight is None else weight
d = pos[q] - pos[p]
normal = frames[q, 2]
return torch.cat(
[
vector_vector_feature(d, d, weight, p, frames, symmetric=True),
vector_vector_feature(normal, normal, weight, p, frames, symmetric=True),
vector_vector_feature(d, normal, weight, p, frames, symmetric=False),
],
1,
)
def so2_feature_to_ambient_vector(v, frames):
"""
Transform rho_0 + rho_1 feature into ambient 3-vector using frame.
:param v: [N, C, 3]
:param frames: [N, 3, 3] for each point, rows are X, Y, normal vectors
:return: [N, C, 3]
"""
return v[:, :, [1, 2, 0]] @ frames # equiv to 'nci,nix->ncx'
def transform_frames(g, frames):
c, s, z, o = torch.cos(g), torch.sin(g), torch.zeros_like(g), torch.ones_like(g)
rho = torch.stack([c, -s, z, s, c, z, z, z, o], 1).view(-1, 3, 3)
frames_t = rho @ frames
return frames_t
def test_transformation():
import math
from scipy.stats import special_ortho_group
from gem_cnn.utils.rep_act import rep_act
import numpy as np
g = torch.rand(10, dtype=torch.double) * 2 * math.pi
m = torch.randn(10, 3, 3, dtype=torch.double)
m_sym = m @ m.transpose(1, 2) # Make symmetric
frames = torch.tensor(special_ortho_group(3).rvs(10))
frames_t = transform_frames(g, frames)
m_gauge = frames @ m @ frames.transpose(1, 2)
m_gauge_t = frames_t @ m @ frames_t.transpose(1, 2)
v = three_matrix_to_so2_features(m_gauge)
v_t = three_matrix_to_so2_features(m_gauge_t)
np.testing.assert_allclose(rep_act(v, g), v_t)
m_sym_gauge = frames @ m_sym @ frames.transpose(1, 2)
m_sym_gauge_t = frames_t @ m_sym @ frames_t.transpose(1, 2)
v = three_sym_matrix_to_so2_features(m_sym_gauge)
v_t = three_sym_matrix_to_so2_features(m_sym_gauge_t)
np.testing.assert_allclose(rep_act(v, g), v_t)
def test_features():
import math
from scipy.stats import special_ortho_group
from gem_cnn.utils.rep_act import rep_act
import numpy as np
num_v = 100
pos = torch.randn(num_v, 3, dtype=torch.double)
g = torch.rand(num_v, dtype=torch.double) * 2 * math.pi
frames = torch.tensor(special_ortho_group(3).rvs(num_v))
frames_t = transform_frames(g, frames)
    # matrix_features expects an edge_index; build one over all ordered pairs of
    # distinct vertices (an assumed graph, since this stale call passed a radius).
    idx = torch.arange(num_v)
    pairs = torch.cartesian_prod(idx, idx)
    pairs = pairs[pairs[:, 0] != pairs[:, 1]]
    edge_index = pairs.t()
    v = matrix_features(edge_index, pos, frames)
    v_t = matrix_features(edge_index, pos, frames_t)
np.testing.assert_allclose(rep_act(v, g), v_t, atol=1e-14)
|
bzl | b4138af74ae5872295cb9bb5055f8557b32b15c1 | # Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example showing how to create a rule that rules_cc can depend on."""
load("@rules_cc//cc:action_names.bzl", "CPP_LINK_STATIC_LIBRARY_ACTION_NAME")
load("@rules_cc//cc:toolchain_utils.bzl", "find_cpp_toolchain")
load("//examples/my_c_compile:my_c_compile.bzl", "MyCCompileInfo")
def _my_c_archive_impl(ctx):
cc_toolchain = find_cpp_toolchain(ctx)
object_file = ctx.attr.object[MyCCompileInfo].object
output_file = ctx.actions.declare_file(ctx.label.name + ".a")
feature_configuration = cc_common.configure_features(
ctx = ctx,
cc_toolchain = cc_toolchain,
requested_features = ctx.features,
unsupported_features = ctx.disabled_features,
)
linker_input = cc_common.create_linker_input(
owner = ctx.label,
libraries = depset(direct = [
cc_common.create_library_to_link(
actions = ctx.actions,
feature_configuration = feature_configuration,
cc_toolchain = cc_toolchain,
static_library = output_file,
),
]),
)
compilation_context = cc_common.create_compilation_context()
linking_context = cc_common.create_linking_context(linker_inputs = depset(direct = [linker_input]))
archiver_path = cc_common.get_tool_for_action(
feature_configuration = feature_configuration,
action_name = CPP_LINK_STATIC_LIBRARY_ACTION_NAME,
)
archiver_variables = cc_common.create_link_variables(
feature_configuration = feature_configuration,
cc_toolchain = cc_toolchain,
output_file = output_file.path,
is_using_linker = False,
)
command_line = cc_common.get_memory_inefficient_command_line(
feature_configuration = feature_configuration,
action_name = CPP_LINK_STATIC_LIBRARY_ACTION_NAME,
variables = archiver_variables,
)
args = ctx.actions.args()
args.add_all(command_line)
args.add(object_file)
env = cc_common.get_environment_variables(
feature_configuration = feature_configuration,
action_name = CPP_LINK_STATIC_LIBRARY_ACTION_NAME,
variables = archiver_variables,
)
ctx.actions.run(
executable = archiver_path,
arguments = [args],
env = env,
inputs = depset(
direct = [object_file],
transitive = [
cc_toolchain.all_files,
],
),
outputs = [output_file],
)
cc_info = cc_common.merge_cc_infos(cc_infos = [
CcInfo(compilation_context = compilation_context, linking_context = linking_context),
] + [dep[CcInfo] for dep in ctx.attr.deps])
return [cc_info]
my_c_archive = rule(
implementation = _my_c_archive_impl,
attrs = {
"deps": attr.label_list(providers = [CcInfo]),
"object": attr.label(mandatory = True, providers = [MyCCompileInfo]),
"_cc_toolchain": attr.label(default = Label("@bazel_tools//tools/cpp:current_cc_toolchain")),
},
fragments = ["cpp"],
toolchains = ["@bazel_tools//tools/cpp:toolchain_type"], # copybara-use-repo-external-label
incompatible_use_toolchain_transition = True,
)
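# Illustrative BUILD usage (target names are hypothetical):
#
# my_c_archive(
#     name = "foo_archive",
#     object = ":foo_object",  # a my_c_compile target providing MyCCompileInfo
# )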
|
py | b4138b8d15068675105e3fd5e68c2e4310f709b7 | from flask import jsonify, make_response
from twilio.rest import Client
from special_variables import _twilio_sid, _twilio_auth_token, _twilio_phone_number
account_sid = _twilio_sid
auth_token = _twilio_auth_token
client = Client(account_sid, auth_token)
def send_sms(to, body):
try:
if not (to and body):
return make_response(jsonify({"msg": "invalid data", 'required fields': ['to', 'body'] }), 400)
message = client.messages.create(to = to, body = body, from_ = _twilio_phone_number)
return make_response(jsonify({"msg": "sms sent successfully!", 'sms': message.body, 'to': to}), 200)
except Exception as e:
return make_response(jsonify({"msg": e.args}), 201)
|
py | b4138c0262ff8a64265da13ac447b41466f8c788 | # Code snippets to write a function converting temperature units
# to convert temperature values from Kelvin to either Celcius or Fahrenheit
# 1 to convert temperature values from Kelvin to Celcius and
def kelvinsToCelsius(tempKelvins):
return tempKelvins - 273.15
# 2 to convert temperature values from Celsius to Fahrenheit.
def celsiusToFahr(tempCelsius):
return 9/5 * tempCelsius + 32
# 3 to convert temperature values from Kelvin to Fahrenheit
def kelvinsToFahrenheit(tempKelvins):
tempCelsius = kelvinsToCelsius(tempKelvins)
tempFahr = celsiusToFahr(tempCelsius)
return tempFahr
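# For example, kelvinsToFahrenheit(300.0) converts 300.0 K to 26.85 °C internally
# and returns approximately 80.33 °F.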
# Embed the docstring in the calculator function below.
# 4 Temperature Calculator
# to convert temperature values from Kelvin to either Celsius or Fahrenheit
def tempCalculator(tempK, convertTo):
    """
    Function for converting temperature in Kelvins to Celsius or Fahrenheit.
    Parameters
    ----------
    tempK: <numerical>
        Temperature in Kelvins
    convertTo: <str>
        Target temperature that can be either Celsius ('C') or Fahrenheit ('F'). Supported values: 'C' | 'F'
    Returns
    -------
    <float>
        Converted temperature.
    """
    # Check if user wants the temperature in Celsius
if convertTo == "C":
# Convert the value to Celsius
# using the dedicated function for the task
# that we defined above
convertedTemp = kelvinsToCelsius(tempK)
elif convertTo == "F":
# Convert the value to Fahrenheit
# using the dedicated function for the task
# that we defined above
convertedTemp = kelvinsToFahrenheit(tempK)
# Return the result
return convertedTemp |
py | b4138c334aa51e9ba3a8d9a5eead38fee94f430c | # coding: utf-8
"""
Portainer API
Portainer API is an HTTP API served by Portainer. It is used by the Portainer UI and everything you can do with the UI can be done using the HTTP API. Examples are available at https://gist.github.com/deviantony/77026d402366b4b43fa5918d41bc42f8 You can find out more about Portainer at [http://portainer.io](http://portainer.io) and get some support on [Slack](http://portainer.io/slack/). # Authentication Most of the API endpoints require to be authenticated as well as some level of authorization to be used. Portainer API uses JSON Web Token to manage authentication and thus requires you to provide a token in the **Authorization** header of each request with the **Bearer** authentication mechanism. Example: ``` Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6MSwidXNlcm5hbWUiOiJhZG1pbiIsInJvbGUiOjEsImV4cCI6MTQ5OTM3NjE1NH0.NJ6vE8FY1WG6jsRQzfMqeatJ4vh2TWAeeYfDhP71YEE ``` # Security Each API endpoint has an associated access policy, it is documented in the description of each endpoint. Different access policies are available: * Public access * Authenticated access * Restricted access * Administrator access ### Public access No authentication is required to access the endpoints with this access policy. ### Authenticated access Authentication is required to access the endpoints with this access policy. ### Restricted access Authentication is required to access the endpoints with this access policy. Extra-checks might be added to ensure access to the resource is granted. Returned data might also be filtered. ### Administrator access Authentication as well as an administrator role are required to access the endpoints with this access policy. # Execute Docker requests Portainer **DO NOT** expose specific endpoints to manage your Docker resources (create a container, remove a volume, etc...). Instead, it acts as a reverse-proxy to the Docker HTTP API. This means that you can execute Docker requests **via** the Portainer HTTP API. To do so, you can use the `/endpoints/{id}/docker` Portainer API endpoint (which is not documented below due to Swagger limitations). This endpoint has a restricted access policy so you still need to be authenticated to be able to query this endpoint. Any query on this endpoint will be proxied to the Docker API of the associated endpoint (requests and responses objects are the same as documented in the Docker API). **NOTE**: You can find more information on how to query the Docker API in the [Docker official documentation](https://docs.docker.com/engine/api/v1.30/) as well as in [this Portainer example](https://gist.github.com/deviantony/77026d402366b4b43fa5918d41bc42f8). # noqa: E501
OpenAPI spec version: 1.24.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class UsersApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def user_admin_check(self, **kwargs): # noqa: E501
"""Check administrator account existence # noqa: E501
Check if an administrator account exists in the database. **Access policy**: public # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_admin_check(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.user_admin_check_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.user_admin_check_with_http_info(**kwargs) # noqa: E501
return data
def user_admin_check_with_http_info(self, **kwargs): # noqa: E501
"""Check administrator account existence # noqa: E501
Check if an administrator account exists in the database. **Access policy**: public # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_admin_check_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method user_admin_check" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['jwt'] # noqa: E501
return self.api_client.call_api(
'/users/admin/check', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def user_admin_init(self, body, **kwargs): # noqa: E501
"""Initialize administrator account # noqa: E501
Initialize the 'admin' user account. **Access policy**: public # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_admin_init(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param UserAdminInitRequest body: User details (required)
:return: User
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.user_admin_init_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.user_admin_init_with_http_info(body, **kwargs) # noqa: E501
return data
def user_admin_init_with_http_info(self, body, **kwargs): # noqa: E501
"""Initialize administrator account # noqa: E501
Initialize the 'admin' user account. **Access policy**: public # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_admin_init_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param UserAdminInitRequest body: User details (required)
:return: User
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method user_admin_init" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `user_admin_init`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['jwt'] # noqa: E501
return self.api_client.call_api(
'/users/admin/init', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='User', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def user_create(self, body, **kwargs): # noqa: E501
"""Create a new user # noqa: E501
Create a new Portainer user. Only team leaders and administrators can create users. Only administrators can create an administrator user account. **Access policy**: restricted # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_create(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param UserCreateRequest body: User details (required)
:return: UserSubset
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.user_create_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.user_create_with_http_info(body, **kwargs) # noqa: E501
return data
def user_create_with_http_info(self, body, **kwargs): # noqa: E501
"""Create a new user # noqa: E501
Create a new Portainer user. Only team leaders and administrators can create users. Only administrators can create an administrator user account. **Access policy**: restricted # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_create_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param UserCreateRequest body: User details (required)
:return: UserSubset
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method user_create" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `user_create`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['jwt'] # noqa: E501
return self.api_client.call_api(
'/users', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UserSubset', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def user_delete(self, id, **kwargs): # noqa: E501
"""Remove a user # noqa: E501
Remove a user. **Access policy**: administrator # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_delete(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: User identifier (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.user_delete_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.user_delete_with_http_info(id, **kwargs) # noqa: E501
return data
def user_delete_with_http_info(self, id, **kwargs): # noqa: E501
"""Remove a user # noqa: E501
Remove a user. **Access policy**: administrator # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_delete_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: User identifier (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method user_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `user_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['jwt'] # noqa: E501
return self.api_client.call_api(
'/users/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def user_inspect(self, id, **kwargs): # noqa: E501
"""Inspect a user # noqa: E501
Retrieve details about a user. **Access policy**: administrator # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_inspect(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: User identifier (required)
:return: User
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.user_inspect_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.user_inspect_with_http_info(id, **kwargs) # noqa: E501
return data
def user_inspect_with_http_info(self, id, **kwargs): # noqa: E501
"""Inspect a user # noqa: E501
Retrieve details about a user. **Access policy**: administrator # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_inspect_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: User identifier (required)
:return: User
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method user_inspect" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `user_inspect`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['jwt'] # noqa: E501
return self.api_client.call_api(
'/users/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='User', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def user_list(self, **kwargs): # noqa: E501
"""List users # noqa: E501
List Portainer users. Non-administrator users will only be able to list other non-administrator user accounts. **Access policy**: restricted # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_list(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: UserListResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.user_list_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.user_list_with_http_info(**kwargs) # noqa: E501
return data
def user_list_with_http_info(self, **kwargs): # noqa: E501
"""List users # noqa: E501
List Portainer users. Non-administrator users will only be able to list other non-administrator user accounts. **Access policy**: restricted # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_list_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: UserListResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method user_list" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['jwt'] # noqa: E501
return self.api_client.call_api(
'/users', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UserListResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def user_memberships_inspect(self, id, **kwargs): # noqa: E501
"""Inspect a user memberships # noqa: E501
Inspect a user memberships. **Access policy**: authenticated # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_memberships_inspect(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: User identifier (required)
:return: UserMembershipsResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.user_memberships_inspect_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.user_memberships_inspect_with_http_info(id, **kwargs) # noqa: E501
return data
def user_memberships_inspect_with_http_info(self, id, **kwargs): # noqa: E501
"""Inspect a user memberships # noqa: E501
Inspect a user memberships. **Access policy**: authenticated # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_memberships_inspect_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: User identifier (required)
:return: UserMembershipsResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method user_memberships_inspect" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `user_memberships_inspect`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['jwt'] # noqa: E501
return self.api_client.call_api(
'/users/{id}/memberships', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UserMembershipsResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def user_password_check(self, id, body, **kwargs): # noqa: E501
"""Check password validity for a user # noqa: E501
Check if the submitted password is valid for the specified user. **Access policy**: authenticated # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_password_check(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: User identifier (required)
:param UserPasswordCheckRequest body: User details (required)
:return: UserPasswordCheckResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.user_password_check_with_http_info(id, body, **kwargs) # noqa: E501
else:
(data) = self.user_password_check_with_http_info(id, body, **kwargs) # noqa: E501
return data
def user_password_check_with_http_info(self, id, body, **kwargs): # noqa: E501
"""Check password validity for a user # noqa: E501
Check if the submitted password is valid for the specified user. **Access policy**: authenticated # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_password_check_with_http_info(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: User identifier (required)
:param UserPasswordCheckRequest body: User details (required)
:return: UserPasswordCheckResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method user_password_check" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `user_password_check`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `user_password_check`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['jwt'] # noqa: E501
return self.api_client.call_api(
'/users/{id}/passwd', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UserPasswordCheckResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def user_update(self, id, body, **kwargs): # noqa: E501
"""Update a user # noqa: E501
Update user details. A regular user account can only update their own details. **Access policy**: authenticated # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_update(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: User identifier (required)
:param UserUpdateRequest body: User details (required)
:return: User
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.user_update_with_http_info(id, body, **kwargs) # noqa: E501
else:
(data) = self.user_update_with_http_info(id, body, **kwargs) # noqa: E501
return data
def user_update_with_http_info(self, id, body, **kwargs): # noqa: E501
"""Update a user # noqa: E501
Update user details. A regular user account can only update their own details. **Access policy**: authenticated # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.user_update_with_http_info(id, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: User identifier (required)
:param UserUpdateRequest body: User details (required)
:return: User
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method user_update" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `user_update`") # noqa: E501
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `user_update`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['jwt'] # noqa: E501
return self.api_client.call_api(
'/users/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='User', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
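# ------------------------------------------------------------------
# Usage sketch (not part of the generated client): how the endpoints above
# might be called from application code. ``users_api`` is assumed to be an
# instance of the enclosing generated API class, and the two request bodies
# are assumed to be pre-built UserUpdateRequest / UserPasswordCheckRequest
# models; all of these are supplied by the caller, not defined here.
def _example_user_endpoints(users_api, user_id, update_body, password_body):
    # Synchronous calls return the deserialized response models.
    memberships = users_api.user_memberships_inspect(user_id)
    password_ok = users_api.user_password_check(user_id, password_body)
    # Asynchronous variant: pass async_req=True and join the returned thread.
    thread = users_api.user_update(user_id, update_body, async_req=True)
    updated_user = thread.get()
    return memberships, password_ok, updated_user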
|
py | b4138da447b465cb0fb07c91b389d4d972f2334f | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from ._enums import *
__all__ = [
'ClusterParameterGroupParameterArgs',
'ClusterRestoreToPointInTimeArgs',
'ClusterS3ImportArgs',
'ClusterScalingConfigurationArgs',
'GlobalClusterGlobalClusterMemberArgs',
'InstanceRestoreToPointInTimeArgs',
'InstanceS3ImportArgs',
'OptionGroupOptionArgs',
'OptionGroupOptionOptionSettingArgs',
'ParameterGroupParameterArgs',
'ProxyAuthArgs',
'ProxyDefaultTargetGroupConnectionPoolConfigArgs',
'SecurityGroupIngressArgs',
]
@pulumi.input_type
class ClusterParameterGroupParameterArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str],
apply_method: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] name: The name of the DB parameter.
:param pulumi.Input[str] value: The value of the DB parameter.
:param pulumi.Input[str] apply_method: "immediate" (default), or "pending-reboot". Some
engines can't apply some parameters without a reboot, and you will need to
specify "pending-reboot" here.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
if apply_method is not None:
pulumi.set(__self__, "apply_method", apply_method)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the DB parameter.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The value of the DB parameter.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@property
@pulumi.getter(name="applyMethod")
def apply_method(self) -> Optional[pulumi.Input[str]]:
"""
"immediate" (default), or "pending-reboot". Some
engines can't apply some parameters without a reboot, and you will need to
specify "pending-reboot" here.
"""
return pulumi.get(self, "apply_method")
@apply_method.setter
def apply_method(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "apply_method", value)
@pulumi.input_type
class ClusterRestoreToPointInTimeArgs:
def __init__(__self__, *,
source_cluster_identifier: pulumi.Input[str],
restore_to_time: Optional[pulumi.Input[str]] = None,
restore_type: Optional[pulumi.Input[str]] = None,
use_latest_restorable_time: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input[str] source_cluster_identifier: The identifier of the source database cluster from which to restore.
:param pulumi.Input[str] restore_to_time: Date and time in UTC format to restore the database cluster to. Conflicts with `use_latest_restorable_time`.
:param pulumi.Input[str] restore_type: Type of restore to be performed.
Valid options are `full-copy` (default) and `copy-on-write`.
:param pulumi.Input[bool] use_latest_restorable_time: Set to true to restore the database cluster to the latest restorable backup time. Defaults to false. Conflicts with `restore_to_time`.
"""
pulumi.set(__self__, "source_cluster_identifier", source_cluster_identifier)
if restore_to_time is not None:
pulumi.set(__self__, "restore_to_time", restore_to_time)
if restore_type is not None:
pulumi.set(__self__, "restore_type", restore_type)
if use_latest_restorable_time is not None:
pulumi.set(__self__, "use_latest_restorable_time", use_latest_restorable_time)
@property
@pulumi.getter(name="sourceClusterIdentifier")
def source_cluster_identifier(self) -> pulumi.Input[str]:
"""
The identifier of the source database cluster from which to restore.
"""
return pulumi.get(self, "source_cluster_identifier")
@source_cluster_identifier.setter
def source_cluster_identifier(self, value: pulumi.Input[str]):
pulumi.set(self, "source_cluster_identifier", value)
@property
@pulumi.getter(name="restoreToTime")
def restore_to_time(self) -> Optional[pulumi.Input[str]]:
"""
Date and time in UTC format to restore the database cluster to. Conflicts with `use_latest_restorable_time`.
"""
return pulumi.get(self, "restore_to_time")
@restore_to_time.setter
def restore_to_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "restore_to_time", value)
@property
@pulumi.getter(name="restoreType")
def restore_type(self) -> Optional[pulumi.Input[str]]:
"""
Type of restore to be performed.
Valid options are `full-copy` (default) and `copy-on-write`.
"""
return pulumi.get(self, "restore_type")
@restore_type.setter
def restore_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "restore_type", value)
@property
@pulumi.getter(name="useLatestRestorableTime")
def use_latest_restorable_time(self) -> Optional[pulumi.Input[bool]]:
"""
Set to true to restore the database cluster to the latest restorable backup time. Defaults to false. Conflicts with `restore_to_time`.
"""
return pulumi.get(self, "use_latest_restorable_time")
@use_latest_restorable_time.setter
def use_latest_restorable_time(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "use_latest_restorable_time", value)
@pulumi.input_type
class ClusterS3ImportArgs:
def __init__(__self__, *,
bucket_name: pulumi.Input[str],
ingestion_role: pulumi.Input[str],
source_engine: pulumi.Input[str],
source_engine_version: pulumi.Input[str],
bucket_prefix: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] bucket_name: The bucket name where your backup is stored
:param pulumi.Input[str] ingestion_role: Role applied to load the data.
:param pulumi.Input[str] source_engine: Source engine for the backup
:param pulumi.Input[str] source_engine_version: Version of the source engine used to make the backup
:param pulumi.Input[str] bucket_prefix: The path (prefix) inside the bucket where your backup is stored; can be blank
"""
pulumi.set(__self__, "bucket_name", bucket_name)
pulumi.set(__self__, "ingestion_role", ingestion_role)
pulumi.set(__self__, "source_engine", source_engine)
pulumi.set(__self__, "source_engine_version", source_engine_version)
if bucket_prefix is not None:
pulumi.set(__self__, "bucket_prefix", bucket_prefix)
@property
@pulumi.getter(name="bucketName")
def bucket_name(self) -> pulumi.Input[str]:
"""
The bucket name where your backup is stored
"""
return pulumi.get(self, "bucket_name")
@bucket_name.setter
def bucket_name(self, value: pulumi.Input[str]):
pulumi.set(self, "bucket_name", value)
@property
@pulumi.getter(name="ingestionRole")
def ingestion_role(self) -> pulumi.Input[str]:
"""
Role applied to load the data.
"""
return pulumi.get(self, "ingestion_role")
@ingestion_role.setter
def ingestion_role(self, value: pulumi.Input[str]):
pulumi.set(self, "ingestion_role", value)
@property
@pulumi.getter(name="sourceEngine")
def source_engine(self) -> pulumi.Input[str]:
"""
Source engine for the backup
"""
return pulumi.get(self, "source_engine")
@source_engine.setter
def source_engine(self, value: pulumi.Input[str]):
pulumi.set(self, "source_engine", value)
@property
@pulumi.getter(name="sourceEngineVersion")
def source_engine_version(self) -> pulumi.Input[str]:
"""
Version of the source engine used to make the backup
"""
return pulumi.get(self, "source_engine_version")
@source_engine_version.setter
def source_engine_version(self, value: pulumi.Input[str]):
pulumi.set(self, "source_engine_version", value)
@property
@pulumi.getter(name="bucketPrefix")
def bucket_prefix(self) -> Optional[pulumi.Input[str]]:
"""
The path (prefix) inside the bucket where your backup is stored; can be blank
"""
return pulumi.get(self, "bucket_prefix")
@bucket_prefix.setter
def bucket_prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "bucket_prefix", value)
@pulumi.input_type
class ClusterScalingConfigurationArgs:
def __init__(__self__, *,
auto_pause: Optional[pulumi.Input[bool]] = None,
max_capacity: Optional[pulumi.Input[int]] = None,
min_capacity: Optional[pulumi.Input[int]] = None,
seconds_until_auto_pause: Optional[pulumi.Input[int]] = None,
timeout_action: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[bool] auto_pause: Whether to enable automatic pause. A DB cluster can be paused only when it's idle (it has no connections). If a DB cluster is paused for more than seven days, the DB cluster might be backed up with a snapshot. In this case, the DB cluster is restored when there is a request to connect to it. Defaults to `true`.
:param pulumi.Input[int] max_capacity: The maximum capacity for an Aurora DB cluster in `serverless` DB engine mode. The maximum capacity must be greater than or equal to the minimum capacity. Valid Aurora MySQL capacity values are `1`, `2`, `4`, `8`, `16`, `32`, `64`, `128`, `256`. Valid Aurora PostgreSQL capacity values are (`2`, `4`, `8`, `16`, `32`, `64`, `192`, and `384`). Defaults to `16`.
:param pulumi.Input[int] min_capacity: The minimum capacity for an Aurora DB cluster in `serverless` DB engine mode. The minimum capacity must be less than or equal to the maximum capacity. Valid Aurora MySQL capacity values are `1`, `2`, `4`, `8`, `16`, `32`, `64`, `128`, `256`. Valid Aurora PostgreSQL capacity values are (`2`, `4`, `8`, `16`, `32`, `64`, `192`, and `384`). Defaults to `1`.
:param pulumi.Input[int] seconds_until_auto_pause: The time, in seconds, before an Aurora DB cluster in serverless mode is paused. Valid values are `300` through `86400`. Defaults to `300`.
:param pulumi.Input[str] timeout_action: The action to take when the timeout is reached. Valid values: `ForceApplyCapacityChange`, `RollbackCapacityChange`. Defaults to `RollbackCapacityChange`. See [documentation](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.how-it-works.html#aurora-serverless.how-it-works.timeout-action).
"""
if auto_pause is not None:
pulumi.set(__self__, "auto_pause", auto_pause)
if max_capacity is not None:
pulumi.set(__self__, "max_capacity", max_capacity)
if min_capacity is not None:
pulumi.set(__self__, "min_capacity", min_capacity)
if seconds_until_auto_pause is not None:
pulumi.set(__self__, "seconds_until_auto_pause", seconds_until_auto_pause)
if timeout_action is not None:
pulumi.set(__self__, "timeout_action", timeout_action)
@property
@pulumi.getter(name="autoPause")
def auto_pause(self) -> Optional[pulumi.Input[bool]]:
"""
Whether to enable automatic pause. A DB cluster can be paused only when it's idle (it has no connections). If a DB cluster is paused for more than seven days, the DB cluster might be backed up with a snapshot. In this case, the DB cluster is restored when there is a request to connect to it. Defaults to `true`.
"""
return pulumi.get(self, "auto_pause")
@auto_pause.setter
def auto_pause(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "auto_pause", value)
@property
@pulumi.getter(name="maxCapacity")
def max_capacity(self) -> Optional[pulumi.Input[int]]:
"""
The maximum capacity for an Aurora DB cluster in `serverless` DB engine mode. The maximum capacity must be greater than or equal to the minimum capacity. Valid Aurora MySQL capacity values are `1`, `2`, `4`, `8`, `16`, `32`, `64`, `128`, `256`. Valid Aurora PostgreSQL capacity values are (`2`, `4`, `8`, `16`, `32`, `64`, `192`, and `384`). Defaults to `16`.
"""
return pulumi.get(self, "max_capacity")
@max_capacity.setter
def max_capacity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_capacity", value)
@property
@pulumi.getter(name="minCapacity")
def min_capacity(self) -> Optional[pulumi.Input[int]]:
"""
The minimum capacity for an Aurora DB cluster in `serverless` DB engine mode. The minimum capacity must be less than or equal to the maximum capacity. Valid Aurora MySQL capacity values are `1`, `2`, `4`, `8`, `16`, `32`, `64`, `128`, `256`. Valid Aurora PostgreSQL capacity values are (`2`, `4`, `8`, `16`, `32`, `64`, `192`, and `384`). Defaults to `1`.
"""
return pulumi.get(self, "min_capacity")
@min_capacity.setter
def min_capacity(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "min_capacity", value)
@property
@pulumi.getter(name="secondsUntilAutoPause")
def seconds_until_auto_pause(self) -> Optional[pulumi.Input[int]]:
"""
The time, in seconds, before an Aurora DB cluster in serverless mode is paused. Valid values are `300` through `86400`. Defaults to `300`.
"""
return pulumi.get(self, "seconds_until_auto_pause")
@seconds_until_auto_pause.setter
def seconds_until_auto_pause(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "seconds_until_auto_pause", value)
@property
@pulumi.getter(name="timeoutAction")
def timeout_action(self) -> Optional[pulumi.Input[str]]:
"""
The action to take when the timeout is reached. Valid values: `ForceApplyCapacityChange`, `RollbackCapacityChange`. Defaults to `RollbackCapacityChange`. See [documentation](https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/aurora-serverless.how-it-works.html#aurora-serverless.how-it-works.timeout-action).
"""
return pulumi.get(self, "timeout_action")
@timeout_action.setter
def timeout_action(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "timeout_action", value)
@pulumi.input_type
class GlobalClusterGlobalClusterMemberArgs:
def __init__(__self__, *,
db_cluster_arn: Optional[pulumi.Input[str]] = None,
is_writer: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input[str] db_cluster_arn: Amazon Resource Name (ARN) of member DB Cluster
:param pulumi.Input[bool] is_writer: Whether the member is the primary DB Cluster
"""
if db_cluster_arn is not None:
pulumi.set(__self__, "db_cluster_arn", db_cluster_arn)
if is_writer is not None:
pulumi.set(__self__, "is_writer", is_writer)
@property
@pulumi.getter(name="dbClusterArn")
def db_cluster_arn(self) -> Optional[pulumi.Input[str]]:
"""
Amazon Resource Name (ARN) of member DB Cluster
"""
return pulumi.get(self, "db_cluster_arn")
@db_cluster_arn.setter
def db_cluster_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "db_cluster_arn", value)
@property
@pulumi.getter(name="isWriter")
def is_writer(self) -> Optional[pulumi.Input[bool]]:
"""
Whether the member is the primary DB Cluster
"""
return pulumi.get(self, "is_writer")
@is_writer.setter
def is_writer(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_writer", value)
@pulumi.input_type
class InstanceRestoreToPointInTimeArgs:
def __init__(__self__, *,
restore_time: Optional[pulumi.Input[str]] = None,
source_db_instance_identifier: Optional[pulumi.Input[str]] = None,
source_dbi_resource_id: Optional[pulumi.Input[str]] = None,
use_latest_restorable_time: Optional[pulumi.Input[bool]] = None):
"""
:param pulumi.Input[str] restore_time: The date and time to restore from. Value must be a time in Universal Coordinated Time (UTC) format and must be before the latest restorable time for the DB instance. Cannot be specified with `use_latest_restorable_time`.
:param pulumi.Input[str] source_db_instance_identifier: The identifier of the source DB instance from which to restore. Must match the identifier of an existing DB instance. Required if `source_dbi_resource_id` is not specified.
:param pulumi.Input[str] source_dbi_resource_id: The resource ID of the source DB instance from which to restore. Required if `source_db_instance_identifier` is not specified.
:param pulumi.Input[bool] use_latest_restorable_time: A boolean value that indicates whether the DB instance is restored from the latest backup time. Defaults to `false`. Cannot be specified with `restore_time`.
"""
if restore_time is not None:
pulumi.set(__self__, "restore_time", restore_time)
if source_db_instance_identifier is not None:
pulumi.set(__self__, "source_db_instance_identifier", source_db_instance_identifier)
if source_dbi_resource_id is not None:
pulumi.set(__self__, "source_dbi_resource_id", source_dbi_resource_id)
if use_latest_restorable_time is not None:
pulumi.set(__self__, "use_latest_restorable_time", use_latest_restorable_time)
@property
@pulumi.getter(name="restoreTime")
def restore_time(self) -> Optional[pulumi.Input[str]]:
"""
The date and time to restore from. Value must be a time in Universal Coordinated Time (UTC) format and must be before the latest restorable time for the DB instance. Cannot be specified with `use_latest_restorable_time`.
"""
return pulumi.get(self, "restore_time")
@restore_time.setter
def restore_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "restore_time", value)
@property
@pulumi.getter(name="sourceDbInstanceIdentifier")
def source_db_instance_identifier(self) -> Optional[pulumi.Input[str]]:
"""
The identifier of the source DB instance from which to restore. Must match the identifier of an existing DB instance. Required if `source_dbi_resource_id` is not specified.
"""
return pulumi.get(self, "source_db_instance_identifier")
@source_db_instance_identifier.setter
def source_db_instance_identifier(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_db_instance_identifier", value)
@property
@pulumi.getter(name="sourceDbiResourceId")
def source_dbi_resource_id(self) -> Optional[pulumi.Input[str]]:
"""
The resource ID of the source DB instance from which to restore. Required if `source_db_instance_identifier` is not specified.
"""
return pulumi.get(self, "source_dbi_resource_id")
@source_dbi_resource_id.setter
def source_dbi_resource_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source_dbi_resource_id", value)
@property
@pulumi.getter(name="useLatestRestorableTime")
def use_latest_restorable_time(self) -> Optional[pulumi.Input[bool]]:
"""
A boolean value that indicates whether the DB instance is restored from the latest backup time. Defaults to `false`. Cannot be specified with `restore_time`.
"""
return pulumi.get(self, "use_latest_restorable_time")
@use_latest_restorable_time.setter
def use_latest_restorable_time(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "use_latest_restorable_time", value)
@pulumi.input_type
class InstanceS3ImportArgs:
def __init__(__self__, *,
bucket_name: pulumi.Input[str],
ingestion_role: pulumi.Input[str],
source_engine: pulumi.Input[str],
source_engine_version: pulumi.Input[str],
bucket_prefix: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] bucket_name: The bucket name where your backup is stored
:param pulumi.Input[str] ingestion_role: Role applied to load the data.
:param pulumi.Input[str] source_engine: Source engine for the backup
:param pulumi.Input[str] source_engine_version: Version of the source engine used to make the backup
:param pulumi.Input[str] bucket_prefix: The path (prefix) inside the bucket where your backup is stored; can be blank
"""
pulumi.set(__self__, "bucket_name", bucket_name)
pulumi.set(__self__, "ingestion_role", ingestion_role)
pulumi.set(__self__, "source_engine", source_engine)
pulumi.set(__self__, "source_engine_version", source_engine_version)
if bucket_prefix is not None:
pulumi.set(__self__, "bucket_prefix", bucket_prefix)
@property
@pulumi.getter(name="bucketName")
def bucket_name(self) -> pulumi.Input[str]:
"""
The bucket name where your backup is stored
"""
return pulumi.get(self, "bucket_name")
@bucket_name.setter
def bucket_name(self, value: pulumi.Input[str]):
pulumi.set(self, "bucket_name", value)
@property
@pulumi.getter(name="ingestionRole")
def ingestion_role(self) -> pulumi.Input[str]:
"""
Role applied to load the data.
"""
return pulumi.get(self, "ingestion_role")
@ingestion_role.setter
def ingestion_role(self, value: pulumi.Input[str]):
pulumi.set(self, "ingestion_role", value)
@property
@pulumi.getter(name="sourceEngine")
def source_engine(self) -> pulumi.Input[str]:
"""
Source engine for the backup
"""
return pulumi.get(self, "source_engine")
@source_engine.setter
def source_engine(self, value: pulumi.Input[str]):
pulumi.set(self, "source_engine", value)
@property
@pulumi.getter(name="sourceEngineVersion")
def source_engine_version(self) -> pulumi.Input[str]:
"""
Version of the source engine used to make the backup
"""
return pulumi.get(self, "source_engine_version")
@source_engine_version.setter
def source_engine_version(self, value: pulumi.Input[str]):
pulumi.set(self, "source_engine_version", value)
@property
@pulumi.getter(name="bucketPrefix")
def bucket_prefix(self) -> Optional[pulumi.Input[str]]:
"""
The path (prefix) inside the bucket where your backup is stored; can be blank
"""
return pulumi.get(self, "bucket_prefix")
@bucket_prefix.setter
def bucket_prefix(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "bucket_prefix", value)
@pulumi.input_type
class OptionGroupOptionArgs:
def __init__(__self__, *,
option_name: pulumi.Input[str],
db_security_group_memberships: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
option_settings: Optional[pulumi.Input[Sequence[pulumi.Input['OptionGroupOptionOptionSettingArgs']]]] = None,
port: Optional[pulumi.Input[int]] = None,
version: Optional[pulumi.Input[str]] = None,
vpc_security_group_memberships: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
:param pulumi.Input[str] option_name: The Name of the Option (e.g., MEMCACHED).
:param pulumi.Input[Sequence[pulumi.Input[str]]] db_security_group_memberships: A list of DB Security Groups for which the option is enabled.
:param pulumi.Input[Sequence[pulumi.Input['OptionGroupOptionOptionSettingArgs']]] option_settings: A list of option settings to apply.
:param pulumi.Input[int] port: The Port number when connecting to the Option (e.g., 11211).
:param pulumi.Input[str] version: The version of the option (e.g., 13.1.0.0).
:param pulumi.Input[Sequence[pulumi.Input[str]]] vpc_security_group_memberships: A list of VPC Security Groups for which the option is enabled.
"""
pulumi.set(__self__, "option_name", option_name)
if db_security_group_memberships is not None:
pulumi.set(__self__, "db_security_group_memberships", db_security_group_memberships)
if option_settings is not None:
pulumi.set(__self__, "option_settings", option_settings)
if port is not None:
pulumi.set(__self__, "port", port)
if version is not None:
pulumi.set(__self__, "version", version)
if vpc_security_group_memberships is not None:
pulumi.set(__self__, "vpc_security_group_memberships", vpc_security_group_memberships)
@property
@pulumi.getter(name="optionName")
def option_name(self) -> pulumi.Input[str]:
"""
The Name of the Option (e.g., MEMCACHED).
"""
return pulumi.get(self, "option_name")
@option_name.setter
def option_name(self, value: pulumi.Input[str]):
pulumi.set(self, "option_name", value)
@property
@pulumi.getter(name="dbSecurityGroupMemberships")
def db_security_group_memberships(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of DB Security Groups for which the option is enabled.
"""
return pulumi.get(self, "db_security_group_memberships")
@db_security_group_memberships.setter
def db_security_group_memberships(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "db_security_group_memberships", value)
@property
@pulumi.getter(name="optionSettings")
def option_settings(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OptionGroupOptionOptionSettingArgs']]]]:
"""
A list of option settings to apply.
"""
return pulumi.get(self, "option_settings")
@option_settings.setter
def option_settings(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OptionGroupOptionOptionSettingArgs']]]]):
pulumi.set(self, "option_settings", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[int]]:
"""
The Port number when connecting to the Option (e.g., 11211).
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def version(self) -> Optional[pulumi.Input[str]]:
"""
The version of the option (e.g., 13.1.0.0).
"""
return pulumi.get(self, "version")
@version.setter
def version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "version", value)
@property
@pulumi.getter(name="vpcSecurityGroupMemberships")
def vpc_security_group_memberships(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
A list of VPC Security Groups for which the option is enabled.
"""
return pulumi.get(self, "vpc_security_group_memberships")
@vpc_security_group_memberships.setter
def vpc_security_group_memberships(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "vpc_security_group_memberships", value)
@pulumi.input_type
class OptionGroupOptionOptionSettingArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str]):
"""
:param pulumi.Input[str] name: The Name of the setting.
:param pulumi.Input[str] value: The Value of the setting.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The Name of the setting.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The Value of the setting.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@pulumi.input_type
class ParameterGroupParameterArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
value: pulumi.Input[str],
apply_method: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] name: The name of the DB parameter.
:param pulumi.Input[str] value: The value of the DB parameter.
:param pulumi.Input[str] apply_method: "immediate" (default), or "pending-reboot". Some
engines can't apply some parameters without a reboot, and you will need to
specify "pending-reboot" here.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "value", value)
if apply_method is not None:
pulumi.set(__self__, "apply_method", apply_method)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
The name of the DB parameter.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def value(self) -> pulumi.Input[str]:
"""
The value of the DB parameter.
"""
return pulumi.get(self, "value")
@value.setter
def value(self, value: pulumi.Input[str]):
pulumi.set(self, "value", value)
@property
@pulumi.getter(name="applyMethod")
def apply_method(self) -> Optional[pulumi.Input[str]]:
"""
"immediate" (default), or "pending-reboot". Some
engines can't apply some parameters without a reboot, and you will need to
specify "pending-reboot" here.
"""
return pulumi.get(self, "apply_method")
@apply_method.setter
def apply_method(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "apply_method", value)
@pulumi.input_type
class ProxyAuthArgs:
def __init__(__self__, *,
auth_scheme: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
iam_auth: Optional[pulumi.Input[str]] = None,
secret_arn: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] auth_scheme: The type of authentication that the proxy uses for connections from the proxy to the underlying database. One of `SECRETS`.
:param pulumi.Input[str] description: A user-specified description about the authentication used by a proxy to log in as a specific database user.
:param pulumi.Input[str] iam_auth: Whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy. One of `DISABLED`, `REQUIRED`.
:param pulumi.Input[str] secret_arn: The Amazon Resource Name (ARN) representing the secret that the proxy uses to authenticate to the RDS DB instance or Aurora DB cluster. These secrets are stored within Amazon Secrets Manager.
"""
if auth_scheme is not None:
pulumi.set(__self__, "auth_scheme", auth_scheme)
if description is not None:
pulumi.set(__self__, "description", description)
if iam_auth is not None:
pulumi.set(__self__, "iam_auth", iam_auth)
if secret_arn is not None:
pulumi.set(__self__, "secret_arn", secret_arn)
@property
@pulumi.getter(name="authScheme")
def auth_scheme(self) -> Optional[pulumi.Input[str]]:
"""
The type of authentication that the proxy uses for connections from the proxy to the underlying database. One of `SECRETS`.
"""
return pulumi.get(self, "auth_scheme")
@auth_scheme.setter
def auth_scheme(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "auth_scheme", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
A user-specified description about the authentication used by a proxy to log in as a specific database user.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="iamAuth")
def iam_auth(self) -> Optional[pulumi.Input[str]]:
"""
Whether to require or disallow AWS Identity and Access Management (IAM) authentication for connections to the proxy. One of `DISABLED`, `REQUIRED`.
"""
return pulumi.get(self, "iam_auth")
@iam_auth.setter
def iam_auth(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "iam_auth", value)
@property
@pulumi.getter(name="secretArn")
def secret_arn(self) -> Optional[pulumi.Input[str]]:
"""
The Amazon Resource Name (ARN) representing the secret that the proxy uses to authenticate to the RDS DB instance or Aurora DB cluster. These secrets are stored within Amazon Secrets Manager.
"""
return pulumi.get(self, "secret_arn")
@secret_arn.setter
def secret_arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "secret_arn", value)
@pulumi.input_type
class ProxyDefaultTargetGroupConnectionPoolConfigArgs:
def __init__(__self__, *,
connection_borrow_timeout: Optional[pulumi.Input[int]] = None,
init_query: Optional[pulumi.Input[str]] = None,
max_connections_percent: Optional[pulumi.Input[int]] = None,
max_idle_connections_percent: Optional[pulumi.Input[int]] = None,
session_pinning_filters: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
:param pulumi.Input[int] connection_borrow_timeout: The number of seconds for a proxy to wait for a connection to become available in the connection pool. Only applies when the proxy has opened its maximum number of connections and all connections are busy with client sessions.
:param pulumi.Input[str] init_query: One or more SQL statements for the proxy to run when opening each new database connection. Typically used with `SET` statements to make sure that each connection has identical settings such as time zone and character set. This setting is empty by default. For multiple statements, use semicolons as the separator. You can also include multiple variables in a single `SET` statement, such as `SET x=1, y=2`.
:param pulumi.Input[int] max_connections_percent: The maximum size of the connection pool for each target in a target group. For Aurora MySQL, it is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group.
:param pulumi.Input[int] max_idle_connections_percent: Controls how actively the proxy closes idle database connections in the connection pool. A high value enables the proxy to leave a high percentage of idle connections open. A low value causes the proxy to close idle client connections and return the underlying database connections to the connection pool. For Aurora MySQL, it is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group.
:param pulumi.Input[Sequence[pulumi.Input[str]]] session_pinning_filters: Each item in the list represents a class of SQL operations that normally cause all later statements in a session using a proxy to be pinned to the same underlying database connection. Including an item in the list exempts that class of SQL operations from the pinning behavior. Currently, the only allowed value is `EXCLUDE_VARIABLE_SETS`.
"""
if connection_borrow_timeout is not None:
pulumi.set(__self__, "connection_borrow_timeout", connection_borrow_timeout)
if init_query is not None:
pulumi.set(__self__, "init_query", init_query)
if max_connections_percent is not None:
pulumi.set(__self__, "max_connections_percent", max_connections_percent)
if max_idle_connections_percent is not None:
pulumi.set(__self__, "max_idle_connections_percent", max_idle_connections_percent)
if session_pinning_filters is not None:
pulumi.set(__self__, "session_pinning_filters", session_pinning_filters)
@property
@pulumi.getter(name="connectionBorrowTimeout")
def connection_borrow_timeout(self) -> Optional[pulumi.Input[int]]:
"""
The number of seconds for a proxy to wait for a connection to become available in the connection pool. Only applies when the proxy has opened its maximum number of connections and all connections are busy with client sessions.
"""
return pulumi.get(self, "connection_borrow_timeout")
@connection_borrow_timeout.setter
def connection_borrow_timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "connection_borrow_timeout", value)
@property
@pulumi.getter(name="initQuery")
def init_query(self) -> Optional[pulumi.Input[str]]:
"""
One or more SQL statements for the proxy to run when opening each new database connection. Typically used with `SET` statements to make sure that each connection has identical settings such as time zone and character set. This setting is empty by default. For multiple statements, use semicolons as the separator. You can also include multiple variables in a single `SET` statement, such as `SET x=1, y=2`.
"""
return pulumi.get(self, "init_query")
@init_query.setter
def init_query(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "init_query", value)
@property
@pulumi.getter(name="maxConnectionsPercent")
def max_connections_percent(self) -> Optional[pulumi.Input[int]]:
"""
The maximum size of the connection pool for each target in a target group. For Aurora MySQL, it is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group.
"""
return pulumi.get(self, "max_connections_percent")
@max_connections_percent.setter
def max_connections_percent(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_connections_percent", value)
@property
@pulumi.getter(name="maxIdleConnectionsPercent")
def max_idle_connections_percent(self) -> Optional[pulumi.Input[int]]:
"""
Controls how actively the proxy closes idle database connections in the connection pool. A high value enables the proxy to leave a high percentage of idle connections open. A low value causes the proxy to close idle client connections and return the underlying database connections to the connection pool. For Aurora MySQL, it is expressed as a percentage of the max_connections setting for the RDS DB instance or Aurora DB cluster used by the target group.
"""
return pulumi.get(self, "max_idle_connections_percent")
@max_idle_connections_percent.setter
def max_idle_connections_percent(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "max_idle_connections_percent", value)
@property
@pulumi.getter(name="sessionPinningFilters")
def session_pinning_filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Each item in the list represents a class of SQL operations that normally cause all later statements in a session using a proxy to be pinned to the same underlying database connection. Including an item in the list exempts that class of SQL operations from the pinning behavior. Currently, the only allowed value is `EXCLUDE_VARIABLE_SETS`.
"""
return pulumi.get(self, "session_pinning_filters")
@session_pinning_filters.setter
def session_pinning_filters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "session_pinning_filters", value)
@pulumi.input_type
class SecurityGroupIngressArgs:
def __init__(__self__, *,
cidr: Optional[pulumi.Input[str]] = None,
security_group_id: Optional[pulumi.Input[str]] = None,
security_group_name: Optional[pulumi.Input[str]] = None,
security_group_owner_id: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[str] cidr: The CIDR block to accept
:param pulumi.Input[str] security_group_id: The ID of the security group to authorize
:param pulumi.Input[str] security_group_name: The name of the security group to authorize
:param pulumi.Input[str] security_group_owner_id: The owner Id of the security group provided
by `security_group_name`.
"""
if cidr is not None:
pulumi.set(__self__, "cidr", cidr)
if security_group_id is not None:
pulumi.set(__self__, "security_group_id", security_group_id)
if security_group_name is not None:
pulumi.set(__self__, "security_group_name", security_group_name)
if security_group_owner_id is not None:
pulumi.set(__self__, "security_group_owner_id", security_group_owner_id)
@property
@pulumi.getter
def cidr(self) -> Optional[pulumi.Input[str]]:
"""
The CIDR block to accept
"""
return pulumi.get(self, "cidr")
@cidr.setter
def cidr(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cidr", value)
@property
@pulumi.getter(name="securityGroupId")
def security_group_id(self) -> Optional[pulumi.Input[str]]:
"""
The ID of the security group to authorize
"""
return pulumi.get(self, "security_group_id")
@security_group_id.setter
def security_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "security_group_id", value)
@property
@pulumi.getter(name="securityGroupName")
def security_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the security group to authorize
"""
return pulumi.get(self, "security_group_name")
@security_group_name.setter
def security_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "security_group_name", value)
@property
@pulumi.getter(name="securityGroupOwnerId")
def security_group_owner_id(self) -> Optional[pulumi.Input[str]]:
"""
The owner Id of the security group provided
by `security_group_name`.
"""
return pulumi.get(self, "security_group_owner_id")
@security_group_owner_id.setter
def security_group_owner_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "security_group_owner_id", value)
|
py | b4138e273bb725b1e7156c1895347595ae10b030 | import unittest
import tempfile
import os
import numpy as np
import six
import caffe
from test_net import simple_net_file
class TestSolver(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(TestSolver, cls).setUpClass()
print('TestSolver.setUpClass')
@classmethod
def tearDownClass(cls):
super(TestSolver, cls).tearDownClass()
print('TestSolver.tearDownClass')
def setUp(self):
self.num_output = 13
net_f = simple_net_file(self.num_output)
f = tempfile.NamedTemporaryFile(mode='w+', delete=False)
f.write("""net: '""" + net_f + """'
test_iter: 10 test_interval: 10 base_lr: 0.01 momentum: 0.9
weight_decay: 0.0005 lr_policy: 'inv' gamma: 0.0001 power: 0.75
display: 100 max_iter: 100 snapshot_after_train: false
snapshot_prefix: "model" """)
f.close()
self.solver = caffe.SGDSolver(f.name)
# also make sure get_solver runs
caffe.get_solver(f.name)
caffe.set_mode_cpu()
# fill in valid labels
self.solver.net.blobs['label'].data[...] = \
np.random.randint(self.num_output,
size=self.solver.net.blobs['label'].data.shape)
self.solver.test_nets[0].blobs['label'].data[...] = \
np.random.randint(self.num_output,
size=self.solver.test_nets[0].blobs['label'].data.shape)
os.remove(f.name)
os.remove(net_f)
def test_solve(self):
self.assertEqual(self.solver.iter, 0)
self.solver.solve()
self.assertEqual(self.solver.iter, 100)
def test_net_memory(self):
"""Check that nets survive after the solver is destroyed."""
nets = [self.solver.net] + list(self.solver.test_nets)
self.assertEqual(len(nets), 2)
del self.solver
total = 0
for net in nets:
for ps in six.itervalues(net.params):
for p in ps:
total += p.data.sum() + p.diff.sum()
for bl in six.itervalues(net.blobs):
total += bl.data.sum() + bl.diff.sum()
def test_snapshot(self):
self.solver.snapshot()
# Check that these files exist and then remove them
files = ['model_iter_0.caffemodel', 'model_iter_0.solverstate']
for fn in files:
assert os.path.isfile(fn)
os.remove(fn)
|
py | b4138e29845e3d82e282ef2bb8d138a84f4f317a | # common library
import pandas as pd
import numpy as np
import time
import gym
# RL models from stable-baselines
from stable_baselines import GAIL, SAC
from stable_baselines import ACER
from stable_baselines import PPO2
from stable_baselines import A2C
from stable_baselines import DDPG
from stable_baselines import TD3
from stable_baselines.ddpg.policies import DDPGPolicy
from stable_baselines.common.policies import MlpPolicy, MlpLstmPolicy, MlpLnLstmPolicy
from stable_baselines.common.noise import NormalActionNoise, OrnsteinUhlenbeckActionNoise, AdaptiveParamNoiseSpec
from stable_baselines.common.vec_env import DummyVecEnv
from preprocessing.preprocessors import *
from config import config
# customized env
from env.EnvMultipleStock_train import StockEnvTrain
from env.EnvMultipleStock_validation import StockEnvValidation
from env.EnvMultipleStock_trade import StockEnvTrade
def train_A2C(env_train, model_name, timesteps=25000):
"""A2C model"""
start = time.time()
model = A2C('MlpPolicy', env_train, verbose=0)
model.learn(total_timesteps=timesteps)
end = time.time()
model.save(f"{config.TRAINED_MODEL_DIR}/{model_name}")
print('Training time (A2C): ', (end - start) / 60, ' minutes')
return model
def train_ACER(env_train, model_name, timesteps=25000):
"""ACER model"""
start = time.time()
model = ACER('MlpPolicy', env_train, verbose=0)
model.learn(total_timesteps=timesteps)
end = time.time()
model.save(f"{config.TRAINED_MODEL_DIR}/{model_name}")
print('Training time (ACER): ', (end - start) / 60, ' minutes')
return model
def train_DDPG(env_train, model_name, timesteps=10000):
"""DDPG model"""
# add the noise objects for DDPG
n_actions = env_train.action_space.shape[-1]
param_noise = None
action_noise = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions), sigma=float(0.5) * np.ones(n_actions))
start = time.time()
model = DDPG('MlpPolicy', env_train, param_noise=param_noise, action_noise=action_noise)
model.learn(total_timesteps=timesteps)
end = time.time()
model.save(f"{config.TRAINED_MODEL_DIR}/{model_name}")
print('Training time (DDPG): ', (end-start)/60,' minutes')
return model
def train_PPO(env_train, model_name, timesteps=50000):
"""PPO model"""
start = time.time()
model = PPO2('MlpPolicy', env_train, ent_coef = 0.005, nminibatches = 8)
#model = PPO2('MlpPolicy', env_train, ent_coef = 0.005)
model.learn(total_timesteps=timesteps)
end = time.time()
model.save(f"{config.TRAINED_MODEL_DIR}/{model_name}")
print('Training time (PPO): ', (end - start) / 60, ' minutes')
return model
def train_GAIL(env_train, model_name, timesteps=1000):
"""GAIL Model"""
from stable_baselines.gail import ExpertDataset, generate_expert_traj
start = time.time()
# generate expert trajectories
model = SAC('MlpPolicy', env_train, verbose=1)
generate_expert_traj(model, 'expert_model_gail', n_timesteps=100, n_episodes=10)
# Load dataset
dataset = ExpertDataset(expert_path='expert_model_gail.npz', traj_limitation=10, verbose=1)
model = GAIL('MlpPolicy', env_train, dataset, verbose=1)
model.learn(total_timesteps=timesteps)
end = time.time()
model.save(f"{config.TRAINED_MODEL_DIR}/{model_name}")
print('Training time (GAIL): ', (end - start) / 60, ' minutes')
return model
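# ------------------------------------------------------------------
# Usage sketch (not part of the original pipeline): training a single A2C
# model on a preprocessed dataframe with the helpers above. The date bounds,
# model name and timestep count are placeholders.
def _example_train_single_model(df):
    train = data_split(df, start=20090000, end=20160000)
    env_train = DummyVecEnv([lambda: StockEnvTrain(train)])
    return train_A2C(env_train, model_name="A2C_example", timesteps=1000)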
def DRL_prediction(df,
model,
name,
last_state,
iter_num,
unique_trade_date,
rebalance_window,
turbulence_threshold,
initial):
### make a prediction based on trained model###
## trading env
trade_data = data_split(df, start=unique_trade_date[iter_num - rebalance_window], end=unique_trade_date[iter_num])
env_trade = DummyVecEnv([lambda: StockEnvTrade(trade_data,
turbulence_threshold=turbulence_threshold,
initial=initial,
previous_state=last_state,
model_name=name,
iteration=iter_num)])
obs_trade = env_trade.reset()
for i in range(len(trade_data.index.unique())):
action, _states = model.predict(obs_trade)
obs_trade, rewards, dones, info = env_trade.step(action)
if i == (len(trade_data.index.unique()) - 2):
# print(env_test.render())
last_state = env_trade.render()
df_last_state = pd.DataFrame({'last_state': last_state})
df_last_state.to_csv('results/last_state_{}_{}.csv'.format(name, i), index=False)
return last_state
def DRL_validation(model, test_data, test_env, test_obs) -> None:
###validation process###
for i in range(len(test_data.index.unique())):
action, _states = model.predict(test_obs)
test_obs, rewards, dones, info = test_env.step(action)
def get_validation_sharpe(iteration):
###Calculate Sharpe ratio based on validation results###
df_total_value = pd.read_csv('results/account_value_validation_{}.csv'.format(iteration), index_col=0)
df_total_value.columns = ['account_value_train']
df_total_value['daily_return'] = df_total_value.pct_change(1)
# the sqrt(4) annualisation factor assumes the validation window spans roughly one quarter
sharpe = (4 ** 0.5) * df_total_value['daily_return'].mean() / \
df_total_value['daily_return'].std()
return sharpe
def run_specific_strategy(df, unique_trade_date, rebalance_window, validation_window, strategy='PPO') -> None:
"""Runs a single strategy: one of PPO, A2C or DDPG"""
assert strategy in ['PPO', 'A2C', 'DDPG'], 'Strategy must be one of PPO, A2C or DDPG'
print("============Start {} Strategy============".format(strategy))
# the last state of the previous trading window is fed to the
# current model as its initial state
last_state_ensemble = []
sharpe_list = []
model_use = []
# based on the analysis of the in-sample data
#turbulence_threshold = 140
insample_turbulence = df[(df.datadate<20151000) & (df.datadate>=20090000)]
insample_turbulence = insample_turbulence.drop_duplicates(subset=['datadate'])
insample_turbulence_threshold = np.quantile(insample_turbulence.turbulence.values, .90)
start = time.time()
for i in range(rebalance_window + validation_window, len(unique_trade_date), rebalance_window):
print("============================================")
## initial state is empty
if i - rebalance_window - validation_window == 0:
# initial state
initial = True
else:
# previous state
initial = False
# Tuning turbulence index based on historical data
# Turbulence lookback window is one quarter
end_date_index = df.index[df["datadate"] == unique_trade_date[i - rebalance_window - validation_window]].to_list()[-1]
start_date_index = end_date_index - validation_window*30 + 1
historical_turbulence = df.iloc[start_date_index:(end_date_index + 1), :]
#historical_turbulence = df[(df.datadate<unique_trade_date[i - rebalance_window - validation_window]) & (df.datadate>=(unique_trade_date[i - rebalance_window - validation_window - 63]))]
historical_turbulence = historical_turbulence.drop_duplicates(subset=['datadate'])
historical_turbulence_mean = np.mean(historical_turbulence.turbulence.values)
if historical_turbulence_mean > insample_turbulence_threshold:
# if the mean of the historical data is greater than the 90% quantile of insample turbulence data
# then we assume that the current market is volatile,
# therefore we set the 90% quantile of insample turbulence data as the turbulence threshold
# meaning the current turbulence can't exceed the 90% quantile of insample turbulence data
turbulence_threshold = insample_turbulence_threshold
else:
# if the mean of the historical data is less than the 90% quantile of insample turbulence data
# then we tune up the turbulence_threshold, meaning we lower the risk
turbulence_threshold = np.quantile(insample_turbulence.turbulence.values, 1)
print("turbulence_threshold: ", turbulence_threshold)
############## Environment Setup starts ##############
## training env
train = data_split(df, start=20090000, end=unique_trade_date[i - rebalance_window - validation_window])
env_train = DummyVecEnv([lambda: StockEnvTrain(train)])
## validation env
validation = data_split(df, start=unique_trade_date[i - rebalance_window - validation_window],
end=unique_trade_date[i - rebalance_window])
env_val = DummyVecEnv([lambda: StockEnvValidation(validation,
turbulence_threshold=turbulence_threshold,
iteration=i)])
obs_val = env_val.reset()
############## Environment Setup ends ##############
############## Training and Validation starts ##############
print("======Model training from: ", 20090000, "to ",
unique_trade_date[i - rebalance_window - validation_window])
# print("training: ",len(data_split(df, start=20090000, end=test.datadate.unique()[i-rebalance_window]) ))
# print("==============Model Training===========")
if strategy == 'A2C':
print("======A2C Training========")
model_a2c = train_A2C(env_train, model_name="A2C_30k_dow_{}".format(i), timesteps=30000)
print("======A2C Validation from: ", unique_trade_date[i - rebalance_window - validation_window], "to ",
unique_trade_date[i - rebalance_window])
DRL_validation(model=model_a2c, test_data=validation, test_env=env_val, test_obs=obs_val)
sharpe = get_validation_sharpe(i)
model_ensemble = model_a2c
elif strategy == 'PPO':
print("======PPO Training========")
model_ppo = train_PPO(env_train, model_name="PPO_100k_dow_{}".format(i), timesteps=100000)
print("======PPO Validation from: ", unique_trade_date[i - rebalance_window - validation_window], "to ",
unique_trade_date[i - rebalance_window])
DRL_validation(model=model_ppo, test_data=validation, test_env=env_val, test_obs=obs_val)
sharpe = get_validation_sharpe(i)
model_ensemble = model_ppo
elif strategy == 'DDPG':
print("======DDPG Training========")
model_ddpg = train_DDPG(env_train, model_name="DDPG_10k_dow_{}".format(i), timesteps=10000)
#model_ddpg = train_TD3(env_train, model_name="DDPG_10k_dow_{}".format(i), timesteps=20000)
print("======DDPG Validation from: ", unique_trade_date[i - rebalance_window - validation_window], "to ",
unique_trade_date[i - rebalance_window])
DRL_validation(model=model_ddpg, test_data=validation, test_env=env_val, test_obs=obs_val)
sharpe = get_validation_sharpe(i)
model_ensemble = model_ddpg
sharpe_list.append(sharpe)
# Model Selection based on sharpe ratio
#if (sharpe_ppo >= sharpe_a2c) & (sharpe_ppo >= sharpe_ddpg):
# model_ensemble = model_ppo
# model_use.append('PPO')
#elif (sharpe_a2c > sharpe_ppo) & (sharpe_a2c > sharpe_ddpg):
# model_ensemble = model_a2c
# model_use.append('A2C')
#else:
# model_ensemble = model_ddpg
# model_use.append('DDPG')
############## Training and Validation ends ##############
############## Trading starts ##############
print("======Trading from: ", unique_trade_date[i - rebalance_window], "to ", unique_trade_date[i])
#print("Used Model: ", model_ensemble)
last_state_ensemble = DRL_prediction(df=df, model=model_ensemble, name="ensemble",
last_state=last_state_ensemble, iter_num=i,
unique_trade_date=unique_trade_date,
rebalance_window=rebalance_window,
turbulence_threshold=turbulence_threshold,
initial=initial)
# print("============Trading Done============")
############## Trading ends ##############
end = time.time()
print("Ensemble Strategy took: ", (end - start) / 60, " minutes")
def run_ensemble_strategy(df, unique_trade_date, rebalance_window, validation_window) -> None:
"""Ensemble Strategy that combines PPO, A2C and DDPG"""
print("============Start Ensemble Strategy============")
# for ensemble model, it's necessary to feed the last state
# of the previous model to the current model as the initial state
last_state_ensemble = []
ppo_sharpe_list = []
ddpg_sharpe_list = []
a2c_sharpe_list = []
model_use = []
# based on the analysis of the in-sample data
#turbulence_threshold = 140
insample_turbulence = df[(df.datadate<20151000) & (df.datadate>=20090000)]
insample_turbulence = insample_turbulence.drop_duplicates(subset=['datadate'])
insample_turbulence_threshold = np.quantile(insample_turbulence.turbulence.values, .90)
start = time.time()
for i in range(rebalance_window + validation_window, len(unique_trade_date), rebalance_window):
print("============================================")
## initial state is empty
if i - rebalance_window - validation_window == 0:
# initial state
initial = True
else:
# previous state
initial = False
# Tuning turbulence index based on historical data
# Turbulence lookback window is one quarter
end_date_index = df.index[df["datadate"] == unique_trade_date[i - rebalance_window - validation_window]].to_list()[-1]
start_date_index = end_date_index - validation_window*30 + 1
historical_turbulence = df.iloc[start_date_index:(end_date_index + 1), :]
#historical_turbulence = df[(df.datadate<unique_trade_date[i - rebalance_window - validation_window]) & (df.datadate>=(unique_trade_date[i - rebalance_window - validation_window - 63]))]
historical_turbulence = historical_turbulence.drop_duplicates(subset=['datadate'])
historical_turbulence_mean = np.mean(historical_turbulence.turbulence.values)
if historical_turbulence_mean > insample_turbulence_threshold:
# if the mean of the historical data is greater than the 90% quantile of insample turbulence data
# then we assume that the current market is volatile,
# therefore we set the 90% quantile of insample turbulence data as the turbulence threshold
# meaning the current turbulence can't exceed the 90% quantile of insample turbulence data
turbulence_threshold = insample_turbulence_threshold
else:
# if the mean of the historical data is less than the 90% quantile of insample turbulence data
# then we tune up the turbulence_threshold, meaning we lower the risk
turbulence_threshold = np.quantile(insample_turbulence.turbulence.values, 1)
print("turbulence_threshold: ", turbulence_threshold)
############## Environment Setup starts ##############
## training env
train = data_split(df, start=20090000, end=unique_trade_date[i - rebalance_window - validation_window])
env_train = DummyVecEnv([lambda: StockEnvTrain(train)])
## validation env
validation = data_split(df, start=unique_trade_date[i - rebalance_window - validation_window],
end=unique_trade_date[i - rebalance_window])
env_val = DummyVecEnv([lambda: StockEnvValidation(validation,
turbulence_threshold=turbulence_threshold,
iteration=i)])
obs_val = env_val.reset()
############## Environment Setup ends ##############
############## Training and Validation starts ##############
print("======Model training from: ", 20090000, "to ",
unique_trade_date[i - rebalance_window - validation_window])
# print("training: ",len(data_split(df, start=20090000, end=test.datadate.unique()[i-rebalance_window]) ))
# print("==============Model Training===========")
print("======A2C Training========")
model_a2c = train_A2C(env_train, model_name="A2C_30k_dow_{}".format(i), timesteps=30000)
print("======A2C Validation from: ", unique_trade_date[i - rebalance_window - validation_window], "to ",
unique_trade_date[i - rebalance_window])
DRL_validation(model=model_a2c, test_data=validation, test_env=env_val, test_obs=obs_val)
sharpe_a2c = get_validation_sharpe(i)
print("A2C Sharpe Ratio: ", sharpe_a2c)
print("======PPO Training========")
model_ppo = train_PPO(env_train, model_name="PPO_100k_dow_{}".format(i), timesteps=100000)
print("======PPO Validation from: ", unique_trade_date[i - rebalance_window - validation_window], "to ",
unique_trade_date[i - rebalance_window])
DRL_validation(model=model_ppo, test_data=validation, test_env=env_val, test_obs=obs_val)
sharpe_ppo = get_validation_sharpe(i)
print("PPO Sharpe Ratio: ", sharpe_ppo)
print("======DDPG Training========")
model_ddpg = train_DDPG(env_train, model_name="DDPG_10k_dow_{}".format(i), timesteps=10000)
#model_ddpg = train_TD3(env_train, model_name="DDPG_10k_dow_{}".format(i), timesteps=20000)
print("======DDPG Validation from: ", unique_trade_date[i - rebalance_window - validation_window], "to ",
unique_trade_date[i - rebalance_window])
DRL_validation(model=model_ddpg, test_data=validation, test_env=env_val, test_obs=obs_val)
sharpe_ddpg = get_validation_sharpe(i)
ppo_sharpe_list.append(sharpe_ppo)
a2c_sharpe_list.append(sharpe_a2c)
ddpg_sharpe_list.append(sharpe_ddpg)
# Model Selection based on sharpe ratio
if (sharpe_ppo >= sharpe_a2c) & (sharpe_ppo >= sharpe_ddpg):
model_ensemble = model_ppo
model_use.append('PPO')
elif (sharpe_a2c > sharpe_ppo) & (sharpe_a2c > sharpe_ddpg):
model_ensemble = model_a2c
model_use.append('A2C')
else:
model_ensemble = model_ddpg
model_use.append('DDPG')
############## Training and Validation ends ##############
############## Trading starts ##############
print("======Trading from: ", unique_trade_date[i - rebalance_window], "to ", unique_trade_date[i])
#print("Used Model: ", model_ensemble)
last_state_ensemble = DRL_prediction(df=df, model=model_ensemble, name="ensemble",
last_state=last_state_ensemble, iter_num=i,
unique_trade_date=unique_trade_date,
rebalance_window=rebalance_window,
turbulence_threshold=turbulence_threshold,
initial=initial)
# print("============Trading Done============")
############## Trading ends ##############
end = time.time()
print("Ensemble Strategy took: ", (end - start) / 60, " minutes")
|
py | b4138efadc183f84e49a2bec3f66b9dc6989338e | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2017 The Raven Core developers
# Copyright (c) 2018 The RPG Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Wallet encryption"""
import time
from test_framework.test_framework import RPGTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
class WalletEncryptionTest(RPGTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
passphrase = "WalletPassphrase"
passphrase2 = "SecondWalletPassphrase"
# Make sure the wallet isn't encrypted first
address = self.nodes[0].getnewaddress()
privkey = self.nodes[0].dumpprivkey(address)
assert_equal(privkey[:1], "c")
assert_equal(len(privkey), 52)
# Encrypt the wallet
self.nodes[0].node_encrypt_wallet(passphrase)
self.start_node(0)
# Test that the wallet is encrypted
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Check that walletpassphrase works
self.nodes[0].walletpassphrase(passphrase, 2)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
# Check that the timeout is right
time.sleep(2)
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Test wrong passphrase
assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase + "wrong", 10)
# Test walletlock
self.nodes[0].walletpassphrase(passphrase, 84600)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
self.nodes[0].walletlock()
assert_raises_rpc_error(-13, "Please enter the wallet passphrase with walletpassphrase first", self.nodes[0].dumpprivkey, address)
# Test passphrase changes
self.nodes[0].walletpassphrasechange(passphrase, passphrase2)
assert_raises_rpc_error(-14, "wallet passphrase entered was incorrect", self.nodes[0].walletpassphrase, passphrase, 10)
self.nodes[0].walletpassphrase(passphrase2, 10)
assert_equal(privkey, self.nodes[0].dumpprivkey(address))
if __name__ == '__main__':
WalletEncryptionTest().main()
|
py | b4138f06c304915fe53f329ff8a5c29d519a64fb | """UHB URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, include, re_path
from django.contrib import admin
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
schema_view = get_schema_view(
openapi.Info(
title="Newsfeed API",
default_version='v1',
description="API documentation of Newfeed Portal",
terms_of_service="https://www.njnur.github.io"
),
public=True,
permission_classes=(permissions.AllowAny,),
)
urlpatterns = [
re_path(r'^doc(?P<format>\.json|\.yaml)$',
schema_view.without_ui(cache_timeout=0), name='schema-json'),
path('doc/', schema_view.with_ui('swagger', cache_timeout=0),
name='schema-swagger-ui'), # <-- Here
path('redoc/', schema_view.with_ui('redoc', cache_timeout=0),
name='schema-redoc'), # <-- Here
path('admin/doc/', include('django.contrib.admindocs.urls')),
path('admin/', admin.site.urls),
path('o/', include('oauth2_provider.urls', namespace='oauth2_provider')),
path('user/', include(('apps.user.urls', 'user'), namespace='user')),
path('', include(('apps.newsfeed.urls', 'newsfeed'), namespace='newsfeed')),
]
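# Illustrative sketch only, following the steps in the module docstring above: a function
# view from a hypothetical app (`my_app` and its `home` view are examples, not part of
# this project) could be wired in like this:
#
#     from my_app import views
#     urlpatterns += [
#         path('home/', views.home, name='home'),
#     ]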
|
py | b4139123d04e53fc36cf2590123d0c359a64d06c | import asyncio
from dataclasses import dataclass
from dbdaora import (
DictFallbackDataSource,
DictMemoryDataSource,
HashRepository,
make_hash_service,
)
@dataclass
class Person:
id: str
name: str
age: int
def make_person(name: str, age: int) -> Person:
return Person(name.replace(' ', '_').lower(), name, age)
class PersonRepository(HashRepository[Person, str]):
...
async def make_memory_data_source() -> DictMemoryDataSource:
return DictMemoryDataSource()
async def make_fallback_data_source() -> DictFallbackDataSource:
return DictFallbackDataSource()
service = asyncio.run(
make_hash_service(
PersonRepository,
memory_data_source_factory=make_memory_data_source,
fallback_data_source_factory=make_fallback_data_source,
repository_expire_time=60,
)
)
person = make_person('John Doe', 33)
asyncio.run(service.add(person))
geted_person = asyncio.run(service.get_one(person.id))
print(geted_person)
|
py | b4139125f24ffcd9f50094ea3eeb13bc356d43f2 | # -*- coding: utf-8 -*-
# @Author: Catofes
# @Date: 2015-08-15
'''
Class to stores everything into a json file.
'''
import json
from const import Constant
from singleton import Singleton
class Storage(Singleton):
def __init__(self):
'''
Database stores every info.
version int
#if value in file is unequal to value defined in this class.
#An database update will be applied.
user dict:
username str
key str
collections list:
collection_info(dict):
collection_name str
collection_type str
collection_describe str
collection_songs list:
song_id(int)
songs dict:
song_id(int) dict:
song_id int
artist str
song_name str
mp3_url str
album_name str
quality str
lyric str
tlyric str
player_info dict:
player_list list:
songs_id(int)
playing_list list:
songs_id(int)
playing_mode int
playing_offset int
:return:
'''
if hasattr(self, '_init'):
return
self._init = True
self.version = 4
self.database = {
'version': 4,
'user': {
'username': '',
'password': '',
'user_id': '',
'nickname': '',
},
'collections': [[]],
'songs': {},
'player_info': {
'player_list': [],
'player_list_type': '',
'player_list_title': '',
'playing_list': [],
'playing_mode': 0,
'idx': 0,
'ridx': 0,
'playing_volume': 60,
}
}
self.storage_path = Constant.storage_path
self.cookie_path = Constant.cookie_path
self.file = None
def load(self):
try:
self.file = file(self.storage_path, 'r')
self.database = json.loads(self.file.read())
self.file.close()
except (ValueError, OSError, IOError):
self.__init__()
if not self.check_version():
self.save()
def check_version(self):
if self.database['version'] == self.version:
return True
else:
# Should do some update.
if self.database['version'] == 1:
self.database['version'] = 2
self.database['cache'] = False
elif self.database['version'] == 2:
self.database['version'] = 3
self.database.pop('cache')
elif self.database['version'] == 3:
self.database['version'] = 4
self.database['user'] = {'username': '',
'password': '',
'user_id': '',
'nickname': ''}
self.check_version()
return False
def save(self):
self.file = file(self.storage_path, 'w')
self.file.write(json.dumps(self.database))
self.file.close()
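# Minimal usage sketch (assumes Constant.storage_path points at a readable/writable
# location): load the persisted database, tweak one player setting and save it back.
#
#     store = Storage()
#     store.load()
#     store.database['player_info']['playing_volume'] = 80
#     store.save()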
|
py | b413917afab0e1b1ce364d16aa84a1cc4cecd3a4 | import os
import mmap
import cPickle as pickle
import operator
from functools32 import lru_cache
from ..data import Store, ObjectStore
from nel import logging
log = logging.getLogger()
class FileObjectStore(ObjectStore):
def __init__(self, path):
self.path = path
self.store = mmdict(path)
@classmethod
def get_protocol(cls):
return 'file'
def iter_ids(self):
return self.store.iterkeys()
def exists(self, oid):
return oid in self.store
def fetch(self, oid):
return self.store[oid]
def fetch_many(self, oids):
return [self.fetch(oid) for oid in oids]
def fetch_all(self):
return self.store.iteritems()
def save_many(self, obj_iter):
self.store.close()
mmdict.write(self.path, ((o['_id'], o) for o in obj_iter))
self.store = mmdict(self.path)
@classmethod
def GetPath(cls, store_id, uri):
path = store_id.replace(':', '/')
if uri and uri.startswith('file://'):
path = os.path.join(uri[7:], path)
return path
@classmethod
def Get(cls, store_id, uri='file://', **kwargs):
return cls(cls.GetPath(store_id, uri))
class mmdict(object):
def __init__(self, path):
self.path = path
self.index = {}
index_path = self.path + '.index'
if os.path.exists(index_path):
log.debug('Loading mmap store: %s ...' % index_path)
with open(index_path, 'rb') as f:
self.index = dict(self.deserialise(f))
self.data_file = open(path + '.data', 'rb')
self.data_mmap = mmap.mmap(self.data_file.fileno(), 0, prot=mmap.PROT_READ)
else:
log.warn('No existing mmap store found: %s ...' % index_path)
@staticmethod
def serialise(obj, f):
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)
@staticmethod
def deserialise(f):
return pickle.load(f)
@staticmethod
def static_itervalues(path):
with open(path + '.data', 'rb') as f:
while True:
try:
yield mmdict.deserialise(f)
except EOFError: break
def iteritems(self):
sorted_idx = sorted(self.index.iteritems(), key=operator.itemgetter(1))
for i, v in enumerate(self.itervalues()):
yield (sorted_idx[i][0], v)
def iterkeys(self):
return self.index.iterkeys()
def itervalues(self):
self.data_mmap.seek(0)
while True:
try:
yield self.deserialise(self.data_mmap)
except EOFError: break
def __len__(self):
return len(self.index)
def __contains__(self, key):
return key in self.index
@lru_cache(maxsize=20000)
def __getitem__(self, key):
if key not in self:
return None
self.data_mmap.seek(self.index[key])
return self.deserialise(self.data_mmap)
def __enter__(self):
return self
def close(self):
if hasattr(self, 'data_mmap') and self.data_mmap != None:
self.data_mmap.close()
if hasattr(self, 'data_file') and self.data_file != None:
self.data_file.close()
def __exit__(self, type, value, traceback):
self.close()
def __del__(self):
self.close()
@staticmethod
def write(path, iter_kvs):
index = []
with open(path + '.data', 'wb') as f:
for key, value in iter_kvs:
index.append((key, f.tell()))
mmdict.serialise(value, f)
with open(path + '.index','wb') as f:
mmdict.serialise(index, f)
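# Minimal usage sketch: build a store on disk and read it back through the mmap-backed
# dictionary (the '/tmp/example_store' path and the sample records are just examples):
#
#     mmdict.write('/tmp/example_store', [('a', {'x': 1}), ('b', {'x': 2})])
#     store = mmdict('/tmp/example_store')
#     print(store['a'], len(store))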
|
py | b413918c794a2afa6d9a68ae721d362820bb3ee2 | import os
import logging
import argparse
from src.common.translate import translate_time_expression_templates, get_client
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--template_dir", default="data/templates/distribution", help="Templates directory")
parser.add_argument("--lang", default=None, type=str, required=False,
help="Language code. If not specified, computes for all")
args = parser.parse_args()
translate_client = get_client()
# Iterate over languages
if args.lang is not None:
target_langs = [args.lang]
else:
target_langs = [f.replace(".json", "") for f in os.listdir("data/templates/start_end") if "en" not in f]
en_templates = [line.strip() for line in open(f"{args.template_dir}/en.txt")]
for target in target_langs:
logger.info(target)
target_templates = translate_time_expression_templates(translate_client, en_templates, target)
with open(f"{args.template_dir}/{target}.txt", "w", encoding="utf-8") as f_out:
for template in target_templates:
f_out.write(template + "\n")
if __name__ == '__main__':
main()
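# Example invocation (the script filename is an assumption; adjust to however this file
# is saved locally, and 'fr' is just an example language code):
#
#     python translate_templates.py --template_dir data/templates/distribution --lang fr
#
# Omitting --lang translates every non-English language found in data/templates/start_end.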
|
py | b413935d65dd47b32757c7f5c3bdd785ae78c64f | from flask import abort, jsonify
from flask_restful import Resource
from flask_simplelogin import login_required
from grocery_classifier.models import Product
class ProductResource(Resource):
def get(self):
products = Product.query.all() or abort(204)
return jsonify(
{"products": [product.to_dict() for product in products]}
)
@login_required(basic=True, username="admin")
def post(self):
"""
Creates a new product.
Only admin user authenticated using basic auth can post
Basic takes base64 encripted username:password.
# curl -XPOST localhost:5000/api/v1/product/ \
# -H "Authorization: Basic Y2h1Y2s6bm9ycmlz" \
# -H "Content-Type: application/json"
"""
return NotImplementedError(
"Someone please complete this example and send a PR :)"
)
class ProductItemResource(Resource):
def get(self, product_id):
product = Product.query.filter_by(id=product_id).first() or abort(404)
return jsonify(product.to_dict())
|
py | b41394358ccefefea2abec05380561d20ddb86c6 | import numpy as np
import tensorflow as tf
from rl.core import Episodes
from rl.core.model import Algorithm
class VanillaPolicyGradient(Algorithm):
def __init__(self, environment, random_seed, policy_factory, advantage_function,
Rollout, min_steps_per_batch):
self._environment = environment
self._random_seed = random_seed
self._policy_factory = policy_factory
self._advantage_function = advantage_function
self._Rollout = Rollout
self._min_steps_per_batch = min_steps_per_batch
self._graph = tf.Graph()
with self._graph.as_default():
# set random seed here, or somewhere
tf.set_random_seed(self._random_seed)
self._session = tf.Session(graph=self._graph)
self._policy = policy_factory.create_policy(
observation_space=environment.observation_space,
action_space=environment.action_space,
session=self._session)
self._session.run(tf.global_variables_initializer())
self._session.run(tf.local_variables_initializer())
self._policy_return = -np.inf
self._policy_steps = -np.inf
def __enter__(self):
self._session.__enter__()
self._advantage_function.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._advantage_function.__exit__(exc_type, exc_val, exc_tb)
self._session.__exit__(exc_type, exc_val, exc_tb)
def action(self, observation, deterministic):
return self._policy.action(observation, deterministic)
def update(self):
episodes = self._generate_episodes()
self._policy.update(
observations=self._get_batch_observations(episodes),
actions=self._get_batch_actions(episodes),
advantages=self._advantage_function.get_advantages(episodes))
self._advantage_function.update(episodes=episodes)
def _generate_episodes(self):
episodes = Episodes()
while episodes.num_steps() < self._min_steps_per_batch:
episode = self._Rollout(
environment=self._environment,
policy=self._policy,
random_seed=self._random_seed,
deterministic=False,
render=False)
episodes.append(episode)
return episodes
def _get_batch_observations(self, episodes):
return [observation
for episode in episodes
for observation in episode.get_observations() ]
def _get_batch_actions(self, episodes):
return [action
for episode in episodes
for action in episode.get_actions() ]
|
py | b413970d632cbd4cc3873ff71a5159063021f6d1 | from slab_nlp.topic_bert import BertTopicSLab
if __name__ == '__main__':
docs = open(r'C:\Document\中共嘉兴\text.txt', encoding='UTF-8').readlines()
model = BertTopicSLab('中共BERT', docs)
# model.hierarchical_model()
model.model()
model.hierarchical_model()
# model.hierarchical_compare(20, 30)
|
py | b4139975aafae71c3a1eb2f9db90287968dbd73d | s = 'abc-abcxyz'
print(s.removeprefix('abc-'))
# abcxyz
print(s.removeprefix('aabc-'))
# abc-abcxyz
print(s.lstrip('abc-'))
# xyz
def my_removeprefix(s, prefix):
if s.startswith(prefix):
return s[len(prefix):]
else:
return s
print(my_removeprefix(s, 'abc-'))
# abcxyz
s = 'abcxyz-xyz'
print(s.removesuffix('-xyz'))
# abcxyz
print(s.removesuffix('-xyzz'))
# abcxyz-xyz
def my_removesuffix(s, suffix):
return s[:-len(suffix)] if s.endswith(suffix) else s
print(my_removesuffix(s, '-xyz'))
# abcxyz
s = 'abc-abcxyz-xyz'
print(s.removeprefix('abc-').removesuffix('-xyz'))
# abcxyz
print(my_removeprefix(my_removesuffix(s, '-xyz'), 'abc-'))
# abcxyz
|
py | b4139b63305ffafb079ada224c6d4e2cd4ce6315 | # date: 2019年11月1日
# author: lw
# e-mail: [email protected]
# description: 本文件主要实现基于位置索引和双字索引的诗文查询函数,返回匹配的结果
from data_import_process import import_d_index # loader for the two-character index
from data_import_process import import_p_index # loader for the positional index
# from data_import_process import import_poets
from data_import_process import import_author_title_index
from data_import_process import import_poets_info
import re
# Define the query types
query_type = {"title":1,"author":2,"paragraphs":3, "mix":4}
# Load the indexes and the poem information to build the data used by the query system
author_index, title_index = import_author_title_index()
double_index = import_d_index()
position_index = import_p_index()
# count,poets = import_poets()
count,poets_info = import_poets_info()
# Two-character query function: for a two-character phrase, return the result by looking it up directly in the two-character index
# param: sentence  phrase text
# return: list of matching document ids
# date: 2019.11.2
def dw_query(sentence):
if sentence in double_index.keys():
return double_index[sentence]
else:
return []
# Author query function: return the result (a list of poem ids) by looking up the author index directly
# param: sentence  phrase text
# return: list of matching document ids
# date: 2019.11.2
def author_query(sentence):
if sentence in author_index.keys():
return author_index[sentence]
else:
return []
# Handle phrase or single-character queries that are not two-character queries; title queries also go through this function
# param: word_list  list of single characters; "," and "。" are NOT removed from it
# param: pos_index  positional index, either the title positional index or the poem-body positional index
# return: a list of document ids, or an empty structure if nothing is found
# date: 2019.11.2
def phrase_query(word_list,pos_index):
    res_index = [] # document ids of the results to be returned
    pre_dict = dict() # positional information of the previous character
    gap = 1
    for i in range(len(word_list)):
        if word_list[i] in [',','。']: # a comma or full stop is not matched itself; it only widens the positional gap
            gap = gap+1
            continue
        elif i==0 :
            pre_dict = dict(pos_index[word_list[0]]) # positional information of the first character
            res_index.extend(pre_dict.keys())
            continue
        cur_dict = dict(pos_index[word_list[i]]) # term dictionary of the current character
        res_index = [x for x in res_index if x in cur_dict.keys()]
        temp_index = []
        for item in res_index:
            tt = [ i for i in pre_dict[item] if i+gap in cur_dict[item]]
            if len(tt) != 0:
                temp_index.append(item)
        res_index = temp_index
        pre_dict = cur_dict
        gap =1
    return [int(item) for item in res_index] # document ids in the positional index are strings, so convert them to ints
# And/or/not query over the poem text field
def extend_query(query_sentence):
    query_items = re.findall(r"\(|\)|[Aa][Nn][Dd]|[Oo][Rr]|[Nn][Oo][Tt]|[^a-zA-z\(\) ]*",
                             query_sentence)  # split the query into tokens with a regular expression
    query_items_postfix = convert2postfixexpr(query_items) # convert to a postfix-form query
    result = []
    print(query_items_postfix)
    for item in query_items_postfix:
        print(result)
        if re.fullmatch(r"[Aa][Nn][Dd]", item): # merge the top two results on the result stack
            second = result.pop()
            first = result.pop()
            print("and")
            print([i for i in first if i in second])
            result.append(list(set(first).intersection(set(second))))
        elif re.fullmatch(r"[Oo][Rr]", item): # or: union of the two results, with duplicates removed
            second = result.pop()
            first = result.pop()
            print("or")
            result.append(list(set(first).union(set(second))))
        elif re.fullmatch(r"[Nn][Oo][Tt]", item): # not: use count to take the complement against the whole collection; taking the complement directly like this is expensive and should be optimised
            first = result.pop()
            print("not")
            result.append(list(set(range(count)).difference(set(first))))
        else: # an ordinary query term: run the lookup and push the result onto the result stack
            text_list = [i for i in item]
            print(text_list)
            if (',' not in text_list and '。' not in text_list \
                    and len(text_list) == 2): # call the two-character query handler
                result.append(dw_query(item))
                print(result)
            else:
                result.append(phrase_query(text_list, position_index)) # call the position-based handler to search the poem body
                print(result)
    if len(result) == 1:
        return result.pop()
    else:
        return []
# Query function; the argument is the query text
# The function inspects the input query text: if it is a two-character query it uses the two-character index,
# otherwise it uses the positional index to run a phrase-based query; the two-character and phrase queries are each implemented in their own function,
# and this function only dispatches the call and does the related handling
# The default is the mixed query mode, which decides by itself how to obtain the result and is slower; for precise, fast queries call the direct query module of the corresponding category
# param: query_sentence  query text
# return: list of ids of the matching documents
# date: 2019.11.2
def query(query_sentence,type):
    # Preprocess the query text into a list of single characters; phrase queries ignore "," and "。"
    if query_type[type] == 1: # title query
        text_list = [i for i in query_sentence]
        return phrase_query(text_list,title_index)
    elif query_type[type] == 2: # call the author query module
        return author_query(query_sentence)
    elif query_type[type] == 3: # poem-body query module, supports and/or/not queries
        return extend_query(query_sentence)
    elif query_type[type] == 4: # mixed query mode: no specific field is given, so every field is queried and all results are merged before returning
        text_list = [i for i in query_sentence]
        return []
    # Return the query result; if nothing is found, return an empty list
    return []
# Convert the extracted and/or/not expression item list into postfix form, which is then used for the and/or/not query
# Precedence: not is highest, and comes next, or is lowest
# param: expr_list  the token list produced by the regular expression
# Returns a list (used as a stack) holding the query expression in postfix form
# date: 2019.11.6
def convert2postfixexpr(expr_list):
    sym_stack = list() # operator stack
    item_stack = list() # query item stack
    queue_dict = {")":4,"not":3,"and":2,"or":1,"(":0} # precedence of the and/or/not operators
    for item in expr_list :
        if re.fullmatch(r"\(|\)|[Aa][Nn][Dd]|[Oo][Rr]|[Nn][Oo][Tt]",item): # match an operator or a parenthesis
            if item == '(': # an opening parenthesis is pushed directly; it must not pop operators off the stack
                sym_stack.append(item)
                continue
            while(len(sym_stack)!=0): # while the stack is not empty, keep popping the top and comparing
                top = sym_stack.pop() # stack top
                if queue_dict[item.lower()] <= queue_dict[top.lower()]:
                    if top != ')': # not a ")", just move the popped operator to the item stack
                        item_stack.append(top)
                        continue
                    else: # the top is ")", so pop every operator inside the parentheses
                        while (len(sym_stack) != 0):
                            t_top = sym_stack.pop()
                            if t_top == '(':
                                break
                            item_stack.append(t_top)
                else: # higher precedence: push the top back, push the item and stop
                    sym_stack.append(top)
                    sym_stack.append(item)
                    break
            if len(sym_stack) == 0: # empty , push stack
                sym_stack.append(item)
        elif item != "":
            item_stack.append(item) # a query term is pushed onto the item stack directly
    # Finally, if the operator stack is not empty, move everything that is not a parenthesis to item_stack
    while(len(sym_stack)!=0):
        top = sym_stack.pop()
        if top not in ["(",")"]:
            item_stack.append(top)
    # Return the query list in postfix form
    return item_stack
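# Illustrative example (the two single-character terms are placeholders only): for the
# token list ['月', 'and', 'not', '风'] this function returns ['月', '风', 'not', 'and'],
# which extend_query then evaluates left to right with a result stack.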
# Convert poem ids into the corresponding list of poems; this function can be modified to control what is returned
# param: index_list  poem ids
# return: a list of poem information records
def convert_result(index_list):
if len(index_list)==0 :
return []
return [poets_info[int(i)] for i in index_list]
# Convert poem ids into the corresponding list of poems; this function can be modified to control what is returned
# The matched terms are wrapped in bold tags to prepare them for front-end display
# param: index_list  poem ids
# return: a list of poem information records
def convert_result_for_html(index_list,query_sentence,type):
result = []
if len(index_list)==0 :
return result
else:
items = re.findall(r"[^a-zA-z\(\) ]*", query_sentence)
items = [i for i in items if i != ""]
for i in range(len(poets_info)):
if i in index_list:
temp = dict(poets_info[i])
                for item in items: # wrap each matched term in <b> tags for bold display
if str(temp[type]).find(item) != -1:
temp[type] = str(temp[type]).replace(item,"<b>"+item+"</b>")
result.append(temp)
return result
# # test calls
# index_list = query("李白","title")
# print("result_list",len(index_list),index_list)
# print(convert_result(index_list)) |
py | b4139c03c5e556021d6d2cc943d489d9fb7cdc78 | # Copyright 2021 (David) Siu-Kei Muk. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from afb.core.specs import param
from afb.utils import misc
def make_from_config(mfr):
sig = {
"config": param.ParameterSpec(
str,
description="Config file in YAML/JSON, containing a single "
'object specification for class "%s"' % mfr.cls.__name__,
required=True),
}
def from_config(config):
"""Constructs object from object specification in config file.
This function constructs the object from object specification contained
in a config file.
        Currently, only YAML and JSON are supported. The format is determined by the
file extension, where
- `.yaml`, `yml` -> YAML
- `.json` -> JSON
The config file must contain exactly one object specification. That is, it
must contain a singleton dictionary that maps a factory name to its
parameters.
"""
params = misc.load_config(config)
key, args = next(iter(params.items()))
return mfr.make(method=key, params=args)
return {"factory": from_config, "signature": sig}
|
py | b4139c1245718b1b871a5f289b29091157262718 | # coding: utf-8
"""
Control-M Services
Provides access to BMC Control-M Services # noqa: E501
OpenAPI spec version: 9.20.30
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from clients.ctm_saas_client.configuration import Configuration
class AssociateData(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'type': 'str',
'name': 'str'
}
attribute_map = {
'type': 'type',
'name': 'name'
}
def __init__(self, type=None, name=None, _configuration=None): # noqa: E501
"""AssociateData - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._type = None
self._name = None
self.discriminator = None
if type is not None:
self.type = type
if name is not None:
self.name = name
@property
def type(self):
"""Gets the type of this AssociateData. # noqa: E501
associate typed REQUIRED # noqa: E501
:return: The type of this AssociateData. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this AssociateData.
associate typed REQUIRED # noqa: E501
:param type: The type of this AssociateData. # noqa: E501
:type: str
"""
self._type = type
@property
def name(self):
"""Gets the name of this AssociateData. # noqa: E501
associate name REQUIRED # noqa: E501
:return: The name of this AssociateData. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this AssociateData.
associate name REQUIRED # noqa: E501
:param name: The name of this AssociateData. # noqa: E501
:type: str
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AssociateData, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AssociateData):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, AssociateData):
return True
return self.to_dict() != other.to_dict()
|
py | b4139c7b4440d2ccf118dcabf3bb11f9903f81f7 | """
This example displays the basic animations in sequence, at a five second interval.
For NeoPixel FeatherWing. Update pixel_pin and pixel_num to match your wiring if using
a different form of NeoPixels.
This example may not work on SAMD21 (M0) boards.
"""
import board
import neopixel
from adafruit_led_animation.animation.solid import Solid
from adafruit_led_animation.animation.colorcycle import ColorCycle
from adafruit_led_animation.animation.blink import Blink
from adafruit_led_animation.animation.comet import Comet
from adafruit_led_animation.animation.chase import Chase
from adafruit_led_animation.animation.pulse import Pulse
from adafruit_led_animation.sequence import AnimationSequence
from adafruit_led_animation.color import (
PURPLE,
WHITE,
AMBER,
JADE,
TEAL,
PINK,
MAGENTA,
ORANGE,
)
# Update to match the pin connected to your NeoPixels
pixel_pin = board.D6
# Update to match the number of NeoPixels you have connected
pixel_num = 32
pixels = neopixel.NeoPixel(pixel_pin, pixel_num, brightness=0.5, auto_write=False)
solid = Solid(pixels, color=PINK)
blink = Blink(pixels, speed=0.5, color=JADE)
colorcycle = ColorCycle(pixels, speed=0.4, colors=[MAGENTA, ORANGE, TEAL])
chase = Chase(pixels, speed=0.1, color=WHITE, size=3, spacing=6)
comet = Comet(pixels, speed=0.01, color=PURPLE, tail_length=10, bounce=True)
pulse = Pulse(pixels, speed=0.1, color=AMBER, period=3)
animations = AnimationSequence(
solid, blink, colorcycle, chase, comet, pulse, advance_interval=5, auto_clear=True,
)
while True:
animations.animate()
|
py | b4139cb6e5c26d62d0fd590614fe0d39b015b93e | import os
import _thread
from pracmln.mln.util import colorize
from pracmln.utils.latexmath2png import math2png
from pracmln.utils.visualization import DECLARATIONS
def __splitdict(d, dnew):
'''
'''
if not d:
yield dnew
return
key, values = d.popitem()
for v in values:
dnew_ = dict(dnew)
dnew_[key] = v
for d_ in __splitdict(dict(d), dnew_): yield d_
def splitd(d):
return __splitdict(d, {})
def partition(l, s):
'''
Partitions the list ``l`` into sublists of size ``s`` and returns a generator
iterating over them.
'''
for i in range(0, len(l), s): yield l[i:i+s]
def prac_heading(s, upper=True, color='green'):
'''
Returns a colorized and formatted string for pretty priting module
headings.
:param s: the string to be formatted
:param upper: (bool) if string should be converted to uppercase. default
is true
:param color: the color in which the heading should be printed. default
is green
:return: the colorized and formatted string
'''
b = colorize('+{}+'.format(''.ljust(len(s)+2, '=')), (None, color, True), True)
t = colorize('| {} |'.format(s.upper() if upper else s), (None, color, True), True)
return '\n{}\n{}\n{}\n'.format(b, t, b)
def synchronized(lock):
'''
Synchronization decorator.
'''
def wrap(f):
def func(*args, **kw):
lock.acquire()
try:
return f(*args, **kw)
finally:
lock.release()
return func
return wrap
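# Minimal usage sketch for the decorator above (the lock object and function name are
# assumptions for illustration):
#
#     import threading
#     _lock = threading.Lock()
#
#     @synchronized(_lock)
#     def update_shared_state():
#         ...  # body runs with _lock held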
def get_query_png(queries, dbs, filename='cond_prob', filedir='/tmp', skolemword=''):
'''
Preprocessing of png generation: assemble latex code for argmax term
:param queries: list or comma-separated string of query predicates
:param dbs: evidence database
:param filename: filename prefix of the generated file
:param filedir: location of temporary generated file
:param skolemword: string value for skolemword looked up in mongo database
:return: a png string generated by math2png
'''
safefilename = '{}-{}-{}'.format(filename, os.getpid(), _thread.get_ident())
declarations = DECLARATIONS + [r'''\newcommand{\simil}[1]{\ensuremath{sim\left(\begin{array}{cc}#1\end{array}\right)}}''']
if isinstance(queries, str):
queries = queries.split(',')
evidencelist = []
if isinstance(dbs, list):
for db in dbs:
evidencelist.extend([e for e in list(db.evidence.keys()) if db.evidence[e] == 1.0])
elif isinstance(dbs, str):
evidencelist = dbs.split(',')
else:
evidencelist.extend([e if dbs.evidence[e] == 1.0 else '!' + e for e in list(dbs.evidence.keys())])
# escape possibly occurring underscores in predicate names
query = r'''\\'''.join([r'''\text{{ {0} }} '''.format(q.replace('_', '\_')) for q in queries])
evidence = r'''\\'''.join([r'''\text{{ {0} }} '''.format(e.replace('_', '\_')) for e in evidencelist])
head = '\\prod_{{ {0} }}'.format(query)
underset = '_{{ \\tiny\\textit{{adt}} \in \\textit{{ADT-lib}} }}'
query = r'''\text{{ adt, }} '''
skolem = '\\text{{ {} }}: '.format(skolemword)
# generate actual equation
head = r'''{0}\argmax{1}'''.format(head, underset)
bracket_term = r'''\simil{{ \begin{{array}}{{c}}{0}\end{{array}} & {1}\begin{{array}}{{c}}{2}\end{{array}} }}'''.format(
query, skolem, evidence)
eq = r'''{} {}'''.format(head, bracket_term)
return math2png(eq, filedir, declarations=declarations, filename=safefilename, size=10)
#===============================================================================
# main function for testing only!
#===============================================================================
if __name__ == '__main__':
# print list(partition(range(2), 3))
print(splitd({1:[2,3,4], 2:[5,7], 3:[8]}))
|
py | b4139d4727f5dc0eac224a53d6a0519d26c06d41 | #!/usr/bin/env python3
import csv
import numpy
def placefield_func(X, Y, centerX, centerY, max_rate, width):
return max_rate*numpy.exp(-0.5*(X-centerX)**2/width**2-0.5*(Y-centerY)**2/width**2)
def shift2D(mat, shift0, shift1):
ret=numpy.roll(numpy.roll(mat, shift0, axis=0), shift1, axis=1) #torus
#non-torus
if shift0==1:
ret[0,:]=0.0
elif shift0==-1:
ret[-1,:]=0.0
if shift1==1:
ret[:,0]=0.0
elif shift1==-1:
ret[:,-1]=0.0
return ret
slope=1.0
threshold=2.0/1000.0
def ReLU(x):
return numpy.maximum(slope*(x-threshold), 0.0)
#parameters
time_pitch=1.0 #ms
save_pitch=10
simlen_sec=150.0
simlen=int(simlen_sec*1000.0/time_pitch)
xlen=50
ylen=50
tauI=10.0 #ms
eta=1.0
taudeltaW=30.0*1000.0 #ms
tauSTD=300.0 #ms
tauSTF=200.0 #ms
coefSTD=0.4
winh=5e-4
Wmin=0.0
Wsum=1.0
Wsuminit=0.5
synapse_arr=[[1,1],[1,0],[1,-1],[0,1],[0,-1],[-1,1],[-1,0],[-1,-1]]
synapse_num=len(synapse_arr)
W=numpy.zeros([synapse_num, xlen, ylen])
deltaW=numpy.zeros([synapse_num, xlen, ylen])
for i in range(xlen):
for j in range(ylen):
temp=numpy.random.rand(synapse_num)
W[:,i,j]=Wsuminit*temp/numpy.sum(temp)
I=numpy.zeros([xlen,ylen])
STD=numpy.ones_like(I)
STF=coefSTD*numpy.ones_like(I)
Iinh=0.0
#save
f_rate=open("rate.csv", "w")
csv_rate=csv.writer(f_rate, delimiter=",")
f_pos=open("pos.csv", "w")
csv_pos=csv.writer(f_pos, delimiter=",")
f_rate1D=open("rate1D.csv", "w")
csv_rate1D=csv.writer(f_rate1D, delimiter=",")
f_pos1D=open("pos1D.csv", "w")
csv_pos1D=csv.writer(f_pos1D, delimiter=",")
numpy.save("W_init.npy", W)
#place cell
noise_amp=0.05/1000.0
theta_amp=5.0/1000.0
PFrate_move=5.0/1000.0
PFrate_rest=1.0/1000.0
PFfreq_nodopa=0.1/1000.0
PFfreq_dopa=1.0
PFlen=int(200.0/time_pitch)
PFwidth=2.0
centerX=numpy.zeros([xlen, ylen])
centerY=numpy.zeros([xlen, ylen])
for i in range(xlen):
for j in range(ylen):
centerX[i,j]=float(i)
centerY[i,j]=float(j)
posA=[0.5*float(xlen), 0.3*float(ylen)]
posB=[0.5*float(xlen), 0.7*float(ylen)]
posC1=[0.9*float(xlen), 0.7*float(ylen)]
posD1=[0.9*float(xlen), 0.3*float(ylen)]
posC2=[0.1*float(xlen), 0.7*float(ylen)]
posD2=[0.1*float(xlen), 0.3*float(ylen)]
time1=2000.0
time2=4000.0
time3=6000.0
time4=8000.0
set_len=15000.0
direction=-1
moving=0
dopamine=0
pos_phase=0.0
Iext_sw=0
for t in range(simlen):
time_ms=float(t)*time_pitch
if time_ms%1000==0:
print(time_ms/1000,"sec")
if time_ms%set_len<time_pitch:
direction=-direction
if direction==1:
posC=posC1
posD=posD1
else:
posC=posC2
posD=posD2
#for prospective sequence
if time_ms%set_len>=1000.0 and time_ms%set_len<1000.0+time_pitch:
Iext_sw=PFlen
#position
time_set=time_ms%set_len
if time_set<=time1:
pos_phase=0.0
x=posA[0]
y=posA[1]
moving=0
dopamine=0
elif time_set<=time2:
pos_phase=(time_set-time1)/(time2-time1)
x=posA[0]+(posB[0]-posA[0])*pos_phase
y=posA[1]+(posB[1]-posA[1])*pos_phase
moving=1
dopamine=0
elif time_set<=time3:
pos_phase=(time_set-time2)/(time3-time2)
x=posB[0]+(posC[0]-posB[0])*pos_phase
y=posB[1]+(posC[1]-posB[1])*pos_phase
pos_phase=pos_phase+1.0+(1.0-direction)
moving=1
dopamine=0
elif time_set<=time4:
pos_phase=(time_set-time3)/(time4-time3)
x=posC[0]+(posD[0]-posC[0])*pos_phase
y=posC[1]+(posD[1]-posC[1])*pos_phase
pos_phase=pos_phase+2.0+(1.0-direction)
moving=1
dopamine=0
elif time_set<=set_len:
x=posD[0]
y=posD[1]
pos_phase=3.0+(1.0-direction)
moving=0
dopamine=0#(1-direction)/2 #reward OFF
#external inputs
if moving:
Itheta=theta_amp*0.5*(numpy.sin(2.0*numpy.pi*time_ms/1000.0*7.0)+1.0)
Iext=placefield_func(x, y, centerX, centerY, PFrate_move, PFwidth)
else:
Itheta=0.0
if dopamine:
if numpy.random.rand()<PFfreq_dopa:
Iext_sw=PFlen
else:
if numpy.random.rand()<PFfreq_nodopa:
Iext_sw=PFlen
if Iext_sw>0:
Iext=placefield_func(x, y, centerX, centerY, PFrate_rest, PFwidth)
Iext_sw=Iext_sw-1
else:
Iext=0.0
#recurrent transmission
rate=ReLU(I)
rate_out=rate*STD*STF
Isyn=numpy.zeros_like(rate)
for i in range(synapse_num):
Isyn=Isyn+W[i,:,:]*shift2D(rate_out,synapse_arr[i][0],synapse_arr[i][1])
#dynamics
I=I+time_pitch*(-I/tauI+Isyn-Iinh+Iext-Itheta)+noise_amp*numpy.sqrt(time_pitch)*numpy.random.randn(xlen,ylen)
Iinh=Iinh+time_pitch*(-Iinh/tauI+winh*numpy.sum(rate_out))
STD=STD+time_pitch*((1.0-STD)/tauSTD-rate_out)
STF=STF+time_pitch*((coefSTD-STF)/tauSTF+coefSTD*rate*(1.0-STF))
#Hebb
rate=ReLU(I)
W=W+time_pitch*deltaW
for i in range(synapse_num):
deltaW[i,:,:]=deltaW[i,:,:]+time_pitch*(-deltaW[i,:,:]+eta*rate*shift2D(rate_out,synapse_arr[i][0],synapse_arr[i][1]))/taudeltaW #Hebb
#normalization
W[W<Wmin]=Wmin
temp=numpy.repeat(numpy.sum(W, axis=0, keepdims=True), synapse_num, axis=0)
W=numpy.select([temp<=Wsum, temp>Wsum], [W, Wsum*W/temp])
#save results
if t%save_pitch==0:
temp=numpy.hstack([time_ms/1000.0, rate.reshape(rate.size)])
csv_rate.writerow(temp); f_rate.flush();
csv_pos.writerow(numpy.array([time_ms/1000.0,x,y])); f_pos.flush();
#1-dim position
temp=numpy.hstack([time_ms/1000.0, \
rate[int(posA[0]), int(posA[1]):int(posB[1])], \
rate[int(posB[0]):int(posC1[0]), int(posB[1])], \
rate[int(posC1[0]), int(posC1[1]):int(posD1[1]):-1], \
rate[int(posB[0]):int(posC2[0]):-1, int(posB[1])], \
rate[int(posC2[0]), int(posC2[1]):int(posD2[1]):-1] \
])
csv_rate1D.writerow(temp); f_rate1D.flush();
csv_pos1D.writerow(numpy.array([time_ms/1000.0,pos_phase])); f_pos1D.flush()
if t!=0 and (time_ms/1000.0)%int(set_len/1000)==0:
temp=int(time_ms/1000.0)
numpy.save("W_"+str(temp)+"s.npy", W)
numpy.save("W_end.npy", W)
|
py | b4139d661e9bb6cd9c645cb30ef6b9222d25e113 | import os
import yaml
class Config(object):
"""Script configuration file parser.
Attributes
----------
dataset: str
Name of the dataset to train on (i.e., 'omniglot').
num_epochs: int
Number of training epochs.
num_episodes: int
Number of episodes per epoch.
num_ways_train: int
Number of random classes per episode for training.
num_support_train: int
Number of samples per class to use as support for training.
num_query_train: int
Number of samples per class to use as query for training.
num_ways_val: int
Number of random classes per episode for validation.
num_support_val: int
Number of samples per class to use as support for validation.
num_query_val: int
Number of samples per class to use as query for validation.
seed: int
Random seed.
"""
def __init__(self, config_yaml: str) -> None:
if not os.path.exists(config_yaml):
raise ValueError(
f"The config file at {config_yaml} is missing.")
        config = yaml.safe_load(open(config_yaml, "r"))
self.dataset = config["dataset"]
self.num_epochs = config.get("num_epochs", 100)
self.num_episodes = config.get("num_episodes", 100)
self.num_ways_train = config.get("num_ways_train", 60)
self.num_support_train = config.get("num_support_train", 5)
self.num_query_train = config.get("num_query_train", 5)
self.num_ways_val = config.get("num_ways_val", 5)
self.num_support_val = config.get("num_support_val", 5)
self.num_query_val = config.get("num_query_val", 15)
self.seed = config.get("seed", 0)
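# Illustrative config.yaml covering the keys this parser reads (the values are just the
# defaults used above, not recommendations); only `dataset` is required:
#
#     dataset: omniglot
#     num_epochs: 100
#     num_episodes: 100
#     num_ways_train: 60
#     num_support_train: 5
#     num_query_train: 5
#     num_ways_val: 5
#     num_support_val: 5
#     num_query_val: 15
#     seed: 0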
|
py | b4139e2c8b15a2aa620ffe3b2b4014f4b60a7f32 | import torch
import torch.nn as nn
from torch.autograd import Variable
import random
import torch.nn.functional as F
from torch.nn.utils import weight_norm
import pdb
from torch.nn import functional as F
from torch.distributions.normal import Normal
import math
import numpy as np
import yaml
'''CNN model'''
class CNN(nn.Module):
def __init__(self, input_dim, output_dim, hidden_size=(1024, 512), activation='relu', discrim=False, dropout=-1):
super(CNN, self).__init__()
dims = []
dims.append(input_dim)
dims.extend(hidden_size)
dims.append(output_dim)
kernels = [3] * (len(dims)-1)
self.layers = nn.ModuleList()
for i in range(len(dims)-1):
self.layers.append(nn.Conv2d(dims[i], dims[i+1], kernels[i]))
if activation == 'relu':
self.activation = nn.ReLU()
elif activation == 'sigmoid':
self.activation = nn.Sigmoid()
self.sigmoid = nn.Sigmoid() if discrim else None
self.dropout = dropout
x_dummy = torch.ones(input_size).unsqueeze(0) * torch.tensor(float('nan'))
for i in range(len(self.layers)):
x_dummy = self.layers[i](x_dummy)
if i != len(self.layers)-1:
x_dummy = self.activation(x_dummy)
if self.dropout != -1:
x_dummy = nn.Dropout(min(0.1, self.dropout/3) if i == 1 else self.dropout)(x_dummy)
elif self.sigmoid:
x_dummy = self.sigmoid(x_dummy)
self.fc = nn.Linear(x_dummy.numel(), dims[i+1])
def forward(self, x):
for i in range(len(self.layers)):
x = self.layers[i](x)
if i != len(self.layers)-1:
x = self.activation(x)
if self.dropout != -1:
x = nn.Dropout(min(0.1, self.dropout/3) if i == 1 else self.dropout)(x)
elif self.sigmoid:
x = self.sigmoid(x)
x = self.fc(x)
return x
'''MLP model'''
class MLP(nn.Module):
def __init__(self, input_dim, output_dim, hidden_size=(1024, 512), activation='relu', discrim=False, dropout=-1):
super(MLP, self).__init__()
dims = []
dims.append(input_dim)
dims.extend(hidden_size)
dims.append(output_dim)
self.layers = nn.ModuleList()
for i in range(len(dims)-1):
self.layers.append(nn.Linear(dims[i], dims[i+1]))
if activation == 'relu':
self.activation = nn.ReLU()
elif activation == 'sigmoid':
self.activation = nn.Sigmoid()
self.sigmoid = nn.Sigmoid() if discrim else None
self.dropout = dropout
def forward(self, x):
for i in range(len(self.layers)):
x = self.layers[i](x)
if i != len(self.layers)-1:
x = self.activation(x)
if self.dropout != -1:
x = nn.Dropout(min(0.1, self.dropout/3) if i == 1 else self.dropout)(x)
elif self.sigmoid:
x = self.sigmoid(x)
return x
class PECNet(nn.Module):
def __init__(self, enc_past_size, enc_dest_size, enc_latent_size, dec_size, predictor_size, non_local_theta_size, non_local_phi_size, non_local_g_size, fdim, zdim, nonlocal_pools, non_local_dim, sigma, past_length, future_length, verbose):
'''
Args:
size parameters: Dimension sizes
nonlocal_pools: Number of nonlocal pooling operations to be performed
sigma: Standard deviation used for sampling N(0, sigma)
past_length: Length of past history (number of timesteps)
future_length: Length of future trajectory to be predicted
'''
super(PECNet, self).__init__()
self.zdim = zdim
self.nonlocal_pools = nonlocal_pools
self.sigma = sigma
# takes in the past
self.encoder_past = MLP(input_dim = past_length*2, output_dim = fdim, hidden_size=enc_past_size)
self.encoder_dest = MLP(input_dim = 2, output_dim = fdim, hidden_size=enc_dest_size)
self.encoder_latent = MLP(input_dim = 2*fdim, output_dim = 2*zdim, hidden_size=enc_latent_size)
self.decoder = MLP(input_dim = fdim + zdim, output_dim = 2, hidden_size=dec_size)
self.non_local_theta = MLP(input_dim = 2*fdim + 2, output_dim = non_local_dim, hidden_size=non_local_theta_size)
self.non_local_phi = MLP(input_dim = 2*fdim + 2, output_dim = non_local_dim, hidden_size=non_local_phi_size)
self.non_local_g = MLP(input_dim = 2*fdim + 2, output_dim = 2*fdim + 2, hidden_size=non_local_g_size)
self.predictor = MLP(input_dim = 2*fdim + 2, output_dim = 2*(future_length-1), hidden_size=predictor_size)
architecture = lambda net: [l.in_features for l in net.layers] + [net.layers[-1].out_features]
if verbose:
print("Past Encoder architecture : {}".format(architecture(self.encoder_past)))
print("Dest Encoder architecture : {}".format(architecture(self.encoder_dest)))
print("Latent Encoder architecture : {}".format(architecture(self.encoder_latent)))
print("Decoder architecture : {}".format(architecture(self.decoder)))
print("Predictor architecture : {}".format(architecture(self.predictor)))
print("Non Local Theta architecture : {}".format(architecture(self.non_local_theta)))
print("Non Local Phi architecture : {}".format(architecture(self.non_local_phi)))
print("Non Local g architecture : {}".format(architecture(self.non_local_g)))
def non_local_social_pooling(self, feat, mask):
# N,C
theta_x = self.non_local_theta(feat)
# C,N
phi_x = self.non_local_phi(feat).transpose(1,0)
# f_ij = (theta_i)^T(phi_j), (N,N)
f = torch.matmul(theta_x, phi_x)
# f_weights_i = exp(f_ij)/(\sum_{j=1}^N exp(f_ij))
f_weights = F.softmax(f, dim = -1)
# setting weights of non neighbours to zero
f_weights = f_weights * mask
# rescaling row weights to 1
f_weights = F.normalize(f_weights, p=1, dim=1)
# ith row of all_pooled_f = \sum_{j=1}^N f_weights_i_j * g_row_j
pooled_f = torch.matmul(f_weights, self.non_local_g(feat))
return pooled_f + feat
def forward(self, x, initial_pos, dest = None, mask = None, device=torch.device('cpu')):
# provide destination iff training
# assert model.training
assert self.training ^ (dest is None)
assert self.training ^ (mask is None)
# encode
ftraj = self.encoder_past(x)
if not self.training:
z = torch.Tensor(x.size(0), self.zdim)
z.normal_(0, self.sigma)
else:
# during training, use the destination to produce generated_dest and use it again to predict final future points
# CVAE code
dest_features = self.encoder_dest(dest)
features = torch.cat((ftraj, dest_features), dim = 1)
latent = self.encoder_latent(features)
mu = latent[:, 0:self.zdim] # 2-d array
logvar = latent[:, self.zdim:] # 2-d array
var = logvar.mul(0.5).exp_()
eps = torch.DoubleTensor(var.size()).normal_()
eps = eps.to(device)
z = eps.mul(var).add_(mu)
z = z.double().to(device)
decoder_input = torch.cat((ftraj, z), dim = 1)
generated_dest = self.decoder(decoder_input)
if self.training:
# prediction in training, no best selection
generated_dest_features = self.encoder_dest(generated_dest)
prediction_features = torch.cat((ftraj, generated_dest_features, initial_pos), dim = 1)
for i in range(self.nonlocal_pools):
# non local social pooling
prediction_features = self.non_local_social_pooling(prediction_features, mask)
pred_future = self.predictor(prediction_features)
return generated_dest, mu, logvar, pred_future
return generated_dest
# separated for forward to let choose the best destination
def predict(self, past, generated_dest, mask, initial_pos):
ftraj = self.encoder_past(past)
generated_dest_features = self.encoder_dest(generated_dest)
prediction_features = torch.cat((ftraj, generated_dest_features, initial_pos), dim = 1)
for i in range(self.nonlocal_pools):
# non local social pooling
prediction_features = self.non_local_social_pooling(prediction_features, mask)
interpolated_future = self.predictor(prediction_features)
return interpolated_future
if __name__ == "__main__":
pass |
py | b4139f006b6778024ca7d435f253c4b85a8a4044 | __author__ = 'Antony Cherepanov'
from inversionscount import InversionsCounter
from closestpoints import ClosestPoints
|
py | b4139f0077d44aa2a35cb498fab92d1eb92f910a | def isPhoneNumber(text):
if len(text) != 12:
return False
for i in range(0,3):
if not text[i].isdecimal():
return False
if text[3] != '-':
return False
for i in range(4,7):
if not text[i].isdecimal():
return False
if text[7] != '-':
return False
for i in range(8, 12):
if not text[i].isdecimal():
return False
return True
# print('415-555-4242 is a phone number:')
# print(isPhoneNumber('415-555-4242'))
# print('Moshi moshi is a phone number:')
# print(isPhoneNumber('Moshi moshi'))
message = 'Call me at 415-555-1011 tomorrow. 415-555-9999 is my office.'
for i in range(len(message)):
chunk = message[i:i+12]
if isPhoneNumber(chunk):
print('Phone number found: ' + chunk)
print('Done')
|
py | b413a05d029716b1008fd51a15f9eba7cd73c8f0 | import os
import redis
import json
import traceback
import sys
from lambdarest import create_lambda_handler
from timestamp import get_reverse_timestamp
from leaderboard_exceptions import UserNotFoundException, InvalidRequestException, AccessDeniedException
from leaderboard_scripts import lua_script_get_around, lua_script_get_my_rank, lua_script_put_score, lua_script_delete_score
ADMIN_SECRET_TOKEN = os.environ.get('ADMIN_SECRET_TOKEN')
DEFAULT_FETCH_COUNT = int(os.environ.get('DEFAULT_FETCH_COUNT'))
MAX_FETCH_COUNT = int(os.environ.get('MAX_FETCH_COUNT'))
redis_client = redis.StrictRedis(
host=os.environ.get('REDIS_HOST'),
port=os.environ.get('REDIS_PORT'),
charset="utf-8",
decode_responses=True)
lambda_handler = create_lambda_handler(error_handler=None)
def leaderboard_str(service_id: str, leader_board_id: str):
return f'{service_id}:leaderboard:{leader_board_id}'
def leaderboard_timestamp_str(service_id: str, leader_board_id: str):
return f'{service_id}:leaderboard:{leader_board_id}:timestamp'
def user_properties_key_str(service_id: str, user_id: str):
return f'{service_id}:user:{user_id}:properties'
@lambda_handler.handle("get", path="/<string:service_id>/leaderboards/<string:leader_board_id>")
def get_leaderboard_status(event, service_id, leader_board_id):
cardinality = redis_client.hlen(leaderboard_timestamp_str(service_id, leader_board_id))
return {"cardinality": cardinality}
@lambda_handler.handle("get", path="/<string:service_id>/leaderboards/<string:leader_board_id>/<string:user_id>")
def get_user_score(event, service_id, leader_board_id, user_id):
data = redis_client.eval(lua_script_get_my_rank, 2,
leaderboard_str(service_id, leader_board_id),
leaderboard_timestamp_str(service_id, leader_board_id),
user_id)
if data is None:
raise UserNotFoundException("user not found")
query_param_dict = event.get("json", {}).get("query", {})
include_properties = query_param_dict.get("properties", False)
response = {"userId": data[1], "rank": data[0], "score": data[2]}
if include_properties:
properties = redis_client.get(user_properties_key_str(service_id, user_id))
if properties is not None:
response["properties"] = json.loads(properties)
return response
@lambda_handler.handle("delete", path="/<string:service_id>/leaderboards/<string:leader_board_id>/<string:user_id>")
def delete_user_score(event, service_id, leader_board_id, user_id):
redis_client.eval(lua_script_delete_score, 2,
leaderboard_str(service_id, leader_board_id),
leaderboard_timestamp_str(service_id, leader_board_id),
user_id)
return
# pick top rank of leader board
@lambda_handler.handle("get", path="/<string:service_id>/leaderboards/<string:leader_board_id>/top")
def get_top_rank_scores(event, service_id, leader_board_id):
query_param_dict = event.get("json", {}).get("query", {})
# if exlicit limit query parameter not exists then apply fetch default count
limit = query_param_dict.get("limit", DEFAULT_FETCH_COUNT)
offset = query_param_dict.get("offset", 0)
if limit <= 0:
raise ValueError("limit parameter must be positive value.")
    if offset < 0:
        raise ValueError("offset parameter must be a non-negative value.")
limit = min(limit, MAX_FETCH_COUNT)
rank_data = redis_client.zrevrange(leaderboard_str(service_id, leader_board_id), offset, offset+limit-1, withscores=True)
# rank_data = redis_client.zrevrangebyscore(
# leader_board_id, "+inf", "-inf", withscores=True, start=0, num=limit)
response = []
for idx, data in enumerate(rank_data, start=offset+1):
response.append({"userId": data[0].split(':')[1], "rank": idx, "score": data[1]})
include_properties = query_param_dict.get("properties", False)
if include_properties:
properties = redis_client.mget(list(map(lambda x: user_properties_key_str(service_id, x['userId']), response)))
for i, prop in enumerate(properties, start=0):
if prop is not None:
response[i]["properties"] = json.loads(prop)
return response
@lambda_handler.handle("get", path="/<string:service_id>/leaderboards/<string:leader_board_id>/<string:user_id>/around")
def get_around_rank_scores(event, service_id, leader_board_id, user_id):
query_param_dict = event.get("json", {}).get("query", {})
# if exlicit limit query parameter not exists then apply fetch default count
limit = query_param_dict.get("limit", 1)
if limit <= 0:
raise ValueError("limit parameter must be positive value.")
# limit count for prevent huge fetch
limit = min(limit, MAX_FETCH_COUNT)
# rank_data = redis_client.zrevrangebyscore(
# leader_board_id, "+inf", "-inf", withscores=True, start=0, num=limit)
response = []
rank_data = redis_client.eval(lua_script_get_around, 2,
leaderboard_str(service_id, leader_board_id), leaderboard_timestamp_str(service_id, leader_board_id),
user_id, limit)
if rank_data is None:
return []
for data in [rank_data[i:(i+3)] for i in range(0, len(rank_data), 3)]:
response.append({"userId": data[1].split(":")[1], "rank": data[0], "score": data[2]})
include_properties = query_param_dict.get("properties", False)
if include_properties:
properties = redis_client.mget(list(map(lambda x: user_properties_key_str(service_id, x['userId']), response)))
for i, prop in enumerate(properties, start=0):
if prop is not None:
response[i]["properties"] = json.loads(prop)
return response
@lambda_handler.handle("put", path="/<string:service_id>/leaderboards/<string:leader_board_id>/<string:user_id>")
def put_score(event, service_id, leader_board_id, user_id):
if event["body"] is None:
raise InvalidRequestException("request parameter invalid")
body = json.loads(event["body"])
if "score" not in body:
raise InvalidRequestException(
"'score' parameter not exists in request body")
if body["score"] == 0:
return
if body["score"] < 0:
        raise ValueError("score parameter must be a positive value.")
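    # The Lua script is expected to perform the compare-and-update atomically and to return the
    # previous score (mirroring the commented-out non-atomic version kept below); this description
    # is inferred from how the result is used here, not from the script source itself.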
prev_score = redis_client.eval(lua_script_put_score, 2,
leaderboard_str(service_id, leader_board_id),
leaderboard_timestamp_str(service_id, leader_board_id),
user_id, body["score"], get_reverse_timestamp())
return {"prevScore": prev_score}
# lboard_user_id = redis_client.hget(leaderboard_timestamp_str(
# service_id, leader_board_id), user_id)
# # exist prev value
# if lboard_user_id is not None:
# prev_score = redis_client.zscore(leaderboard_str(
# service_id, leader_board_id), lboard_user_id)
# if prev_score is not None and body["score"] <= prev_score:
# return
# update_timestamp = get_now_timestamp()
# redis_client.hset(leaderboard_timestamp_str(
# service_id, leader_board_id), user_id, update_timestamp)
# stamped_user_id = timestamp_user_id(user_id, update_timestamp)
# redis_client.zadd(leaderboard_str(
# service_id, leader_board_id), ch=True, mapping={stamped_user_id: body["score"]})
# return
# @lambda_handler.handle("post", path="/<string:service_id>/<string:leader_board_id>/<string:user_id>")
# def incr_score(event, service_id, leader_board_id, user_id):
# if event["body"] is None:
# raise InvalidRequestException("request parameter invalid")
# body = json.loads(event["body"])
# if "delta" not in body:
# raise InvalidRequestException(
# "'delta' parameter not exists in request body")
# redis_client.zincrby(leaderboard_str(
# service_id, leader_board_id), body["delta"], user_id)
# return
@lambda_handler.handle("put", path="/<string:service_id>/users/<string:user_id>")
def put_user_property(event, service_id, user_id):
    if event["body"] is None:
        raise InvalidRequestException("request parameter invalid")
    body = json.loads(event["body"])
if "properties" in body:
redis_client.set(user_properties_key_str(service_id, user_id), json.dumps(body["properties"]))
return
@lambda_handler.handle("delete", path="/<string:service_id>/leaderboards/<string:leader_board_id>")
def delete_leader_board(event, service_id, leader_board_id):
auth_token = event.get("headers", {}).get("X-Auth", "")
if auth_token != ADMIN_SECRET_TOKEN:
raise AccessDeniedException("Invalid authentication")
redis_client.delete(leaderboard_str(service_id, leader_board_id), leaderboard_timestamp_str(service_id, leader_board_id))
return
# for debug purpose
# @lambda_handler.handle("get")
# def get_default_handle(event):
# return event
@lambda_handler.handle("get", path="/<string:service_id>/leaderboards")
def list_leader_boards(event, service_id):
auth_token = event.get("headers", {}).get("X-Auth", "")
query_param_dict = event.get("json", {}).get("query", {})
    # if an explicit limit query parameter is not given, apply the default fetch count
    limit = query_param_dict.get("limit", DEFAULT_FETCH_COUNT)
    if limit <= 0:
        raise ValueError("limit parameter must be a positive value.")
if auth_token != ADMIN_SECRET_TOKEN:
raise AccessDeniedException("Invalid authentication")
    scan_responses = redis_client.scan(count=limit, match=f'{service_id}:leaderboard:*:timestamp')
    # scan() returns a (cursor, keys) tuple; COUNT is only a hint to Redis, not a strict limit.
    # The leaderboard id is the third segment of each matched key.
    response = [k.split(":")[2] for k in scan_responses[1]]
return response
def handler(event, context):
try:
return lambda_handler(event=event)
except (ValueError, InvalidRequestException) as verror:
return {
"statusCode": "400",
"body": json.dumps({
"message": str(verror)
})
}
except AccessDeniedException as perror:
return {
"statusCode": "403",
"body": json.dumps({
"message": str(perror)
})
}
except UserNotFoundException as kerror:
return {
"statusCode": "404",
"body": json.dumps({
"message": str(kerror)
})
}
except Exception as ex:
traceback.print_exc()
return {
"statusCode": "500",
"body": json.dumps({
"message": str(ex)
})
}
# rank_data = redis_client.zrevrangebyscore(
# LeaderBoardId, "+inf", "-inf", withscores=True, start=0, num=50)
# return {
# # 'path': event['path'],
# # 'method': event['httpMethod'],
# 'statusCode': '200',
# 'headers': {},
# 'body': json.dumps(event),
# 'isBase64Encoded': False
# }
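# A minimal local smoke-test sketch, not part of the Lambda contract. The event shape below
# ("path", "httpMethod", "headers", "json", "body") is an assumption inferred from how the
# handlers read the event and from the commented-out echo response above; the exact keys depend
# on the router behind lambda_handler, and a reachable Redis plus hypothetical service and
# leaderboard names ("demo-service", "weekly") are assumed.
if __name__ == "__main__":
    sample_event = {
        "path": "/demo-service/leaderboards/weekly/top",
        "httpMethod": "GET",
        "headers": {},
        "json": {"query": {"limit": 10, "offset": 0, "properties": False}},
        "body": None,
    }
    # Prints either the handler's payload or one of the error envelopes defined above.
    print(handler(sample_event, None))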
|
py | b413a0b9f26506b7a5a05d657e96f43d89f14741 | from setuptools import setup, find_packages
def readme():
with open('README.md') as f:
return f.read()
print(find_packages())
setup(
name='SCQUA',
version='0.0.2',
description='Single-Cell sequencing QUAlity assessment',
long_description=readme(),
packages=find_packages(),
install_requires=['numpy', 'pandas', 'scanpy'],
author='Chichau Miau',
author_email='[email protected]',
license='MIT'
)
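# Typical usage for this packaging script (shown as comments only, not executed on import):
#   pip install .       # install SCQUA and its declared dependencies
#   pip install -e .    # editable install for local development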
|
py | b413a0f86038fb2e366b0799f6b307d9263a5bbe | from .add_edge_no_circular import AddEdgeNoCircular
from .add_vertex import AddVertex
from .delete_edge import DeleteEdge
from .detach_vertex_named import DetachVertexNamed
from .set_payload import SetPayload
from .tag import Tag
class Log:
"""
A log for dependency graph actions.
"""
def __init__(self):
self._current_action = None
self._first_action = None
def tag(self, graph, tag):
"""
        Tags the current state of the dependency graph with the given tag.
"""
return self._push_action(graph, Tag(tag))
def add_vertex(self, graph, name, payload, root):
return self._push_action(graph, AddVertex(name, payload, root))
def detach_vertex_named(self, graph, name):
return self._push_action(graph, DetachVertexNamed(name))
def add_edge_no_circular(self, graph, origin, destination, requirement):
action = AddEdgeNoCircular(origin, destination, requirement)
return self._push_action(graph, action)
def delete_edge(self, graph, origin, destination, requirement):
action = DeleteEdge(origin, destination, requirement)
return self._push_action(graph, action)
def set_payload(self, graph, name, payload):
return self._push_action(graph, SetPayload(name, payload))
def pop(self, graph):
action = self._current_action
if not action:
return
self._current_action = action.previous
if not self._current_action:
self._first_action = None
action.down(graph)
return action
def rewind_to(self, graph, tag):
while True:
action = self.pop(graph)
if not action:
raise ValueError('No tag "{}" found'.format(tag))
if isinstance(action, Tag) and action.tag == tag:
break
def _push_action(self, graph, action):
"""
        Adds the given action to the log and runs it against the graph.
:param graph: The graph
:param action: The action
:type action: Action
"""
action.previous = self._current_action
if self._current_action:
self._current_action.next = action
self._current_action = action
if not self._first_action:
self._first_action = action
return action.up(graph)
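# A minimal usage sketch (comments only) of the checkpoint/rollback pattern this log enables.
# It assumes a DependencyGraph-like object from the same package whose mutations are recorded
# through this log; the graph object, names, payloads and requirement strings are hypothetical.
#
#   log = Log()
#   log.tag(graph, "before-resolution")
#   log.add_vertex(graph, "rack", payload={"version": "2.2"}, root=True)
#   log.add_edge_no_circular(graph, "app", "rack", requirement=">= 2.0")
#   # ...resolution fails...
#   log.rewind_to(graph, "before-resolution")  # pops actions newest-first back to the tag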
|