max_stars_repo_path (string, 4-245 chars) | max_stars_repo_name (string, 7-115 chars) | max_stars_count (int64, 101-368k) | id (string, 2-8 chars) | content (string, 6-1.03M chars)
---|---|---|---|---|
nncf/torch/graph/transformations/layout.py | xiao1228/nncf | 310 | 11131439 | from nncf.common.graph.transformations.layout import TransformationLayout
class PTTransformationLayout(TransformationLayout):
pass
|
SoftLayer/CLI/ticket/subjects.py | dvzrv/softlayer-python | 126 | 11131454 | """List Subject IDs for ticket creation."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
@click.command()
@environment.pass_env
def cli(env):
"""List Subject IDs for ticket creation."""
ticket_mgr = SoftLayer.TicketManager(env.client)
table = formatting.Table(['id', 'subject'])
for subject in ticket_mgr.list_subjects():
table.add_row([subject['id'], subject['name']])
env.fout(table)
|
util/chplenv/chpl_arch.py | jhh67/chapel | 1,602 | 11131455 | #!/usr/bin/env python3
import optparse
import sys
import chpl_cpu, overrides
from utils import error, memoize, warning
@memoize
def get(flag='host'):
if flag == 'host':
arch_val = overrides.get('CHPL_HOST_ARCH', '')
elif flag == 'target':
arch_val = overrides.get('CHPL_TARGET_ARCH', '')
else:
error("Invalid flag: '{0}'".format(flag), ValueError)
if arch_val:
return arch_val
# compute the default
return chpl_cpu.get_default_machine(flag)
def validate(flag='host'):
pass
def _main():
parser = optparse.OptionParser(usage="usage: %prog [--host|target]")
parser.add_option('--target', dest='flag', action='store_const',
const='target', default='target')
parser.add_option('--host', dest='flag', action='store_const',
const='host')
(options, args) = parser.parse_args()
arch = get(options.flag)
sys.stdout.write("{0}\n".format(arch))
if __name__ == '__main__':
_main()
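# Illustrative invocation (a sketch, assuming the override is supplied via the
# environment, which is one of the sources `overrides.get` consults; the value
# x86_64 below is only an example):
#
#   $ CHPL_TARGET_ARCH=x86_64 util/chplenv/chpl_arch.py --target
#   x86_64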
|
test/files/generate.py | yssource/xtensor | 1,592 | 11131478 | #!/usr/bin/env python3
import glob
fs = glob.glob("xio_expected_results/*.txt")
include_file = "#include <string>\n\n"
for f in fs:
with open(f) as ff:
ctn = ff.read()
n = f.split("/")[1]
include_file += "static std::string {} = R\"xio({})xio\";\n\n\n".format(n[:-4], ctn)
with open("xio_expected_results.hpp", "w+") as fo:
fo.write(include_file) |
tests/test_init.py | michaeljoseph/changes | 135 | 11131485 | import os
import textwrap
from pathlib import Path
import pytest
import responses
import changes
from .conftest import AUTH_TOKEN_ENVVAR, BUG_LABEL_JSON, LABEL_URL
@pytest.fixture
def answer_prompts(mocker):
mocker.patch('changes.config.click.launch', autospec=True)
prompt = mocker.patch('changes.config.click.prompt', autospec=True)
prompt.side_effect = ['foo', 'docs/releases', 'version.txt', '.']
prompt = mocker.patch('changes.config.prompt.choose_labels', autospec=True)
prompt.return_value = ['bug']
saved_token = None
if os.environ.get(AUTH_TOKEN_ENVVAR):
saved_token = os.environ[AUTH_TOKEN_ENVVAR]
del os.environ[AUTH_TOKEN_ENVVAR]
yield
if saved_token:
os.environ[AUTH_TOKEN_ENVVAR] = saved_token
@responses.activate
def test_init_prompts_for_auth_token_and_writes_tool_config(
capsys, git_repo, changes_config_in_tmpdir, answer_prompts
):
responses.add(
responses.GET,
LABEL_URL,
json=BUG_LABEL_JSON,
status=200,
content_type='application/json',
)
changes.initialise()
assert changes_config_in_tmpdir.exists()
expected_config = textwrap.dedent(
"""\
[changes]
auth_token = "foo"
"""
)
assert expected_config == changes_config_in_tmpdir.read_text()
expected_output = textwrap.dedent(
"""\
No auth token found, asking for it...
You need a Github Auth Token for changes to create a release.
Releases directory {} not found, creating it....
""".format(
Path('docs').joinpath('releases')
)
)
out, _ = capsys.readouterr()
assert expected_output == out
@responses.activate
def test_init_finds_auth_token_in_environment(
capsys,
git_repo,
with_auth_token_envvar,
changes_config_in_tmpdir,
with_releases_directory_and_bumpversion_file_prompt,
):
responses.add(
responses.GET,
LABEL_URL,
json=BUG_LABEL_JSON,
status=200,
content_type='application/json',
)
changes.initialise()
# envvar setting is not written to the config file
assert not changes_config_in_tmpdir.exists()
expected_output = textwrap.dedent(
"""\
Found Github Auth Token in the environment...
Releases directory {} not found, creating it....
""".format(
Path('docs').joinpath('releases')
)
)
out, _ = capsys.readouterr()
assert expected_output == out
|
hector_quadrotor/geodesy/src/geodesy/props.py | Eashwar-S/Swarm_Drones | 742 | 11131501 | # Software License Agreement (BSD License)
#
# Copyright (C) 2012, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the author nor of other contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
.. module:: props
`geographic_msgs/KeyValue`_ property interface for Geographic Information messages.
.. _`geographic_msgs/KeyValue`: http://ros.org/doc/api/geographic_msgs/html/msg/KeyValue.html
"""
from geographic_msgs.msg import KeyValue
def get(msg, key):
""" Get property value.
:param msg: Message containing properties.
:param key: Property key to match.
:returns: Corresponding value, if defined; None otherwise.
Beware: the value may be '', which evaluates False as a
predicate, use ``is not None`` to test for presence.
"""
for prop in msg.props:
if prop.key == key:
return prop.value
return None
def match(msg, key_set):
""" Match message properties.
:param msg: Message containing properties.
:param key_set: Set of property keys to match.
:returns: (key, value) of first property matched; None otherwise.
:raises: :exc:`ValueError` if key_set is not a set
"""
if type(key_set) is not set:
raise ValueError('property matching requires a set of keys')
for prop in msg.props:
if prop.key in key_set:
return (prop.key, prop.value)
return None
def put(msg, key, val=''):
""" Add KeyValue to message properties.
:param msg: Message to update.
:param key: Property key name.
:param value: Corresponding value string (default '').
"""
for prop in msg.props:
if prop.key == key:
# key already present, update value
prop.value = str(val)
return
# key missing, append a new KeyValue pair
msg.props.append(KeyValue(key=key, value=str(val)))
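# Illustrative usage sketch (wp is a hypothetical message carrying a `props`
# list of KeyValue pairs, e.g. a geographic_msgs/WayPoint; values are examples):
#
#   put(wp, 'name', 'gate_3')        # appends KeyValue(key='name', value='gate_3')
#   get(wp, 'name')                  # -> 'gate_3'
#   match(wp, {'name', 'label'})     # -> ('name', 'gate_3')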
|
DQM/SiStripMonitorDigi/python/SiStripMonitorDigi_cfi.py | ckamtsikis/cmssw | 852 | 11131505 | import FWCore.ParameterSet.Config as cms
# SiStripMonitorDigi
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
SiStripMonitorDigi = DQMEDAnalyzer('SiStripMonitorDigi',
TopFolderName = cms.string('SiStrip'),
# add digi producers same way as Domenico in SiStripClusterizer
DigiProducersList = cms.VInputTag(
cms.InputTag('siStripDigis','ZeroSuppressed'),
cms.InputTag('siStripZeroSuppression','VirginRaw'),
cms.InputTag('siStripZeroSuppression','ProcessedRaw'),
cms.InputTag('siStripZeroSuppression','ScopeMode')
),
TH1NApvShots = cms.PSet(
Nbins = cms.int32(201),
xmin = cms.double(-0.5),
xmax = cms.double(200.5),
subdetswitchon = cms.bool(False),
globalswitchon = cms.bool(False)
),
TH1ChargeMedianApvShots = cms.PSet(
Nbins = cms.int32(256),
xmin = cms.double(0.5),
xmax = cms.double(256.5),
subdetswitchon = cms.bool(False),
globalswitchon = cms.bool(True)
),
TH1NStripsApvShots = cms.PSet(
Nbins = cms.int32(64),
xmin = cms.double(64.5),
xmax = cms.double(128.5),
subdetswitchon = cms.bool(False),
globalswitchon = cms.bool(False)
),
TProfNShotsVsTime = cms.PSet(
Nbins = cms.int32(600),
xmin = cms.double(0.0),
xmax = cms.double(1.0*60*60),
ymin = cms.double(0.0),
ymax = cms.double(0.0),
subdetswitchon = cms.bool(False),
globalswitchon = cms.bool(False)
),
TH1ApvNumApvShots = cms.PSet(
Nbins = cms.int32(6),
xmin = cms.double(0.5),
xmax = cms.double(6.5),
subdetswitchon = cms.bool(False),
globalswitchon = cms.bool(False)
),
TProfGlobalNShots = cms.PSet(
globalswitchon = cms.bool(False)
),
TH1ADCsCoolestStrip = cms.PSet(
Nbinx = cms.int32(60),
xmin = cms.double(-0.5),
xmax = cms.double(299.5),
layerswitchon = cms.bool(False),
moduleswitchon = cms.bool(True)
),
TH1ADCsHottestStrip = cms.PSet(
Nbinx = cms.int32(60),
xmin = cms.double(-0.5),
xmax = cms.double(299.5),
layerswitchon = cms.bool(False),
moduleswitchon = cms.bool(True)
),
TH1DigiADCs = cms.PSet(
Nbinx = cms.int32(64),
xmin = cms.double(-0.5),
xmax = cms.double(255.5),
layerswitchon = cms.bool(True),
moduleswitchon = cms.bool(True)
),
TH1NumberOfDigis = cms.PSet(
Nbinx = cms.int32(50),
xmin = cms.double(-0.5),
xmax = cms.double(999.5),
layerswitchon = cms.bool(True),
moduleswitchon = cms.bool(True)
),
TH1NumberOfDigisPerStrip = cms.PSet(
Nbinx = cms.int32(768),
xmin = cms.double(-0.5),
xmax = cms.double(767.5),
moduleswitchon = cms.bool(False)
),
TH1StripOccupancy = cms.PSet(
Nbinx = cms.int32(51),
xmin = cms.double(-0.01),
xmax = cms.double(1.01),
layerswitchon = cms.bool(True),
moduleswitchon = cms.bool(True)
),
TProfNumberOfDigi = cms.PSet(
Nbinx = cms.int32(100),
xmin = cms.double(-0.5),
xmax = cms.double(499.5),
layerswitchon = cms.bool(False),
moduleswitchon = cms.bool(False)
),
TProfDigiADC = cms.PSet(
Nbinx = cms.int32(100),
xmin = cms.double(0.0),
xmax = cms.double(499.5),
layerswitchon = cms.bool(False),
moduleswitchon = cms.bool(False)
),
TProfTotalNumberOfDigis = cms.PSet(
Nbins = cms.int32(360),
xmin = cms.double(0.0),
xmax = cms.double(1.0*60*60),
ymin = cms.double(0.0),
ymax = cms.double(0.0),
subdetswitchon = cms.bool(False)
),
TProfNDigisFED = cms.PSet(
Nbinsx = cms.int32(440),
xmax = cms.double(489.5),
xmin = cms.double(49.5),
Nbinsy = cms.int32(200),
ymin = cms.double(-0.5),
ymax = cms.double(199999.5),
globalswitchon = cms.bool(True)
),
TkHistoMap_On = cms.bool(True),
TkHistoMapNApvShots_On = cms.bool(False),
TkHistoMapNStripApvShots_On = cms.bool(False),
TkHistoMapMedianChargeApvShots_On = cms.bool(False),
CreateTrendMEs = cms.bool(False),
Trending = cms.PSet(
Nbins = cms.int32(600),
xmin = cms.double(0.0),
xmax = cms.double(1.0*60*60),
ymin = cms.double(0.0),
ymax = cms.double(10000.0)
),
TProfDigiApvCycle = cms.PSet(
Nbins = cms.int32(70),
xmin = cms.double(-0.5),
xmax = cms.double(69.5),
Nbinsy = cms.int32(200),
ymin = cms.double(0.0),
ymax = cms.double(0.0),
subdetswitchon = cms.bool(False)
),
TH2DigiApvCycle = cms.PSet(
Nbins = cms.int32(70),
xmin = cms.double(-0.5),
xmax = cms.double(69.5),
Nbinsy = cms.int32(200),
ymin = cms.double(0.0),
yfactor = cms.double(0.2),
subdetswitchon = cms.bool(False)
),
TProfTotalNumberOfDigisVsLS = cms.PSet(
subdetswitchon = cms.bool(False)
),
TotalNumberOfDigisFailure = cms.PSet(
Nbins = cms.int32(2500),
ignoreFirstNLumisections = cms.int32(20),
integrateNLumisections = cms.int32(3),
subdetswitchon = cms.bool(False)
),
# xLumiProf = cms.int32(5),
Mod_On = cms.bool(True),
HistoryProducer = cms.InputTag("consecutiveHEs"),
ApvPhaseProducer = cms.InputTag("APVPhases"),
UseDCSFiltering = cms.bool(True),
# rest of parameters
SelectAllDetectors = cms.bool(False),
ShowMechanicalStructureView = cms.bool(True),
ShowReadoutView = cms.bool(False),
ShowControlView = cms.bool(False),
CalculateStripOccupancy = cms.bool(False),
ResetMEsEachRun = cms.bool(False),
TrendVs10LS = cms.bool(False),
)
from Configuration.Eras.Modifier_run3_common_cff import run3_common
run3_common.toModify(SiStripMonitorDigi, TH1NumberOfDigis = dict(xmax = 39999.5, Nbinx = 500))
|
src/python/nimbusml/internal/entrypoints/_ensemblesubsetselector_randompartitionselector.py | michaelgsharp/NimbusML | 134 | 11131522 | # - Generated by tools/entrypoint_compiler.py: do not edit by hand
"""
RandomPartitionSelector
"""
from ..utils.entrypoints import Component
from ..utils.utils import try_set
def random_partition_selector(
feature_selector=None,
**params):
"""
**Description**
None
:param feature_selector: The Feature selector (settings).
"""
entrypoint_name = 'RandomPartitionSelector'
settings = {}
if feature_selector is not None:
settings['FeatureSelector'] = try_set(
obj=feature_selector, none_acceptable=True, is_of_type=dict)
component = Component(
name=entrypoint_name,
settings=settings,
kind='EnsembleSubsetSelector')
return component
|
numpyro/compat/infer.py | MarcoGorelli/numpyro | 1,394 | 11131542 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import math
from jax import jit
import numpyro
from numpyro.compat.pyro import get_param_store
from numpyro.infer import elbo, hmc, mcmc, svi
class HMC(hmc.HMC):
def __init__(
self,
model=None,
potential_fn=None,
step_size=1,
adapt_step_size=True,
adapt_mass_matrix=True,
full_mass=False,
use_multinomial_sampling=True,
transforms=None,
max_plate_nesting=None,
jit_compile=False,
jit_options=None,
ignore_jit_warnings=False,
trajectory_length=2 * math.pi,
target_accept_prob=0.8,
):
super(HMC, self).__init__(
model=model,
potential_fn=potential_fn,
step_size=step_size,
adapt_step_size=adapt_step_size,
adapt_mass_matrix=adapt_mass_matrix,
dense_mass=full_mass,
target_accept_prob=target_accept_prob,
trajectory_length=trajectory_length,
)
class NUTS(hmc.NUTS):
def __init__(
self,
model=None,
potential_fn=None,
step_size=1,
adapt_step_size=True,
adapt_mass_matrix=True,
full_mass=False,
use_multinomial_sampling=True,
transforms=None,
max_plate_nesting=None,
jit_compile=False,
jit_options=None,
ignore_jit_warnings=False,
trajectory_length=2 * math.pi,
target_accept_prob=0.8,
max_tree_depth=10,
):
if potential_fn is not None:
raise ValueError(
"Only `model` argument is supported in generic module;"
" `potential_fn` is not supported."
)
super(NUTS, self).__init__(
model=model,
potential_fn=potential_fn,
step_size=step_size,
adapt_step_size=adapt_step_size,
adapt_mass_matrix=adapt_mass_matrix,
dense_mass=full_mass,
target_accept_prob=target_accept_prob,
trajectory_length=trajectory_length,
max_tree_depth=max_tree_depth,
)
class MCMC(object):
def __init__(
self,
kernel,
num_samples,
num_warmup=None,
initial_params=None,
num_chains=1,
hook_fn=None,
mp_context=None,
disable_progbar=False,
disable_validation=True,
transforms=None,
):
if num_warmup is None:
num_warmup = num_samples
self._initial_params = initial_params
self._mcmc = mcmc.MCMC(
kernel,
num_warmup,
num_samples,
num_chains=num_chains,
progress_bar=(not disable_progbar),
)
def run(self, *args, rng_key=None, **kwargs):
if rng_key is None:
rng_key = numpyro.prng_key()
self._mcmc.run(rng_key, *args, init_params=self._initial_params, **kwargs)
def get_samples(self, num_samples=None, group_by_chain=False):
if num_samples is not None:
raise ValueError("`num_samples` arg unsupported in NumPyro.")
return self._mcmc.get_samples(group_by_chain=group_by_chain)
def summary(self, prob=0.9):
self._mcmc.print_summary()
class SVI(svi.SVI):
def __init__(
self,
model,
guide,
optim,
loss,
loss_and_grads=None,
num_samples=10,
num_steps=0,
**kwargs
):
super(SVI, self).__init__(model=model, guide=guide, optim=optim, loss=loss)
self.svi_state = None
def evaluate_loss(self, *args, **kwargs):
return self.evaluate(self.svi_state, *args, **kwargs)
def step(self, *args, rng_key=None, **kwargs):
if self.svi_state is None:
if rng_key is None:
rng_key = numpyro.prng_key()
self.svi_state = self.init(rng_key, *args, **kwargs)
try:
self.svi_state, loss = jit(self.update)(self.svi_state, *args, **kwargs)
except TypeError as e:
if "not a valid JAX type" in str(e):
raise TypeError(
"NumPyro backend requires args, kwargs to be arrays or tuples, "
"dicts of arrays."
) from e
else:
raise e
params = jit(super(SVI, self).get_params)(self.svi_state)
get_param_store().update(params)
return loss
def get_params(self):
return super(SVI, self).get_params(self.svi_state)
class Trace_ELBO(elbo.Trace_ELBO):
def __init__(
self,
num_particles=1,
max_plate_nesting=float("inf"),
max_iarange_nesting=None, # DEPRECATED
vectorize_particles=False,
strict_enumeration_warning=True,
ignore_jit_warnings=False,
jit_options=None,
retain_graph=None,
tail_adaptive_beta=-1.0,
):
super(Trace_ELBO, self).__init__(num_particles=num_particles)
# JIT is enabled by default
JitTrace_ELBO = Trace_ELBO
|
yellowbrick/datasets/path.py | mrtrkmn/yellowbrick | 3,662 | 11131543 | # yellowbrick.datasets.path
# Helper functions for looking up dataset paths.
#
# Author: <NAME>
# Created: Thu Jul 26 14:10:51 2018 -0400
#
# Copyright (C) 2018 The scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: path.py [7082742] <EMAIL> $
"""
Helper functions for looking up dataset paths.
"""
##########################################################################
## Imports
##########################################################################
import os
import shutil
from .signature import sha256sum
from yellowbrick.exceptions import DatasetsError
##########################################################################
## Fixtures
##########################################################################
FIXTURES = os.path.join(os.path.dirname(__file__), "fixtures")
##########################################################################
## Dataset path utilities
##########################################################################
def get_data_home(path=None):
"""
Return the path of the Yellowbrick data directory. This folder is used by
dataset loaders to avoid downloading data several times.
By default, this folder is colocated with the code in the install directory
so that data shipped with the package can be easily located. Alternatively
it can be set by the ``$YELLOWBRICK_DATA`` environment variable, or
programmatically by giving a folder path. Note that the ``'~'`` symbol is
expanded to the user home directory, and environment variables are also
expanded when resolving the path.
"""
if path is None:
path = os.environ.get("YELLOWBRICK_DATA", FIXTURES)
path = os.path.expanduser(path)
path = os.path.expandvars(path)
if not os.path.exists(path):
os.makedirs(path)
return path
def find_dataset_path(dataset, data_home=None, fname=None, ext=".csv.gz", raises=True):
"""
Looks up the path to the dataset specified in the data home directory,
which is found using the ``get_data_home`` function. By default data home
is colocated with the code, but can be modified with the YELLOWBRICK_DATA
environment variable, or passing in a different directory.
The file returned will be, by default, the name of the dataset in compressed
CSV format. Other files and extensions can be passed in to locate other data
types or auxiliary files.
If the dataset is not found a ``DatasetsError`` is raised by default.
Parameters
----------
dataset : str
The name of the dataset; should either be a folder in data home or
specified in the yellowbrick.datasets.DATASETS variable.
data_home : str, optional
The path on disk where data is stored. If not passed in, it is looked
up from YELLOWBRICK_DATA or the default returned by ``get_data_home``.
fname : str, optional
The filename to look up in the dataset path, by default it will be the
name of the dataset. The fname must include an extension.
ext : str, default: ".csv.gz"
The extension of the data to look up in the dataset path, if the fname
is specified then the ext parameter is ignored. If ext is None then
the directory of the dataset will be returned.
raises : bool, default: True
If the path does not exist, raises a DatasetsError unless this flag is set
to False, at which point None is returned (e.g. for checking if the
path exists or not).
Returns
-------
path : str or None
A path to the requested file, guaranteed to exist if an exception is
not raised during processing of the request (unless None is returned).
raises : DatasetsError
If raises is True and the path does not exist, raises a DatasetsError.
"""
# Figure out the root directory of the datasets
data_home = get_data_home(data_home)
# Figure out the relative path to the dataset
if fname is None:
if ext is None:
path = os.path.join(data_home, dataset)
else:
path = os.path.join(data_home, dataset, "{}{}".format(dataset, ext))
else:
path = os.path.join(data_home, dataset, fname)
# Determine if the path exists
if not os.path.exists(path):
# Suppress exceptions if required
if not raises:
return None
raise DatasetsError(
("could not find dataset at {} - does it need to be downloaded?").format(
path
)
)
return path
def dataset_exists(dataset, data_home=None):
"""
Checks to see if a directory with the name of the specified dataset exists
in the data home directory, found with ``get_data_home``.
Parameters
----------
dataset : str
The name of the dataset; should either be a folder in data home or
specified in the yellowbrick.datasets.DATASETS variable.
data_home : str, optional
The path on disk where data is stored. If not passed in, it is looked
up from YELLOWBRICK_DATA or the default returned by ``get_data_home``.
Returns
-------
exists : bool
If a folder with the dataset name is in the data home directory.
"""
data_home = get_data_home(data_home)
path = os.path.join(data_home, dataset)
return os.path.exists(path) and os.path.isdir(path)
def dataset_archive(dataset, signature, data_home=None, ext=".zip"):
"""
Checks to see if the dataset archive file exists in the data home directory,
found with ``get_data_home``. By specifying the signature, this function
also checks to see if the archive is the latest version by comparing the
sha256sum of the local archive with the specified signature.
Parameters
----------
dataset : str
The name of the dataset; should either be a folder in data home or
specified in the yellowbrick.datasets.DATASETS variable.
signature : str
The SHA 256 signature of the dataset, used to determine if the archive
is the latest version of the dataset or not.
data_home : str, optional
The path on disk where data is stored. If not passed in, it is looked
up from YELLOWBRICK_DATA or the default returned by ``get_data_home``.
ext : str, default: ".zip"
The extension of the archive file.
Returns
-------
exists : bool
True if the dataset archive exists and is the latest version.
"""
data_home = get_data_home(data_home)
path = os.path.join(data_home, dataset + ext)
if os.path.exists(path) and os.path.isfile(path):
return sha256sum(path) == signature
return False
def cleanup_dataset(dataset, data_home=None, ext=".zip"):
"""
Removes the dataset directory and archive file from the data home directory.
Parameters
----------
dataset : str
The name of the dataset; should either be a folder in data home or
specified in the yellowbrick.datasets.DATASETS variable.
data_home : str, optional
The path on disk where data is stored. If not passed in, it is looked
up from YELLOWBRICK_DATA or the default returned by ``get_data_home``.
ext : str, default: ".zip"
The extension of the archive file.
Returns
-------
removed : int
The number of objects removed from data_home.
"""
removed = 0
data_home = get_data_home(data_home)
# Paths to remove
datadir = os.path.join(data_home, dataset)
archive = os.path.join(data_home, dataset + ext)
# Remove directory and contents
if os.path.exists(datadir):
shutil.rmtree(datadir)
removed += 1
# Remove the archive file
if os.path.exists(archive):
os.remove(archive)
removed += 1
return removed
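# Illustrative usage sketch (assumes the "concrete" dataset has already been
# downloaded into the data home directory):
#
#   data_home = get_data_home()                  # e.g. the bundled fixtures dir
#   csv_path = find_dataset_path("concrete")     # .../concrete/concrete.csv.gz
#   if dataset_exists("concrete"):
#       cleanup_dataset("concrete")              # removes the folder and archive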
|
env/lib/python3.8/site-packages/fiona/fio/load.py | CristianCristanchoT/predyctiva | 778 | 11131550 | """$ fio load"""
from functools import partial
import logging
import click
import cligj
import fiona
from fiona.fio import options, with_context_env
from fiona.schema import FIELD_TYPES_MAP_REV
from fiona.transform import transform_geom
def _cb_key_val(ctx, param, value):
"""
click callback to validate `--opt KEY1=VAL1 --opt KEY2=VAL2` and collect
in a dictionary like the one below, which is what the CLI function receives.
If no value or `None` is received then an empty dictionary is returned.
{
'KEY1': 'VAL1',
'KEY2': 'VAL2'
}
Note: `==VAL` breaks this as `str.split('=', 1)` is used.
"""
if not value:
return {}
else:
out = {}
for pair in value:
if "=" not in pair:
raise click.BadParameter(
"Invalid syntax for KEY=VAL arg: {}".format(pair)
)
else:
k, v = pair.split("=", 1)
k = k.lower()
v = v.lower()
out[k] = None if v.lower() in ["none", "null", "nil", "nada"] else v
return out
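# Worked example of the callback above (illustrative option values):
# `--co SHPT=POLYGON --co ENCODING=UTF-8` arrives here as
# value=('SHPT=POLYGON', 'ENCODING=UTF-8') and is returned as
# {'shpt': 'polygon', 'encoding': 'utf-8'}; `--co NADA=none` yields {'nada': None}.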
@click.command(short_help="Load GeoJSON to a dataset in another format.")
@click.argument('output', required=True)
@click.option('-f', '--format', '--driver', 'driver', required=True,
help="Output format driver name.")
@options.src_crs_opt
@click.option('--dst-crs', '--dst_crs',
help="Destination CRS. Defaults to --src-crs when not given.")
@cligj.features_in_arg
@click.option(
"--layer",
metavar="INDEX|NAME",
callback=options.cb_layer,
help="Load features into specified layer. Layers use "
"zero-based numbering when accessed by index.",
)
@click.option(
"--co",
"--profile",
"creation_options",
metavar="NAME=VALUE",
multiple=True,
callback=_cb_key_val,
help="Driver specific creation options. See the documentation for the selected output driver for more information.",
)
@click.pass_context
@with_context_env
def load(ctx, output, driver, src_crs, dst_crs, features, layer, creation_options):
"""Load features from JSON to a file in another format.
The input is a GeoJSON feature collection or optionally a sequence of
GeoJSON feature objects.
"""
logger = logging.getLogger(__name__)
dst_crs = dst_crs or src_crs
if src_crs and dst_crs and src_crs != dst_crs:
transformer = partial(transform_geom, src_crs, dst_crs,
antimeridian_cutting=True, precision=-1)
else:
def transformer(x):
return x
def feature_gen():
for feat in features:
feat['geometry'] = transformer(feat['geometry'])
yield feat
try:
source = feature_gen()
# Use schema of first feature as a template.
# TODO: schema specified on command line?
first = next(source)
schema = {'geometry': first['geometry']['type']}
schema['properties'] = dict([
(k, FIELD_TYPES_MAP_REV.get(type(v)) or 'str')
for k, v in first['properties'].items()])
with fiona.open(
output,
"w",
driver=driver,
crs=dst_crs,
schema=schema,
layer=layer,
**creation_options
) as dst:
dst.write(first)
dst.writerecords(source)
except Exception:
logger.exception("Exception caught during processing")
raise click.Abort()
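# Illustrative invocation sketch (driver name, CRS, and paths are examples
# only, not prescribed values):
#
#   $ fio dump input.shp | fio load -f "ESRI Shapefile" --src-crs EPSG:4326 out.shp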
|
pythonforandroid/recipes/numpy/__init__.py | syrykh/python-for-android | 6,278 | 11131561 | from pythonforandroid.recipe import CompiledComponentsPythonRecipe
from pythonforandroid.logger import shprint, info
from pythonforandroid.util import current_directory
from multiprocessing import cpu_count
from os.path import join
import glob
import sh
class NumpyRecipe(CompiledComponentsPythonRecipe):
version = '1.18.1'
url = 'https://pypi.python.org/packages/source/n/numpy/numpy-{version}.zip'
site_packages_name = 'numpy'
depends = ['setuptools', 'cython']
install_in_hostpython = True
call_hostpython_via_targetpython = False
patches = [
join('patches', 'hostnumpy-xlocale.patch'),
join('patches', 'remove-default-paths.patch'),
join('patches', 'add_libm_explicitly_to_build.patch'),
join('patches', 'compiler_cxx_fix.patch'),
]
def _build_compiled_components(self, arch):
info('Building compiled components in {}'.format(self.name))
env = self.get_recipe_env(arch)
with current_directory(self.get_build_dir(arch.arch)):
hostpython = sh.Command(self.real_hostpython_location)
if self.install_in_hostpython:
shprint(hostpython, 'setup.py', 'clean', '--all', '--force', _env=env)
hostpython = sh.Command(self.hostpython_location)
shprint(hostpython, 'setup.py', self.build_cmd, '-v',
_env=env, *self.setup_extra_args)
build_dir = glob.glob('build/lib.*')[0]
shprint(sh.find, build_dir, '-name', '"*.o"', '-exec',
env['STRIP'], '{}', ';', _env=env)
def _rebuild_compiled_components(self, arch, env):
info('Rebuilding compiled components in {}'.format(self.name))
hostpython = sh.Command(self.real_hostpython_location)
shprint(hostpython, 'setup.py', 'clean', '--all', '--force', _env=env)
shprint(hostpython, 'setup.py', self.build_cmd, '-v', _env=env,
*self.setup_extra_args)
def build_compiled_components(self, arch):
self.setup_extra_args = ['-j', str(cpu_count())]
self._build_compiled_components(arch)
self.setup_extra_args = []
def rebuild_compiled_components(self, arch, env):
self.setup_extra_args = ['-j', str(cpu_count())]
self._rebuild_compiled_components(arch, env)
self.setup_extra_args = []
recipe = NumpyRecipe()
|
tools/localedata/extract_icu_data.py | rio-31/android_frameworks_base-1 | 164 | 11131563 | #!/usr/bin/env python
#
# Copyright 2016 The Android Open Source Project. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Generate a C++ data table containing locale data."""
import collections
import glob
import os.path
import sys
def get_locale_parts(locale):
"""Split a locale into three parts, for langauge, script, and region."""
parts = locale.split('_')
if len(parts) == 1:
return (parts[0], None, None)
elif len(parts) == 2:
if len(parts[1]) == 4: # parts[1] is a script
return (parts[0], parts[1], None)
else:
return (parts[0], None, parts[1])
else:
assert len(parts) == 3
return tuple(parts)
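# For example: get_locale_parts('en') -> ('en', None, None),
# get_locale_parts('zh_Hant') -> ('zh', 'Hant', None), and
# get_locale_parts('zh_Hant_TW') -> ('zh', 'Hant', 'TW').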
def read_likely_subtags(input_file_name):
"""Read and parse ICU's likelySubtags.txt."""
with open(input_file_name) as input_file:
likely_script_dict = {
# Android's additions for pseudo-locales. These internal codes make
# sure that the pseudo-locales would not match other English or
# Arabic locales. (We can't use private-use ISO 15924 codes, since
# they may be used by apps for other purposes.)
"en_XA": "~~~A",
"ar_XB": "~~~B",
# Removed data from later versions of ICU
"ji": "Hebr", # Old code for Yiddish, still used in Java and Android
}
representative_locales = {
# Android's additions
"en_Latn_GB", # representative for en_Latn_001
"es_Latn_MX", # representative for es_Latn_419
"es_Latn_US", # representative for es_Latn_419 (not the best idea,
# but Android has been shipping with it for quite a
# while. Fortunately, MX < US, so if both exist, MX
# would be chosen.)
}
for line in input_file:
line = unicode(line, 'UTF-8').strip(u' \n\uFEFF').encode('UTF-8')
if line.startswith('//'):
continue
if '{' in line and '}' in line:
from_locale = line[:line.index('{')]
to_locale = line[line.index('"')+1:line.rindex('"')]
from_lang, from_scr, from_region = get_locale_parts(from_locale)
_, to_scr, to_region = get_locale_parts(to_locale)
if from_lang == 'und':
continue # not very useful for our purposes
if from_region is None and to_region not in ['001', 'ZZ']:
representative_locales.add(to_locale)
if from_scr is None:
likely_script_dict[from_locale] = to_scr
return likely_script_dict, frozenset(representative_locales)
# From packLanguageOrRegion() in ResourceTypes.cpp
def pack_language_or_region(inp, base):
"""Pack langauge or region in a two-byte tuple."""
if inp is None:
return (0, 0)
elif len(inp) == 2:
return ord(inp[0]), ord(inp[1])
else:
assert len(inp) == 3
base = ord(base)
first = ord(inp[0]) - base
second = ord(inp[1]) - base
third = ord(inp[2]) - base
return (0x80 | (third << 2) | (second >>3),
((second << 5) | first) & 0xFF)
# From packLanguage() in ResourceTypes.cpp
def pack_language(language):
"""Pack language in a two-byte tuple."""
return pack_language_or_region(language, 'a')
# From packRegion() in ResourceTypes.cpp
def pack_region(region):
"""Pack region in a two-byte tuple."""
return pack_language_or_region(region, '0')
def pack_to_uint32(locale):
"""Pack language+region of locale into a 32-bit unsigned integer."""
lang, _, region = get_locale_parts(locale)
plang = pack_language(lang)
pregion = pack_region(region)
return (plang[0] << 24) | (plang[1] << 16) | (pregion[0] << 8) | pregion[1]
def dump_script_codes(all_scripts):
"""Dump the SCRIPT_CODES table."""
print 'const char SCRIPT_CODES[][4] = {'
for index, script in enumerate(all_scripts):
print " /* %-2d */ {'%c', '%c', '%c', '%c'}," % (
index, script[0], script[1], script[2], script[3])
print '};'
print
def dump_script_data(likely_script_dict, all_scripts):
"""Dump the script data."""
print
print 'const std::unordered_map<uint32_t, uint8_t> LIKELY_SCRIPTS({'
for locale in sorted(likely_script_dict.keys()):
script = likely_script_dict[locale]
print ' {0x%08Xu, %2du}, // %s -> %s' % (
pack_to_uint32(locale),
all_scripts.index(script),
locale.replace('_', '-'),
script)
print '});'
def pack_to_uint64(locale):
"""Pack a full locale into a 64-bit unsigned integer."""
_, script, _ = get_locale_parts(locale)
return ((pack_to_uint32(locale) << 32) |
(ord(script[0]) << 24) |
(ord(script[1]) << 16) |
(ord(script[2]) << 8) |
ord(script[3]))
def dump_representative_locales(representative_locales):
"""Dump the set of representative locales."""
print
print 'std::unordered_set<uint64_t> REPRESENTATIVE_LOCALES({'
for locale in sorted(representative_locales):
print ' 0x%08XLLU, // %s' % (
pack_to_uint64(locale),
locale)
print '});'
def read_and_dump_likely_data(icu_data_dir):
"""Read and dump the likely-script data."""
likely_subtags_txt = os.path.join(icu_data_dir, 'misc', 'likelySubtags.txt')
likely_script_dict, representative_locales = read_likely_subtags(
likely_subtags_txt)
all_scripts = list(set(likely_script_dict.values()))
assert len(all_scripts) <= 256
all_scripts.sort()
dump_script_codes(all_scripts)
dump_script_data(likely_script_dict, all_scripts)
dump_representative_locales(representative_locales)
return likely_script_dict
def read_parent_data(icu_data_dir):
"""Read locale parent data from ICU data files."""
all_icu_data_files = glob.glob(os.path.join(icu_data_dir, '*', '*.txt'))
parent_dict = {}
for data_file in all_icu_data_files:
locale = os.path.splitext(os.path.basename(data_file))[0]
with open(data_file) as input_file:
for line in input_file:
if '%%Parent' in line:
parent = line[line.index('"')+1:line.rindex('"')]
if locale in parent_dict:
# Different files shouldn't have different parent info
assert parent_dict[locale] == parent
else:
parent_dict[locale] = parent
elif locale.startswith('ar_') and 'default{"latn"}' in line:
# Arabic parent overrides for ASCII digits. Since
# Unicode extensions are not supported in ResourceTypes,
# we will use ar-015 (Arabic, Northern Africa) instead
# of the more correct ar-u-nu-latn.
parent_dict[locale] = 'ar_015'
return parent_dict
def get_likely_script(locale, likely_script_dict):
"""Find the likely script for a locale, given the likely-script dictionary.
"""
if locale.count('_') == 2:
# it already has a script
return locale.split('_')[1]
elif locale in likely_script_dict:
return likely_script_dict[locale]
else:
language = locale.split('_')[0]
return likely_script_dict[language]
def dump_parent_data(script_organized_dict):
"""Dump information for parents of locales."""
sorted_scripts = sorted(script_organized_dict.keys())
print
for script in sorted_scripts:
parent_dict = script_organized_dict[script]
print ('const std::unordered_map<uint32_t, uint32_t> %s_PARENTS({'
% script.upper())
for locale in sorted(parent_dict.keys()):
parent = parent_dict[locale]
print ' {0x%08Xu, 0x%08Xu}, // %s -> %s' % (
pack_to_uint32(locale),
pack_to_uint32(parent),
locale.replace('_', '-'),
parent.replace('_', '-'))
print '});'
print
print 'const struct {'
print ' const char script[4];'
print ' const std::unordered_map<uint32_t, uint32_t>* map;'
print '} SCRIPT_PARENTS[] = {'
for script in sorted_scripts:
print " {{'%c', '%c', '%c', '%c'}, &%s_PARENTS}," % (
script[0], script[1], script[2], script[3],
script.upper())
print '};'
def dump_parent_tree_depth(parent_dict):
"""Find and dump the depth of the parent tree."""
max_depth = 1
for locale, _ in parent_dict.items():
depth = 1
while locale in parent_dict:
locale = parent_dict[locale]
depth += 1
max_depth = max(max_depth, depth)
assert max_depth < 5 # Our algorithms assume small max_depth
print
print 'const size_t MAX_PARENT_DEPTH = %d;' % max_depth
def read_and_dump_parent_data(icu_data_dir, likely_script_dict):
"""Read parent data from ICU and dump it."""
parent_dict = read_parent_data(icu_data_dir)
script_organized_dict = collections.defaultdict(dict)
for locale in parent_dict:
parent = parent_dict[locale]
if parent == 'root':
continue
script = get_likely_script(locale, likely_script_dict)
script_organized_dict[script][locale] = parent_dict[locale]
dump_parent_data(script_organized_dict)
dump_parent_tree_depth(parent_dict)
def main():
"""Read the data files from ICU and dump the output to a C++ file."""
source_root = sys.argv[1]
icu_data_dir = os.path.join(
source_root,
'external', 'icu', 'icu4c', 'source', 'data')
print '// Auto-generated by %s' % sys.argv[0]
print
likely_script_dict = read_and_dump_likely_data(icu_data_dir)
read_and_dump_parent_data(icu_data_dir, likely_script_dict)
if __name__ == '__main__':
main()
|
hail/python/hail/typecheck/check.py | tdeboer-ilmn/hail | 789 | 11131565 | import re
import inspect
import abc
import collections
from decorator import decorator
class TypecheckFailure(Exception):
pass
def identity(x):
return x
def extract(t):
m = re.match("<(type|class) '(.*)'>", str(t))
if m:
return m.groups()[1]
else:
return str(t)
class TypeChecker(object):
__metaclass__ = abc.ABCMeta
def __init__(self):
pass
@abc.abstractmethod
def check(self, x, caller, param):
...
@abc.abstractmethod
def expects(self):
...
def format(self, arg):
return f"{extract(type(arg))}: {arg}"
class DeferredChecker(TypeChecker):
def __init__(self, f):
super().__init__()
self.f = f
self._tc = None
@property
def tc(self):
if self._tc is None:
t = self.f()
if isinstance(t, type):
self._tc = LiteralChecker(t)
elif isinstance(t, TypeChecker):
self._tc = t
else:
raise RuntimeError("deferred typechecker must return 'type' or 'TypeChecker', found '%s'" % type(t))
return self._tc
def check(self, x, caller, param):
return self.tc.check(x, caller, param)
def expects(self):
return self.tc.expects()
class MultipleTypeChecker(TypeChecker):
def __init__(self, checkers):
flat_checkers = []
for c in checkers:
if isinstance(c, MultipleTypeChecker):
for cc in c.checkers:
flat_checkers.append(cc)
else:
flat_checkers.append(c)
self.checkers = flat_checkers
super(MultipleTypeChecker, self).__init__()
def check(self, x, caller, param):
for tc in self.checkers:
try:
return tc.check(x, caller, param)
except TypecheckFailure:
pass
raise TypecheckFailure()
def expects(self):
return '(' + ' or '.join([c.expects() for c in self.checkers]) + ')'
class SequenceChecker(TypeChecker):
def __init__(self, element_checker):
self.ec = element_checker
super(SequenceChecker, self).__init__()
def check(self, x, caller, param):
# reject str because of errors due to sequenceof(strlike) permitting str
if not isinstance(x, collections.abc.Sequence) or isinstance(x, str):
raise TypecheckFailure
x_ = []
tc = self.ec
for elt in x:
elt_ = tc.check(elt, caller, param)
x_.append(elt_)
return x_
def expects(self):
return 'Sequence[%s]' % (self.ec.expects())
class SetChecker(TypeChecker):
def __init__(self, element_checker):
self.ec = element_checker
super(SetChecker, self).__init__()
def check(self, x, caller, param):
if not isinstance(x, set):
raise TypecheckFailure
x_ = set()
tc = self.ec
for elt in x:
elt_ = tc.check(elt, caller, param)
x_.add(elt_)
return x_
def expects(self):
return 'set[%s]' % (self.ec.expects())
class TupleChecker(TypeChecker):
def __init__(self, element_checker):
self.ec = element_checker
super(TupleChecker, self).__init__()
def check(self, x, caller, param):
if not isinstance(x, tuple):
raise TypecheckFailure
x_ = []
tc = self.ec
for elt in x:
elt_ = tc.check(elt, caller, param)
x_.append(elt_)
return tuple(x_)
def expects(self):
return 'tuple[%s]' % (self.ec.expects())
class DictChecker(TypeChecker):
def __init__(self, key_checker, value_checker):
self.kc = key_checker
self.vc = value_checker
super(DictChecker, self).__init__()
def check(self, x, caller, param):
if not isinstance(x, collections.abc.Mapping):
raise TypecheckFailure
x_ = {}
kc = self.kc
vc = self.vc
for k, v in x.items():
k_ = kc.check(k, caller, param)
v_ = vc.check(v, caller, param)
x_[k_] = v_
return x_
def expects(self):
return 'Mapping[%s, %s]' % (self.kc.expects(), self.vc.expects())
def coerce(self, x):
kc = self.kc
vc = self.vc
return {kc.coerce(k): vc.coerce(v) for k, v in x}
class SizedTupleChecker(TypeChecker):
def __init__(self, *elt_checkers):
self.ec = elt_checkers
self.n = len(elt_checkers)
super(SizedTupleChecker, self).__init__()
def check(self, x, caller, param):
if not (isinstance(x, tuple) and len(x) == len(self.ec)):
raise TypecheckFailure
x_ = []
for tc, elt in zip(self.ec, x):
elt_ = tc.check(elt, caller, param)
x_.append(elt_)
return tuple(x_)
def expects(self):
return 'tuple[' + ','.join(["{}".format(ec.expects()) for ec in self.ec]) + ']'
class SliceChecker(TypeChecker):
def __init__(self, start_checker, stop_checker, step_checker):
self.startc = start_checker
self.stopc = stop_checker
self.stepc = step_checker
super(SliceChecker, self).__init__()
def check(self, x, caller, param):
if not isinstance(x, slice):
raise TypecheckFailure
start_ = self.startc.check(x.start, caller, param)
stop_ = self.stopc.check(x.stop, caller, param)
step_ = self.stepc.check(x.step, caller, param)
return slice(start_, stop_, step_)
def expects(self):
return f'slice[{self.startc.expects()}, {self.stopc.expects()}, {self.stepc.expects()}]'
class LinkedListChecker(TypeChecker):
def __init__(self, type):
self.type = type
super(LinkedListChecker, self).__init__()
def check(self, x, caller, param):
from hail.utils import LinkedList
if not isinstance(x, LinkedList):
raise TypecheckFailure
if x.type is not self.type:
raise TypecheckFailure
return x
def expects(self):
return 'linkedlist[%s]' % self.type
class AnyChecker(TypeChecker):
def __init__(self):
super(AnyChecker, self).__init__()
def check(self, x, caller, param):
return x
def expects(self):
return 'any'
class CharChecker(TypeChecker):
def __init__(self):
super(CharChecker, self).__init__()
def check(self, x, caller, param):
if isinstance(x, str) and len(x) == 1:
return x
else:
raise TypecheckFailure
def expects(self):
return 'char'
class LiteralChecker(TypeChecker):
def __init__(self, t):
self.t = t
super(LiteralChecker, self).__init__()
def check(self, x, caller, param):
if isinstance(x, self.t):
return x
raise TypecheckFailure
def expects(self):
return extract(self.t)
class LazyChecker(TypeChecker):
def __init__(self):
self.t = None
super(LazyChecker, self).__init__()
def set(self, t):
self.t = t
def check(self, x, caller, param):
if not self.t:
raise RuntimeError("LazyChecker not initialized. Use 'set' to provide the expected type")
if isinstance(x, self.t):
return x
else:
raise TypecheckFailure
def expects(self):
if not self.t:
raise RuntimeError("LazyChecker not initialized. Use 'set' to provide the expected type")
return extract(self.t)
class ExactlyTypeChecker(TypeChecker):
def __init__(self, v, reference_equality=False):
self.v = v
self.reference_equality = reference_equality
super(ExactlyTypeChecker, self).__init__()
def check(self, x, caller, param):
if self.reference_equality and x is self.v:
return x
elif not self.reference_equality and x == self.v:
return x
else:
raise TypecheckFailure
def expects(self):
return repr(self.v)
class CoercionChecker(TypeChecker):
"""Type checker that performs argument transformations.
The `fs` argument should be a varargs of 2-tuples that each contain a
TypeChecker and a lambda function, e.g.:
((only(int), lambda x: x * 2),
(sequenceof(int), lambda x: x[0]))
"""
def __init__(self, *fs):
self.fs = fs
super(CoercionChecker, self).__init__()
def check(self, x, caller, param):
for tc, f in self.fs:
try:
return f(tc.check(x, caller, param))
except TypecheckFailure:
pass
raise TypecheckFailure
def expects(self):
return '(' + ' or '.join([c.expects() for c, _ in self.fs]) + ')'
class AnyFuncChecker(TypeChecker):
def __init__(self):
super(AnyFuncChecker, self).__init__()
def check(self, x, caller, param):
if not callable(x):
raise TypecheckFailure
return x
def expects(self):
return 'function'
class FunctionChecker(TypeChecker):
def __init__(self, nargs, ret_checker):
self.nargs = nargs
self.ret_checker = ret_checker
super(FunctionChecker, self).__init__()
def check(self, x, caller, param):
if not callable(x):
raise TypecheckFailure
spec = inspect.signature(x)
if not len(spec.parameters) == self.nargs:
raise TypecheckFailure
def f(*args):
ret = x(*args)
try:
return self.ret_checker.check(ret, caller, param)
except TypecheckFailure:
raise TypeError("'{caller}': '{param}': expected return type {expected}, found {found}".format(
caller=caller,
param=param,
expected=self.ret_checker.expects(),
found=self.ret_checker.format(ret)
))
return f
def expects(self):
return '{}-argument function'.format(self.nargs)
def format(self, arg):
if not callable(arg):
return super(FunctionChecker, self).format(arg)
spec = inspect.getfullargspec(arg)
return '{}-argument function'.format(len(spec.args))
def only(t):
if isinstance(t, type):
return LiteralChecker(t)
elif callable(t):
return DeferredChecker(t)
elif isinstance(t, TypeChecker):
return t
else:
raise RuntimeError("invalid typecheck signature: expected 'type', 'lambda', or 'TypeChecker', found '%s'" % type(t))
def exactly(v, reference_equality=False):
return ExactlyTypeChecker(v, reference_equality)
def oneof(*args):
return MultipleTypeChecker([only(x) for x in args])
def enumeration(*args):
return MultipleTypeChecker([exactly(x) for x in args])
def nullable(t):
return oneof(exactly(None, reference_equality=True), t)
def sequenceof(t):
return SequenceChecker(only(t))
def tupleof(t):
return TupleChecker(only(t))
def sized_tupleof(*args):
return SizedTupleChecker(*[only(x) for x in args])
def sliceof(startt, stopt, stept):
return SliceChecker(only(startt), only(stopt), only(stept))
def linked_list(t):
return LinkedListChecker(t)
def setof(t):
return SetChecker(only(t))
def dictof(k, v):
return DictChecker(only(k), only(v))
def func_spec(n, tc):
return FunctionChecker(n, only(tc))
anyfunc = AnyFuncChecker()
def transformed(*tcs):
fs = []
for tc, f in tcs:
tc = only(tc)
fs.append((tc, f))
return CoercionChecker(*fs)
def lazy():
return LazyChecker()
anytype = AnyChecker()
numeric = oneof(int, float)
char = CharChecker()
table_key_type = nullable(
oneof(
transformed((str, lambda x: [x])),
sequenceof(str)))
def get_signature(f) -> inspect.Signature:
if hasattr(f, '__memo'):
return f.__memo
else:
signature = inspect.signature(f)
f.__memo = signature
return signature
def check_meta(f, checks, is_method):
if hasattr(f, '__checked'):
return
else:
spec = get_signature(f)
params = list(spec.parameters)
if is_method:
params = params[1:]
signature_namespace = set(params)
tc_namespace = set(checks.keys())
# ensure that the typecheck signature is appropriate and matches the function signature
if signature_namespace != tc_namespace:
unmatched_tc = list(tc_namespace - signature_namespace)
unmatched_sig = list(signature_namespace - tc_namespace)
if unmatched_sig or unmatched_tc:
msg = ''
if unmatched_tc:
msg += 'unmatched typecheck arguments: %s' % unmatched_tc
if unmatched_sig:
if msg:
msg += ', and '
msg += 'function parameters with no defined type: %s' % unmatched_sig
raise RuntimeError('%s: invalid typecheck signature: %s' % (f.__name__, msg))
f.__checked = True
def check_all(f, args, kwargs, checks, is_method):
spec = get_signature(f)
check_meta(f, checks, is_method)
name = f.__name__
arg_list = list(args)
args_ = []
kwargs_ = {}
has_varargs = any(param.kind == param.VAR_POSITIONAL for param in spec.parameters.values())
n_pos_args = len(
list(filter(
lambda p: p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD),
spec.parameters.values())))
if not has_varargs and len(args) > n_pos_args:
raise TypeError(f"'{name}' takes {n_pos_args} positional arguments, found {len(args)}")
for i, (arg_name, param) in enumerate(spec.parameters.items()):
if i == 0 and is_method:
if not isinstance(arg_list[0], object):
raise RuntimeError("no class found as first argument. Did you mean to use 'typecheck' "
"instead of 'typecheck_method'?")
args_.append(args[i])
continue
checker = checks[arg_name]
assert isinstance(param, inspect.Parameter)
keyword_passed_as_positional = param.kind == param.POSITIONAL_OR_KEYWORD and i < len(args)
necessarily_positional = param.kind == param.POSITIONAL_ONLY
if necessarily_positional or keyword_passed_as_positional:
if i >= len(args):
raise TypeError(
f'Expected {n_pos_args} positional arguments, found {len(args)}')
args_.append(arg_check(args[i], name, arg_name, checker))
elif param.kind in (param.KEYWORD_ONLY, param.POSITIONAL_OR_KEYWORD):
arg = kwargs.pop(arg_name, param.default)
if arg is inspect._empty:
raise TypeError(
f"{name}() missing required keyword-only argument '{arg_name}'")
kwargs_[arg_name] = arg_check(arg, name, arg_name, checker)
elif param.kind == param.VAR_POSITIONAL:
# consume the rest of the positional arguments
varargs = args[i:]
for j, arg in enumerate(varargs):
args_.append(args_check(arg, name, arg_name, j, len(varargs), checker))
else:
assert param.kind == param.VAR_KEYWORD
# kwargs now holds all variable kwargs
for kwarg_name, arg in kwargs.items():
kwargs_[kwarg_name] = kwargs_check(arg, name, kwarg_name, checker)
return args_, kwargs_
def typecheck_method(**checkers):
return _make_dec(checkers, is_method=True)
def typecheck(**checkers):
return _make_dec(checkers, is_method=False)
def _make_dec(checkers, is_method):
checkers = {k: only(v) for k, v in checkers.items()}
@decorator
def wrapper(__original_func, *args, **kwargs):
args_, kwargs_ = check_all(__original_func, args, kwargs, checkers, is_method=is_method)
return __original_func(*args_, **kwargs_)
return wrapper
def arg_check(arg, function_name: str, arg_name: str, checker: TypeChecker):
try:
return checker.check(arg, function_name, arg_name)
except TypecheckFailure as e:
raise TypeError("{fname}: parameter '{argname}': "
"expected {expected}, found {found}".format(
fname=function_name,
argname=arg_name,
expected=checker.expects(),
found=checker.format(arg)
)) from e
def args_check(arg,
function_name: str,
arg_name: str,
index: int,
total_varargs: int,
checker: TypeChecker):
try:
return checker.check(arg, function_name, arg_name)
except TypecheckFailure as e:
raise TypeError("{fname}: parameter '*{argname}' (arg {idx} of {tot}): "
"expected {expected}, found {found}".format(
fname=function_name,
argname=arg_name,
idx=index,
tot=total_varargs,
expected=checker.expects(),
found=checker.format(arg)
)) from e
def kwargs_check(arg, function_name: str, kwarg_name: str, checker: TypeChecker):
try:
return checker.check(arg, function_name, kwarg_name)
except TypecheckFailure as e:
raise TypeError("{fname}: keyword argument '{argname}': "
"expected {expected}, found {found}".format(
fname=function_name,
argname=kwarg_name,
expected=checker.expects(),
found=checker.format(arg))) from e
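# Minimal usage sketch (illustrative; relies only on names defined above):
#
#   @typecheck(x=int, names=sequenceof(str), scale=nullable(numeric))
#   def label(x, names, scale=None):
#       return ['{}:{}'.format(n, x) for n in names]
#
#   label(3, ['a', 'b'])       # passes: returns ['a:3', 'b:3']
#   label('3', ['a', 'b'])     # raises TypeError (expected int, found str)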
|
sportsipy/decorators.py | MArtinherz/sportsipy | 221 | 11131570 | from functools import wraps
def int_property_decorator(func):
@property
@wraps(func)
def wrapper(*args):
value = func(*args)
try:
return int(value)
except (TypeError, ValueError):
# If there is no value, default to None. None is statistically
# different from 0 as a player/team who played an entire game and
# contributed nothing is different from one who didn't play at all.
# This enables flexibility for end-users to decide whether they
# want to fill the empty value with any specific number (such as 0
# or an average/median for the category) or keep it empty depending
# on their use-case.
return None
return wrapper
def float_property_decorator(func):
@property
@wraps(func)
def wrapper(*args):
value = func(*args)
try:
return float(value)
except (TypeError, ValueError):
# If there is no value, default to None. None is statistically
# different from 0 as a player/team who played an entire game and
# contributed nothing is different from one who didn't play at all.
# This enables flexibility for end-users to decide whether they
# want to fill the empty value with any specific number (such as 0
# or an average/median for the category) or keep it empty depending
# on their use-case.
return None
return wrapper
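# Illustrative usage sketch (BoxScore and _points are hypothetical, not part of
# this module); the decorator coerces the raw value and falls back to None:
#
#   class BoxScore:
#       def __init__(self, raw):
#           self._points = raw
#
#       @int_property_decorator
#       def points(self):
#           return self._points
#
#   BoxScore('21').points    # -> 21
#   BoxScore('').points      # -> None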
|
tests/graphics/test_base.py | paradoxcell/jcvi | 517 | 11131592 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import pytest
@pytest.mark.parametrize(
"s,expected",
[
("a short name", "a short name"),
(
"a really really long name for you a really really long name for you",
"a really...e for you",
),
("These colors look lovely together", "These co... together"),
],
)
def test_shorten(s, expected):
from jcvi.graphics.base import shorten
assert shorten(s) == expected, "Expect {}".format(expected)
@pytest.mark.parametrize(
"s,expected",
[
("grape_grape vs peach_peach", "grape\_grape vs peach\_peach"),
],
)
def test_latex(s, expected):
from jcvi.graphics.base import latex
assert latex(s) == expected, "Expect {}".format(expected)
|
tests/reraise.py | ZYAZP/python2 | 1,062 | 11131615 | try:
try:
0/0
except Exception:
raise
except Exception as e:
print repr(e)
|
py_eureka_client/netint_utils.py | dev-89/python-eureka-client | 143 | 11131619 | # -*- coding: utf-8 -*-
"""
Copyright (c) 2018 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import socket
from typing import Tuple
from ifaddr import get_adapters
import ipaddress
from py_eureka_client.logger import get_logger
_logger = get_logger("netint_utils")
def get_host_by_ip(ip):
try:
return socket.gethostbyaddr(ip)[0]
except:
_logger.warn("Error when getting host by ip", exc_info=True)
return ip
def get_ip_by_host(host):
try:
return socket.gethostbyname(host)
except:
_logger.warn("Error when getting ip by host", exc_info=True)
return host
def get_first_non_loopback_ip(network: str = "") -> str:
adapters = get_adapters()
for adapter in adapters:
for iface in adapter.ips:
if iface.is_IPv4:
_ip = iface.ip
if network:
if ipaddress.ip_address(_ip) in ipaddress.ip_network(network):
return _ip
elif _ip != "127.0.0.1":
return _ip
return ""
def get_ip_and_host(network: str = "") -> Tuple[str, str]:
ip = get_first_non_loopback_ip(network=network)
if not ip:
host = socket.gethostname()
ip = socket.gethostbyname(host)
else:
host = get_host_by_ip(ip)
return ip, host
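# Illustrative usage sketch, not part of the original module: it exercises the
# helpers above. The CIDR passed to `network` is an arbitrary example value.
if __name__ == "__main__":
    # First non-loopback IPv4 address, optionally restricted to a given network.
    print(get_first_non_loopback_ip())
    print(get_first_non_loopback_ip(network="192.168.0.0/16"))
    # Resolve the address and its host name in one call.
    ip, host = get_ip_and_host()
    print(ip, host)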
|
blacklist/custom.py | yilang0001/gfw_whitelist | 3,462 | 11131622 | #!/usr/bin/python
# -*- coding: utf-8 -*-
def getlist():
liststr = """
cloudfront.net
googlecode.com
verisign.com
qpic.cn
translate-tab.com
layervault.com
list-manage.com
goagent.co
goo.gl
"""
return set(liststr.splitlines(False))
|
bin/snn.py | ppmdatix/rtdl | 298 | 11131631 | <filename>bin/snn.py
# %%
import math
import typing as ty
from pathlib import Path
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import zero
from torch import Tensor
import lib
# %%
class SNN(nn.Module):
def __init__(
self,
*,
d_in: int,
d_layers: ty.List[int],
dropout: float,
d_out: int,
categories: ty.Optional[ty.List[int]],
d_embedding: int,
) -> None:
super().__init__()
assert d_layers
if categories is not None:
d_in += len(categories) * d_embedding
category_offsets = torch.tensor([0] + categories[:-1]).cumsum(0)
self.register_buffer('category_offsets', category_offsets)
self.category_embeddings = nn.Embedding(sum(categories), d_embedding)
nn.init.kaiming_uniform_(self.category_embeddings.weight, a=math.sqrt(5))
print(f'{self.category_embeddings.weight.shape=}')
self.layers = (
nn.ModuleList(
[
nn.Linear(d_layers[i - 1] if i else d_in, x)
for i, x in enumerate(d_layers)
]
)
if d_layers
else None
)
self.normalizations = None
self.activation = nn.SELU()
self.dropout = dropout
self.head = nn.Linear(d_layers[-1] if d_layers else d_in, d_out)
# Ensure correct initialization
def init_weights(m):
if isinstance(m, nn.Linear):
nn.init.kaiming_normal_(
m.weight.data, mode='fan_in', nonlinearity='linear'
)
nn.init.zeros_(m.bias.data)
self.apply(init_weights)
@property
def d_embedding(self) -> int:
        return self.head.in_features  # width of the representation produced by encode()
def encode(self, x_num, x_cat):
x = []
if x_num is not None:
x.append(x_num)
if x_cat is not None:
x.append(
self.category_embeddings(x_cat + self.category_offsets[None]).view(
x_cat.size(0), -1
)
)
x = torch.cat(x, dim=-1)
layers = self.layers or []
for i, m in enumerate(layers):
x = m(x)
if self.normalizations:
x = self.normalizations[i](x)
x = self.activation(x)
if self.dropout:
x = F.alpha_dropout(x, self.dropout, self.training)
return x
def calculate_output(self, x: Tensor) -> Tensor:
x = self.head(x)
x = x.squeeze(-1)
return x
def forward(self, x_num: Tensor, x_cat) -> Tensor:
return self.calculate_output(self.encode(x_num, x_cat))
# %%
args, output = lib.load_config()
# %%
zero.set_randomness(args['seed'])
dataset_dir = lib.get_path(args['data']['path'])
stats: ty.Dict[str, ty.Any] = {
'dataset': dataset_dir.name,
'algorithm': Path(__file__).stem,
**lib.load_json(output / 'stats.json'),
}
timer = zero.Timer()
timer.run()
D = lib.Dataset.from_dir(dataset_dir)
X = D.build_X(
normalization=args['data'].get('normalization'),
num_nan_policy='mean',
cat_nan_policy='new',
cat_policy=args['data'].get('cat_policy', 'indices'),
cat_min_frequency=args['data'].get('cat_min_frequency', 0.0),
seed=args['seed'],
)
if not isinstance(X, tuple):
X = (X, None)
zero.set_randomness(args['seed'])
Y, y_info = D.build_y(args['data'].get('y_policy'))
lib.dump_pickle(y_info, output / 'y_info.pickle')
X = tuple(None if x is None else lib.to_tensors(x) for x in X)
Y = lib.to_tensors(Y)
device = lib.get_device()
if device.type != 'cpu':
X = tuple(None if x is None else {k: v.to(device) for k, v in x.items()} for x in X)
Y_device = {k: v.to(device) for k, v in Y.items()}
else:
Y_device = Y
X_num, X_cat = X
if not D.is_multiclass:
Y_device = {k: v.float() for k, v in Y_device.items()}
train_size = D.size(lib.TRAIN)
batch_size, epoch_size = (
stats['batch_size'],
stats['epoch_size'],
) = lib.get_epoch_parameters(train_size, args['training'].get('batch_size', 'v3'))
loss_fn = (
F.binary_cross_entropy_with_logits
if D.is_binclass
else F.cross_entropy
if D.is_multiclass
else F.mse_loss
)
args['model'].setdefault('d_embedding', None)
model = SNN(
d_in=0 if X_num is None else X_num['train'].shape[1],
d_out=D.info['n_classes'] if D.is_multiclass else 1,
categories=lib.get_categories(X_cat),
**args['model'],
).to(device)
stats['n_parameters'] = lib.get_n_parameters(model)
optimizer = lib.make_optimizer(
args['training']['optimizer'],
model.parameters(),
args['training']['lr'],
args['training']['weight_decay'],
)
stream = zero.Stream(lib.IndexLoader(train_size, batch_size, True, device))
progress = zero.ProgressTracker(args['training']['patience'])
training_log = {lib.TRAIN: [], lib.VAL: [], lib.TEST: []}
timer = zero.Timer()
checkpoint_path = output / 'checkpoint.pt'
def print_epoch_info():
print(f'\n>>> Epoch {stream.epoch} | {lib.format_seconds(timer())} | {output}')
print(
' | '.join(
f'{k} = {v}'
for k, v in {
'lr': lib.get_lr(optimizer),
'batch_size': batch_size,
'epoch_size': stats['epoch_size'],
'n_parameters': stats['n_parameters'],
}.items()
)
)
@torch.no_grad()
def predict(m, part):
m.eval()
return torch.cat(
[
model(
None if X_num is None else X_num[part][idx],
None if X_cat is None else X_cat[part][idx],
)
for idx in lib.IndexLoader(
D.size(part), args['training']['eval_batch_size'], False, device
)
]
).cpu()
@torch.no_grad()
def evaluate(parts):
model.eval()
metrics = {}
predictions = {}
for part in parts:
predictions[part] = (
torch.cat(
[
model(
None if X_num is None else X_num[part][idx],
None if X_cat is None else X_cat[part][idx],
)
for idx in lib.IndexLoader(
D.size(part),
args['training']['eval_batch_size'],
False,
device,
)
]
)
.cpu()
.numpy()
)
try:
metrics[part] = lib.calculate_metrics(
D.info['task_type'],
Y[part].numpy(), # type: ignore[code]
predictions[part], # type: ignore[code]
'logits',
y_info,
)
except ValueError:
metrics[part] = {'score': -999999999.0}
for part, part_metrics in metrics.items():
print(f'[{part:<5}]', lib.make_summary(part_metrics))
return metrics, predictions
def save_checkpoint(final):
torch.save(
{
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'stream': stream.state_dict(),
'random_state': zero.get_random_state(),
**{
x: globals()[x]
for x in [
'progress',
'stats',
'timer',
'training_log',
]
},
},
checkpoint_path,
)
lib.dump_stats(stats, output, final)
lib.backup_output(output)
# %%
timer.run()
for epoch in stream.epochs(args['training']['n_epochs']):
print_epoch_info()
model.train()
epoch_losses = []
for batch_idx in epoch:
optimizer.zero_grad()
loss = loss_fn(
model(
None if X_num is None else X_num[lib.TRAIN][batch_idx],
None if X_cat is None else X_cat[lib.TRAIN][batch_idx],
),
Y_device[lib.TRAIN][batch_idx],
)
loss.backward()
optimizer.step()
epoch_losses.append(loss.detach())
epoch_losses = torch.stack(epoch_losses).tolist()
training_log[lib.TRAIN].extend(epoch_losses)
print(f'[{lib.TRAIN}] loss = {round(sum(epoch_losses) / len(epoch_losses), 3)}')
metrics, predictions = evaluate([lib.VAL, lib.TEST])
for k, v in metrics.items():
training_log[k].append(v)
progress.update(metrics[lib.VAL]['score'])
if progress.success:
print('New best epoch!')
stats['best_epoch'] = stream.epoch
stats['metrics'] = metrics
save_checkpoint(False)
for k, v in predictions.items():
np.save(output / f'p_{k}.npy', v)
elif progress.fail:
break
# %%
print('\nRunning the final evaluation...')
model.load_state_dict(torch.load(checkpoint_path)['model'])
stats['metrics'], predictions = evaluate(lib.PARTS)
for k, v in predictions.items():
np.save(output / f'p_{k}.npy', v)
stats['time'] = lib.format_seconds(timer())
save_checkpoint(True)
print('Done!')
|
tests/integration/test_org_it.py | corylevine/okta-sdk-python | 145 | 11131632 | import copy
import pytest
from datetime import datetime
from tests.mocks import MockOktaClient
from okta.models import (OrgSetting,
OrgContactType,
OrgContactTypeObj,
OrgContactUser,
OrgPreferences,
OrgOktaCommunicationSetting,
OrgOktaSupportSettingsObj,
OrgOktaSupportSetting,
UserIdString)
class TestOrgResource:
"""
Integration Tests for the Org Resource
"""
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_get_org_settings(self, fs):
client = MockOktaClient(fs)
org_settings, _, err = await client.get_org_settings()
assert err is None
assert isinstance(org_settings, OrgSetting)
assert org_settings.status == 'ACTIVE'
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_partial_setting_update(self, fs):
client = MockOktaClient(fs)
get_org_settings, _, err = await client.get_org_settings()
assert err is None
updated_setting = {'supportPhoneNumber': '1234567890'}
try:
updated_org_settings, _, err = await client.partial_update_org_setting(updated_setting)
assert err is None
assert updated_org_settings.support_phone_number == '1234567890'
finally:
updated_setting = {'supportPhoneNumber': get_org_settings.support_phone_number}
updated_org_settings, _, err = await client.partial_update_org_setting(updated_setting, keep_empty_params=True)
assert err is None
assert updated_org_settings.support_phone_number == get_org_settings.support_phone_number
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_setting_update(self, fs):
client = MockOktaClient(fs)
get_org_settings, resp, err = await client.get_org_settings()
new_org_settings = copy.deepcopy(get_org_settings)
new_org_settings.support_phone_number = '1234567890'
new_org_settings.company_name = 'NewOrgName'
try:
updated_org_settings, _, err = await client.update_org_setting(new_org_settings)
assert err is None
assert updated_org_settings.support_phone_number == '1234567890'
assert updated_org_settings.company_name == 'NewOrgName'
finally:
updated_org_settings, _, err = await client.update_org_setting(get_org_settings, keep_empty_params=True)
assert err is None
assert updated_org_settings.support_phone_number == get_org_settings.support_phone_number
assert updated_org_settings.company_name == get_org_settings.company_name
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_get_org_contact_types(self, fs):
client = MockOktaClient(fs)
org_contact_types, _, err = await client.get_org_contact_types()
assert err is None
for item in org_contact_types:
assert isinstance(item, OrgContactTypeObj)
assert isinstance(item.contact_type, OrgContactType)
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_get_org_contact_user(self, fs):
client = MockOktaClient(fs)
contact_type = OrgContactType('BILLING')
org_contact_user, _, err = await client.get_org_contact_user(contact_type)
assert err is None
assert isinstance(org_contact_user, OrgContactUser)
assert org_contact_user.user_id is not None
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_update_org_contact_user(self, fs):
client = MockOktaClient(fs)
contact_type = OrgContactType('BILLING')
org_contact_user, _, err = await client.get_org_contact_user(contact_type)
assert err is None
new_contact_type = OrgContactType('TECHNICAL')
try:
updated_user, _, err = await client.update_org_contact_user(new_contact_type,
UserIdString({'userId': org_contact_user.user_id}))
assert err is None
finally:
updated_user, _, err = await client.update_org_contact_user(contact_type,
UserIdString({'userId': org_contact_user.user_id}))
assert err is None
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_get_org_preferences(self, fs):
client = MockOktaClient(fs)
org_preferences, _, err = await client.get_org_preferences()
assert err is None
assert isinstance(org_preferences, OrgPreferences)
assert isinstance(org_preferences.show_end_user_footer, bool)
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_hide_okta_ui_footer(self, fs):
client = MockOktaClient(fs)
org_preferences, _, err = await client.hide_okta_ui_footer()
assert err is None
assert isinstance(org_preferences, OrgPreferences)
assert not org_preferences.show_end_user_footer
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_show_okta_ui_footer(self, fs):
client = MockOktaClient(fs)
org_preferences, _, err = await client.show_okta_ui_footer()
assert err is None
assert isinstance(org_preferences, OrgPreferences)
assert org_preferences.show_end_user_footer
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_get_okta_communication_settings(self, fs):
client = MockOktaClient(fs)
org_communication_setting, _, err = await client.get_okta_communication_settings()
assert err is None
assert isinstance(org_communication_setting, OrgOktaCommunicationSetting)
assert isinstance(org_communication_setting.opt_out_email_users, bool)
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_opt_in_users_to_okta_communication_emails(self, fs):
client = MockOktaClient(fs)
org_communication_setting, _, err = await client.opt_in_users_to_okta_communication_emails()
assert err is None
assert isinstance(org_communication_setting, OrgOktaCommunicationSetting)
assert not org_communication_setting.opt_out_email_users
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_opt_out_users_from_okta_communication_emails(self, fs):
client = MockOktaClient(fs)
org_communication_setting, _, err = await client.opt_out_users_from_okta_communication_emails()
assert err is None
assert isinstance(org_communication_setting, OrgOktaCommunicationSetting)
assert org_communication_setting.opt_out_email_users
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_get_okta_support_settings(self, fs):
client = MockOktaClient(fs)
org_okta_support_setting, _, err = await client.get_org_okta_support_settings()
assert err is None
assert isinstance(org_okta_support_setting, OrgOktaSupportSettingsObj)
assert isinstance(org_okta_support_setting.support, OrgOktaSupportSetting)
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_grant_revoke_okta_support(self, fs):
client = MockOktaClient(fs)
try:
org_okta_support_setting, _, err = await client.grant_okta_support()
assert err is None
assert isinstance(org_okta_support_setting, OrgOktaSupportSettingsObj)
assert isinstance(org_okta_support_setting.support, OrgOktaSupportSetting)
assert org_okta_support_setting.support == 'ENABLED'
finally:
org_okta_support_setting, _, err = await client.revoke_okta_support()
assert err is None
assert isinstance(org_okta_support_setting, OrgOktaSupportSettingsObj)
assert isinstance(org_okta_support_setting.support, OrgOktaSupportSetting)
assert org_okta_support_setting.support == 'DISABLED'
@pytest.mark.vcr()
@pytest.mark.asyncio
async def test_extend_okta_support(self, fs):
client = MockOktaClient(fs)
try:
org_okta_support_setting, _, err = await client.grant_okta_support()
assert err is None
assert org_okta_support_setting.support == 'ENABLED'
extended_org_okta_support_setting, _, err = await client.extend_okta_support()
assert err is None
date1 = datetime.strptime(org_okta_support_setting.expiration, "%Y-%m-%dT%H:%M:%S.%fZ")
date2 = datetime.strptime(extended_org_okta_support_setting.expiration, "%Y-%m-%dT%H:%M:%S.%fZ")
# should be 24h
assert round((date2 - date1).total_seconds() / 3600) == 24
finally:
org_okta_support_setting, _, err = await client.revoke_okta_support()
assert err is None
|
frelatage/report/report.py | Rog3rSm1th/Frelatage | 130 | 11131644 | <reponame>Rog3rSm1th/Frelatage
from typing import Any
class Report:
def __init__(
self,
trace: Any,
input: list,
error: bool,
timeout: bool,
new_error_instruction: bool,
reached_instructions_count: int,
new_reached_instructions_count: int,
instructions_pairs_count: int,
new_instructions_pairs_count: int,
) -> None:
"""
Report of the behavior of a function with given input
"""
self.trace = trace
self.input = input
self.error = error
self.timeout = timeout
self.new_error_instruction = new_error_instruction
self.reached_instructions_count = reached_instructions_count
self.new_reached_instructions_count = new_reached_instructions_count
self.instructions_pairs_count = instructions_pairs_count
self.new_instructions_pairs_count = new_instructions_pairs_count
self.score = self.compute_score()
def compute_score(self) -> int:
"""
The score of a report is used to select the most efficient inputs for the fuzzer's genetic algorithm mutations.
The parameters are : the number of instructions reached, the number of new instructions reached, the number of
pairs of instructions reached, the occurrence of an error/timeout.
"""
result = (
self.reached_instructions_count
+ self.new_reached_instructions_count
+ self.instructions_pairs_count
+ self.new_instructions_pairs_count
)
return result
def __eq__(self, other):
"""
Reports are sorted by score
"""
return self.score == other.score
def __lt__(self, other):
"""
Reports are sorted by score
"""
return self.score < other.score
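# Illustrative sketch, not part of the original module: because __eq__/__lt__
# compare on `score`, reports can be ranked with a plain sort. The trace and
# input values below are placeholders, not real Frelatage fuzzing data.
if __name__ == "__main__":
    weak = Report(None, ["seed-a"], False, False, False, 10, 0, 5, 0)
    strong = Report(None, ["seed-b"], False, False, False, 50, 3, 20, 2)
    ranked = sorted([weak, strong])    # ascending score: weak first
    print([r.score for r in ranked])   # -> [15, 75]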
|
tests/commands/conftest.py | jaromil/bigchaindb | 4,196 | 11131653 | <reponame>jaromil/bigchaindb
# Copyright © 2020 Interplanetary Database Association e.V.,
# BigchainDB and IPDB software contributors.
# SPDX-License-Identifier: (Apache-2.0 AND CC-BY-4.0)
# Code is Apache-2.0 and docs are CC-BY-4.0
from argparse import Namespace
import pytest
@pytest.fixture
def mock_run_configure(monkeypatch):
from bigchaindb.commands import bigchaindb
monkeypatch.setattr(bigchaindb, 'run_configure', lambda *args, **kwargs: None)
@pytest.fixture
def mock_write_config(monkeypatch):
from bigchaindb import config_utils
monkeypatch.setattr(config_utils, 'write_config', lambda *args: None)
@pytest.fixture
def mock_db_init_with_existing_db(monkeypatch):
from bigchaindb.commands import bigchaindb
monkeypatch.setattr(bigchaindb, '_run_init', lambda: None)
@pytest.fixture
def mock_processes_start(monkeypatch):
from bigchaindb import start
monkeypatch.setattr(start, 'start', lambda *args: None)
@pytest.fixture
def mock_generate_key_pair(monkeypatch):
monkeypatch.setattr('bigchaindb.common.crypto.generate_key_pair', lambda: ('privkey', 'pubkey'))
@pytest.fixture
def mock_bigchaindb_backup_config(monkeypatch):
config = {
'database': {'host': 'host', 'port': 12345, 'name': 'adbname'},
}
monkeypatch.setattr('bigchaindb._config', config)
@pytest.fixture
def run_start_args(request):
param = getattr(request, 'param', {})
return Namespace(
config=param.get('config'),
skip_initialize_database=param.get('skip_initialize_database', False),
)
@pytest.fixture
def mocked_setup_logging(mocker):
return mocker.patch(
'bigchaindb.log.setup_logging',
autospec=True,
spec_set=True,
)
|
mozillians/common/monkeypatches.py | divyamoncy/mozillians | 202 | 11131654 | # Code based on funfactory with some modification in order to work with django 1.7
# https://github.com/mozilla/funfactory/blob/master/funfactory/monkeypatches.py
import logging
from django.conf import settings
__all__ = ['patch']
# Idempotence! http://en.wikipedia.org/wiki/Idempotence
_has_patched = False
def patch():
global _has_patched
if _has_patched:
return
# Monkey-patch Django's csrf_protect decorator to use session-based CSRF
# tokens:
if 'session_csrf' in settings.INSTALLED_APPS:
import session_csrf
session_csrf.monkeypatch()
logging.debug("Note: funfactory monkey patches executed in %s" % __file__)
# prevent it from being run again later
_has_patched = True
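# Illustrative sketch, not part of the original module: patch() is meant to be
# called once at startup (e.g. from a WSGI entry point); the module-level flag
# makes later calls no-ops. Configuring minimal standalone Django settings here
# is an assumption for demo purposes only.
if __name__ == "__main__":
    from django.conf import settings as django_settings
    django_settings.configure(INSTALLED_APPS=[])
    patch()
    patch()  # returns immediately because _has_patched is already True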
|
docs/conf.py | eendebakpt/symengine.py | 133 | 11131669 | <filename>docs/conf.py
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
import symengine
# -- Project information -----------------------------------------------------
project = 'symengine'
copyright = '2021, SymEngine development team <<EMAIL>>'
author = 'SymEngine development team <<EMAIL>>'
# The full version, including alpha/beta/rc tags
release = symengine.__version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc", # Consumes docstrings
"sphinx.ext.napoleon", # Allows for Google Style Docs
"sphinx.ext.viewcode", # Links to source code
"sphinx.ext.intersphinx", # Connects to other documentation
"sphinx.ext.todo", # Show TODO details
"sphinx.ext.imgconverter", # Handle svg images
"sphinx.ext.duration", # Shows times in the processing pipeline
"sphinx.ext.mathjax", # Need math support
"sphinx.ext.githubpages", # Puts the .nojekyll and CNAME files
"sphinxcontrib.apidoc", # Automatically sets up sphinx-apidoc
# "recommonmark", # Parses markdown
"m2r2", # Parses markdown in rst
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# API Doc settings
apidoc_module_dir = "../"
apidoc_output_dir = "source"
apidoc_excluded_paths = ["tests"]
apidoc_separate_modules = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_book_theme"
html_title = "Symengine Python Bindings"
# html_logo = "path/to/logo.png"
# html_favicon = "path/to/favicon.ico"
html_theme_options = {
"repository_url": "https://github.com/symengine/symengine.py",
"use_repository_button": True,
"use_issues_button": True,
"use_edit_page_button": True,
"path_to_docs": "docs",
"use_download_button": True,
"home_page_in_toc": True
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
util/detr_weights_to_vistr.py | lukadvisor/VisTR | 646 | 11131687 | '''
convert detr pretrained weights to vistr format
'''
import sys
import torch
import collections
if __name__ == "__main__":
input_path = sys.argv[1]
detr_weights = torch.load(input_path)['model']
vistr_weights = collections.OrderedDict()
for k,v in detr_weights.items():
if k.startswith("detr"):
k = k.replace("detr","vistr")
vistr_weights[k]=v
res = {"model":vistr_weights}
torch.save(res,sys.argv[2])
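# Illustrative usage, not part of the original script; the file names below are
# placeholders for a DETR checkpoint and the desired VisTR output path:
#
#   python util/detr_weights_to_vistr.py detr-r50.pth vistr_r50_init.pth
#
# Keys prefixed with "detr" inside the checkpoint's "model" dict are renamed to
# "vistr" and the result is saved back under a "model" key.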
|
simple_settings/dynamic_settings/database_reader.py | pichatelli/simple-settings | 213 | 11131707 | <filename>simple_settings/dynamic_settings/database_reader.py
from .base import BaseReader
try:
from sqlalchemy import Column, String, engine_from_config
from sqlalchemy.ext.declarative import as_declarative
from sqlalchemy.orm import sessionmaker
except ImportError: # pragma: no cover
raise ImportError(
'To use "database" dynamic settings reader\n'
'you need to install simple-settings with database dependency:\n'
'pip install simple-settings[database] or pip install SQLAlchemy'
)
@as_declarative()
class Base:
"""Base class for declarative class definitions
"""
class SimpleSettings(Base):
"""Database table representation
"""
__tablename__ = 'simple_settings'
key = Column(String, primary_key=True)
value = Column(String)
class DatabaseOperations:
"""Wrapper for database operations
"""
def __init__(self, database_config):
self.db = engine_from_config(database_config)
self.session = sessionmaker(bind=self.db)()
Base.metadata.create_all(self.db)
def _get(self, key):
return self.session.query(SimpleSettings).get(key)
def set(self, key, value):
setting = self._get(key)
if not setting:
setting = SimpleSettings(key=key)
setting.value = value
self.session.add(setting)
self.session.commit()
def get(self, key):
data = self._get(key)
if data:
return data.value
def flush(self):
self.session.query(SimpleSettings).delete()
self.session.commit()
class Reader(BaseReader):
"""
Database settings Reader
A simple orm using getter
"""
_default_conf = {
'sqlalchemy.url': 'sqlite:///:memory:'
}
def __init__(self, conf):
super(Reader, self).__init__(conf)
self.db = DatabaseOperations(self.conf)
def _get(self, key):
return self.db.get(key)
def _set(self, key, value):
self.db.set(key, value)
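# Illustrative sketch, not part of the original module: DatabaseOperations can
# be exercised on its own against the default in-memory SQLite engine; the key
# and value below are placeholders.
if __name__ == "__main__":
    ops = DatabaseOperations({'sqlalchemy.url': 'sqlite:///:memory:'})
    ops.set('SIMPLE_SETTINGS', 'dynamic-value')
    print(ops.get('SIMPLE_SETTINGS'))  # -> 'dynamic-value'
    ops.flush()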
|
src/python/txtai/pipeline/tensors.py | malywonsz/txtai | 1,893 | 11131710 | <reponame>malywonsz/txtai<filename>src/python/txtai/pipeline/tensors.py
"""
Tensor processing framework module
"""
import torch
from .base import Pipeline
class Tensors(Pipeline):
"""
Pipeline backed by a tensor processing framework. Currently supports PyTorch.
"""
def quantize(self, model):
"""
Quantizes input model and returns. This only is supported for CPU devices.
Args:
model: torch model
Returns:
quantized torch model
"""
# pylint: disable=E1101
return torch.quantization.quantize_dynamic(model, {torch.nn.Linear}, dtype=torch.qint8)
def tensor(self, data):
"""
Creates a tensor array.
Args:
data: input data
Returns:
tensor
"""
# pylint: disable=E1102
return torch.tensor(data)
def tensortype(self):
"""
Returns the tensor processing framework code.
Returns:
tensor processing framework code
"""
return "pt"
def argmax(self, data, dimension):
"""
Calls argmax on data using the tensor processing framework.
Args:
data: input data
dimension: dimension to derive argmax
Returns:
argmax
"""
# pylint: disable=E1101
return torch.argmax(data, dim=dimension)
def context(self):
"""
Defines a context used to wrap processing with the tensor processing framework.
Returns:
processing context
"""
# pylint: disable=E1101
return torch.no_grad()
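# Illustrative sketch, not part of the original module, assuming the Pipeline
# base class can be constructed without arguments: the helpers above are thin
# wrappers over PyTorch, so they can be exercised directly.
if __name__ == "__main__":
    tensors = Tensors()
    scores = tensors.tensor([[0.1, 0.7, 0.2]])
    with tensors.context():
        print(tensors.argmax(scores, 1))  # -> tensor([1])
    print(tensors.tensortype())           # -> "pt"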
|
tests/perf/test_long_cycles_nbrows_cycle_length_41000_80.py | shaido987/pyaf | 377 | 11131713 | <filename>tests/perf/test_long_cycles_nbrows_cycle_length_41000_80.py
import tests.perf.test_cycles_full_long_long as gen
gen.test_nbrows_cycle(41000 , 80)
|
922 Sort Array By Parity II.py | krishna13052001/LeetCode | 872 | 11131733 | <filename>922 Sort Array By Parity II.py
#!/usr/bin/python3
"""
Given an array A of non-negative integers, half of the integers in A are odd,
and half of the integers are even.
Sort the array so that whenever A[i] is odd, i is odd; and whenever A[i] is even
, i is even.
You may return any answer array that satisfies this condition.
Example 1:
Input: [4,2,5,7]
Output: [4,5,2,7]
Explanation: [4,7,2,5], [2,5,4,7], [2,7,4,5] would also have been accepted.
Note:
2 <= A.length <= 20000
A.length % 2 == 0
0 <= A[i] <= 1000
"""
from typing import List
class Solution:
def sortArrayByParityII(self, A: List[int]) -> List[int]:
even_idx = 0
for odd_idx in range(1, len(A), 2):
if A[odd_idx] % 2 == 0:
while A[even_idx] % 2 == 0:
even_idx += 2
A[odd_idx], A[even_idx] = A[even_idx], A[odd_idx]
return A
def sortArrayByParityII_complex(self, A: List[int]) -> List[int]:
"""
in-place two passes
"""
closed = -1
n = len(A)
for i in range(n):
if A[i] % 2 == 0:
closed += 1
A[i], A[closed] = A[closed], A[i]
j = closed + 1
if j % 2 == 1:
j += 1
for i in range(1, closed + 1, 2):
A[i], A[j] = A[j], A[i]
j += 2
return A
if __name__ == "__main__":
assert Solution().sortArrayByParityII([4,1,1,0,1,0]) == [4,1,0,1,0,1]
|
python/graphscope/nx/convert.py | LI-Mingyu/GraphScope-MY | 1,521 | 11131751 | <gh_stars>1000+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file convert.py is referred and derived from project NetworkX,
#
# https://github.com/networkx/networkx/blob/master/networkx/convert.py
#
# which has the following license:
#
# Copyright (C) 2004-2020, NetworkX Developers
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# All rights reserved.
#
# This file is part of NetworkX.
#
# NetworkX is distributed under a BSD license; see LICENSE.txt for more
# information.
#
import warnings
import networkx.convert
from networkx.convert import from_dict_of_dicts
from networkx.convert import from_dict_of_lists
from networkx.convert import from_edgelist
from graphscope import nx
from graphscope.framework.dag_utils import arrow_to_dynamic
from graphscope.nx.utils.compat import import_as_graphscope_nx
import_as_graphscope_nx(networkx.convert)
def to_nx_graph(data, create_using=None, multigraph_input=False): # noqa: C901
"""Make a graph from a known data structure.
The preferred way to call this is automatically
from the class constructor
>>> d = {0: {1: {'weight':1}}} # dict-of-dicts single edge (0,1)
>>> G = nx.Graph(d)
instead of the equivalent
>>> G = nx.from_dict_of_dicts(d)
Parameters
----------
data : object to be converted
Current known types are:
any NetworkX graph
dict-of-dicts
dict-of-lists
container (ie set, list, tuple, iterator) of edges
Pandas DataFrame (row per edge)
numpy matrix
numpy ndarray
scipy sparse matrix
create_using : nx graph constructor, optional (default=nx.Graph)
Graph type to create. If graph instance, then cleared before populated.
multigraph_input : bool (default False)
If True and data is a dict_of_dicts,
try to create a multigraph assuming dict_of_dict_of_lists.
If data and create_using are both multigraphs then create
a multigraph from a multigraph.
"""
# networkx graph or graphscope.nx graph
if hasattr(data, "adj"):
try:
result = from_dict_of_dicts(
data.adj,
create_using=create_using,
multigraph_input=data.is_multigraph(),
)
if hasattr(data, "graph"): # data.graph should be dict-like
result.graph.update(data.graph)
if hasattr(data, "nodes"): # data.nodes should be dict-like
result.add_nodes_from(data.nodes.items())
return result
except Exception as e:
raise nx.NetworkXError("Input is not a correct NetworkX-like graph.") from e
# dict of dicts/lists
if isinstance(data, dict):
try:
return from_dict_of_dicts(
data, create_using=create_using, multigraph_input=multigraph_input
)
except Exception:
try:
return from_dict_of_lists(data, create_using=create_using)
except Exception as e:
raise TypeError("Input is not known type.") from e
# list or generator of edges
if isinstance(data, (list, tuple)) or any(
hasattr(data, attr) for attr in ["_adjdict", "next", "__next__"]
):
try:
return from_edgelist(data, create_using=create_using)
except Exception as e:
raise nx.NetworkXError("Input is not a valid edge list") from e
# Pandas DataFrame
try:
import pandas as pd
if isinstance(data, pd.DataFrame):
if data.shape[0] == data.shape[1]:
try:
return nx.from_pandas_adjacency(data, create_using=create_using)
except Exception as e:
msg = "Input is not a correct Pandas DataFrame adjacency matrix."
raise nx.NetworkXError(msg) from e
else:
try:
return nx.from_pandas_edgelist(
data, edge_attr=True, create_using=create_using
)
except Exception as e:
msg = "Input is not a correct Pandas DataFrame edge-list."
raise nx.NetworkXError(msg) from e
except ImportError:
msg = "pandas not found, skipping conversion test."
warnings.warn(msg, ImportWarning)
# numpy matrix or ndarray
try:
import numpy
if isinstance(data, (numpy.matrix, numpy.ndarray)):
try:
return nx.from_numpy_matrix(data, create_using=create_using)
except Exception as e:
raise nx.NetworkXError(
"Input is not a correct numpy matrix or array."
) from e
except ImportError:
warnings.warn("numpy not found, skipping conversion test.", ImportWarning)
# scipy sparse matrix - any format
try:
import scipy
if hasattr(data, "format"):
try:
return nx.from_scipy_sparse_matrix(data, create_using=create_using)
except Exception as e:
raise nx.NetworkXError(
"Input is not a correct scipy sparse matrix type."
) from e
except ImportError:
warnings.warn("scipy not found, skipping conversion test.", ImportWarning)
raise nx.NetworkXError("Input is not a known data type for conversion.")
def to_networkx_graph(nx_graph):
import networkx
if not nx_graph.is_directed() and not nx_graph.is_multigraph():
g = networkx.Graph()
edges = nx_graph.edges.data()
elif nx_graph.is_directed() and not nx_graph.is_multigraph():
g = networkx.DiGraph()
edges = nx_graph.edges.data()
elif not nx_graph.is_directed() and nx_graph.is_multigraph():
g = networkx.MultiGraph()
edges = nx_graph.edges.data(keys=True)
else:
g = networkx.MultiDiGraph()
edges = nx_graph.edges.data(keys=True)
nodes = nx_graph.nodes.data()
g.update(edges, nodes)
g.graph.update(nx_graph.graph)
return g
|
Cogs/PciUsb.py | cheesycod/CorpBot.py | 368 | 11131755 | <reponame>cheesycod/CorpBot.py
import discord
from discord.ext import commands
from Cogs import DL
from Cogs import Message
def setup(bot):
# Add the bot
bot.add_cog(PciUsb(bot))
class PciUsb(commands.Cog):
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot):
self.bot = bot
@commands.command(pass_context=True)
async def pci(self, ctx, ven_dev = None):
"""Searches pci-ids.ucw.cz for the passed PCI ven:dev id."""
if not ven_dev:
await ctx.send("Usage: `{}pci vvvv:dddd` where `vvvv` is the vendor id, and `dddd` is the device id.".format(ctx.prefix))
return
try:
v,i = ven_dev.split(":")
except:
await ctx.send("Usage: `{}pci vvvv:dddd` where `vvvv` is the vendor id, and `dddd` is the device id.".format(ctx.prefix))
return
if not (len(v)==len(i)==4):
await ctx.send("Usage: `{}pci vvvv:dddd` where `vvvv` is the vendor id, and `dddd` is the device id.".format(ctx.prefix))
return
if not v.isalnum() and not i.isalnum():
await ctx.send("Ven and dev ids must be alphanumeric.")
return
url = "http://pci-ids.ucw.cz/read/PC/{}".format(v)
try:
html = await DL.async_text(url)
except:
await ctx.send("No data returned.")
return
vendor = None
for line in html.split("\n"):
if '<div class="name">' in line:
try:
vendor = line.split("Name: ")[1].split("<")[0].replace("&","&").replace(""",'"').replace("'","'").replace(">",">").replace("<","<")
break
except:
pass
vendor = v if not vendor else vendor
url = "http://pci-ids.ucw.cz/read/PC/{}/{}".format(v,i)
try:
html = await DL.async_text(url)
except:
await ctx.send("No data returned.")
return
out = ""
for line in html.split("\n"):
if "itemname" in line.lower():
out += "Name: ".join(line.split("Name: ")[1:]).replace("&","&").replace(""",'"').replace("'","'").replace(">",">").replace("<","<")
out += "\n"
if not len(out):
await ctx.send("No name found.")
return
# Got data
await Message.EmbedText(description="`{}`\n\n{}".format(ven_dev,out),title="{} PCI Device Results".format(vendor),footer="Powered by http://pci-ids.ucw.cz",color=ctx.author).send(ctx)
@commands.command(pass_context=True)
async def usb(self, ctx, ven_dev = None):
"""Searches usb-ids.gowdy.us for the passed USB ven:dev id."""
if not ven_dev:
await ctx.send("Usage: `{}usb vvvv:dddd` where `vvvv` is the vendor id, and `dddd` is the device id.".format(ctx.prefix))
return
try:
v,i = ven_dev.split(":")
except:
await ctx.send("Usage: `{}usb vvvv:dddd` where `vvvv` is the vendor id, and `dddd` is the device id.".format(ctx.prefix))
return
if not (len(v)==len(i)==4):
await ctx.send("Usage: `{}usb vvvv:dddd` where `vvvv` is the vendor id, and `dddd` is the device id.".format(ctx.prefix))
return
if not v.isalnum() and not i.isalnum():
await ctx.send("Ven and dev ids must be alphanumeric.")
return
url = "https://usb-ids.gowdy.us/read/UD/{}".format(v)
try:
html = await DL.async_text(url)
except:
await ctx.send("No data returned.")
return
vendor = None
for line in html.split("\n"):
if '<div class="name">' in line:
try:
vendor = line.split("Name: ")[1].split("<")[0].replace("&","&").replace(""",'"').replace("'","'").replace(">",">").replace("<","<")
break
except:
pass
vendor = v if not vendor else vendor
url = "https://usb-ids.gowdy.us/read/UD/{}/{}".format(v,i)
try:
html = await DL.async_text(url)
except:
await ctx.send("No data returned.")
return
out = ""
for line in html.split("\n"):
if "itemname" in line.lower():
out += "Name: ".join(line.split("Name: ")[1:]).replace("&","&").replace(""",'"').replace("'","'").replace(">",">").replace("<","<")
out += "\n"
if not len(out):
await ctx.send("No name found.")
return
# Got data
await Message.EmbedText(description="`{}`\n\n{}".format(ven_dev,out),title="{} USB Device Results".format(vendor),footer="Powered by https://usb-ids.gowdy.us",color=ctx.author).send(ctx)
|
DQM/Integration/python/clients/es_dqm_sourceclient-live_cfg.py | malbouis/cmssw | 852 | 11131783 | from __future__ import print_function
import FWCore.ParameterSet.Config as cms
import sys
process = cms.Process("ESDQM")
unitTest = False
if 'unitTest=True' in sys.argv:
unitTest=True
process.load('Configuration/StandardSequences/Services_cff')
process.load('FWCore/MessageService/MessageLogger_cfi')
process.load("FWCore.Modules.preScaler_cfi")
if unitTest:
process.load("DQM.Integration.config.unittestinputsource_cfi")
from DQM.Integration.config.unittestinputsource_cfi import options
else:
# for live online DQM in P5
process.load("DQM.Integration.config.inputsource_cfi")
from DQM.Integration.config.inputsource_cfi import options
# for testing in lxplus
#process.load("DQM.Integration.config.fileinputsource_cfi")
#from DQM.Integration.config.fileinputsource_cfi import options
# Condition for P5 cluster
process.load("DQM.Integration.config.FrontierCondition_GT_cfi")
# Condition for lxplus: change and possibly customise the GT
#from Configuration.AlCa.GlobalTag import GlobalTag as gtCustomise
#process.GlobalTag = gtCustomise(process.GlobalTag, 'auto:run2_data', '')
process.load("EventFilter.ESRawToDigi.esRawToDigi_cfi")
#process.ecalPreshowerDigis = EventFilter.ESRawToDigi.esRawToDigi_cfi.esRawToDigi.clone()
process.esRawToDigi.sourceTag = 'source'
process.esRawToDigi.debugMode = False
process.load('RecoLocalCalo/EcalRecProducers/ecalPreshowerRecHit_cfi')
process.ecalPreshowerRecHit.ESdigiCollection = "esRawToDigi"
process.ecalPreshowerRecHit.ESRecoAlgo = 0
process.preScaler.prescaleFactor = 1
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
#process.dqmInfoES = DQMEDAnalyzer('DQMEventInfo',
# subSystemFolder = cms.untracked.string('EcalPreshower')
# )
#process.load("DQMServices.Core.DQM_cfg")
process.load("DQM.Integration.config.environment_cfi")
process.dqmEnv.subSystemFolder = 'EcalPreshower'
process.dqmSaver.tag = 'EcalPreshower'
process.dqmSaver.runNumber = options.runNumber
process.dqmSaverPB.tag = 'EcalPreshower'
process.dqmSaverPB.runNumber = options.runNumber
# for local test
#process.dqmSaver.path = '.'
#process.dqmSaverPB.path = './pb'
process.load("DQM/EcalPreshowerMonitorModule/EcalPreshowerMonitorTasks_cfi")
process.ecalPreshowerIntegrityTask.ESDCCCollections = "esRawToDigi"
process.ecalPreshowerIntegrityTask.ESKChipCollections = "esRawToDigi"
process.ecalPreshowerOccupancyTask.DigiLabel = "esRawToDigi"
process.ecalPreshowerPedestalTask.DigiLabel = "esRawToDigi"
process.ecalPreshowerRawDataTask.ESDCCCollections = "esRawToDigi"
process.ecalPreshowerTimingTask.DigiLabel = "esRawToDigi"
process.ecalPreshowerTrendTask.ESDCCCollections = "esRawToDigi"
process.load("DQM/EcalPreshowerMonitorClient/EcalPreshowerMonitorClient_cfi")
del process.dqmInfoES
process.p = cms.Path(process.preScaler*
process.esRawToDigi*
process.ecalPreshowerRecHit*
process.ecalPreshowerDefaultTasksSequence*
process.dqmEnv*
process.ecalPreshowerMonitorClient*
process.dqmSaver*
process.dqmSaverPB)
process.esRawToDigi.sourceTag = "rawDataCollector"
process.ecalPreshowerRawDataTask.FEDRawDataCollection = "rawDataCollector"
#--------------------------------------------------
# Heavy Ion Specific Fed Raw Data Collection Label
#--------------------------------------------------
print("Running with run type = ", process.runType.getRunType())
if (process.runType.getRunType() == process.runType.hi_run):
process.esRawToDigi.sourceTag = "rawDataRepacker"
process.ecalPreshowerRawDataTask.FEDRawDataCollection = "rawDataRepacker"
### process customizations included here
from DQM.Integration.config.online_customizations_cfi import *
print("Final Source settings:", process.source)
process = customise(process)
|
tests/requests/invalid/003.py | ashishmjn/gunicorn | 6,851 | 11131807 | from gunicorn.http.errors import InvalidRequestMethod
request = InvalidRequestMethod |
timemachines/skaters/pypi.py | microprediction/timemachines | 253 | 11131811 | PYPI = {'tsa':'statsmodels',
'fbprophet':'prophet',
'pmd':'pmdarima',
'rvr':'river',
'nprophet':'neuralprophet',
'dlm':'pydlm',
'divine':'divinity',
'orbit':'orbit-ml',
'bats':'tbats',
'glu':'gluonts',
'flux':'pyflux',
'sk':'sktime',
'smdk':'simdkalman',
'gk':'greykite',
'tcn':'keras-tcn',
'darts':'darts',
'kts':'kats',
'ats':'auto_ts',
'suc':'successor',
'mrln':'salesforce-merlion',
'pycrt':'pycaret'}
def pypi_from_name(name):
stem = name.split('_')[0]
short_name = PYPI.get(stem)
stub = 'https://pypi.org/project/'
return stub+short_name if short_name else stub+'timemachines'
if __name__=='__main__':
from timemachines.skaters.localskaters import LOCAL_SKATERS
from pprint import pprint
pprint([(sk.__name__,pypi_from_name(sk.__name__)) for sk in LOCAL_SKATERS])
print(len(LOCAL_SKATERS))
|
v3/as_demos/gather.py | Dilepa/micropython-async | 443 | 11131821 | # gather.py Demo of Gatherable coroutines. Includes 3 cases:
# 1. A normal coro
# 2. A coro with a timeout
# 3. A cancellable coro
import uasyncio as asyncio
async def barking(n):
print('Start normal coro barking()')
for _ in range(6):
await asyncio.sleep(1)
print('Done barking.')
return 2 * n
async def foo(n):
print('Start timeout coro foo()')
try:
while True:
await asyncio.sleep(1)
n += 1
except asyncio.CancelledError:
print('Trapped foo timeout.')
raise
return n
async def bar(n):
print('Start cancellable bar()')
try:
while True:
await asyncio.sleep(1)
n += 1
except asyncio.CancelledError: # Demo of trapping
print('Trapped bar cancellation.')
raise
return n
async def do_cancel(task):
await asyncio.sleep(5)
print('About to cancel bar')
task.cancel()
async def main(rex):
bar_task = asyncio.create_task(bar(70)) # Note args here
tasks = []
tasks.append(barking(21))
tasks.append(asyncio.wait_for(foo(10), 7))
asyncio.create_task(do_cancel(bar_task))
try:
res = await asyncio.gather(*tasks, return_exceptions=rex)
except asyncio.TimeoutError:
print('foo timed out.')
res = 'No result'
print('Result: ', res)
exp_false = '''Test runs for 10s. Expected output:
Start cancellable bar()
Start normal coro barking()
Start timeout coro foo()
About to cancel bar
Trapped bar cancellation.
Done barking.
Trapped foo timeout.
foo timed out.
Result: No result
'''
exp_true = '''Test runs for 10s. Expected output:
Start cancellable bar()
Start normal coro barking()
Start timeout coro foo()
About to cancel bar
Trapped bar cancellation.
Done barking.
Trapped foo timeout.
Result: [42, TimeoutError()]
'''
def printexp(st):
print('\x1b[32m')
print(st)
print('\x1b[39m')
def test(rex):
st = exp_true if rex else exp_false
printexp(st)
try:
asyncio.run(main(rex))
except KeyboardInterrupt:
print('Interrupted')
finally:
asyncio.new_event_loop()
print()
print('as_demos.gather.test() to run again.')
print('as_demos.gather.test(True) to see effect of return_exceptions.')
test(rex=False)
|
source/agent/webrtc/rtcConn/binding.gyp | yfdandy/owt-server | 890 | 11131826 | {
'targets': [{
'target_name': 'rtcConn',
'variables': {
'source_rel_dir': '../../..', # relative source dir path
'source_abs_dir%': '<(module_root_dir)/../../..', # absolute source dir path
},
'sources': [
'addon.cc',
'WebRtcConnection.cc',
'ThreadPool.cc',
'IOThreadPool.cc',
"MediaStream.cc",
'conn_handler/WoogeenHandler.cpp',
'erizo/src/erizo/DtlsTransport.cpp',
'erizo/src/erizo/IceConnection.cpp',
'erizo/src/erizo/LibNiceConnection.cpp',
'erizo/src/erizo/SdpInfo.cpp',
'erizo/src/erizo/SrtpChannel.cpp',
'erizo/src/erizo/Stats.cpp',
'erizo/src/erizo/StringUtil.cpp',
'erizo/src/erizo/WebRtcConnection.cpp',
'erizo/src/erizo/MediaStream.cpp',
'erizo/src/erizo/lib/LibNiceInterfaceImpl.cpp',
'erizo/src/erizo/thread/IOThreadPool.cpp',
'erizo/src/erizo/thread/IOWorker.cpp',
'erizo/src/erizo/thread/Scheduler.cpp',
'erizo/src/erizo/thread/ThreadPool.cpp',
'erizo/src/erizo/thread/Worker.cpp',
'erizo/src/erizo/rtp/PacketBufferService.cpp',
'erizo/src/erizo/rtp/RtpUtils.cpp',
'erizo/src/erizo/rtp/QualityManager.cpp',
'erizo/src/erizo/rtp/RtpExtensionProcessor.cpp',
'<!@(find erizo/src/erizo/dtls -name "*.cpp")',
'<!@(find erizo/src/erizo/dtls -name "*.c")',
'<!@(find erizo/src/erizo/pipeline -name "*.cpp")',
'<!@(find erizo/src/erizo/stats -name "*.cpp")'
],
'cflags_cc': ['-DWEBRTC_POSIX', '-DWEBRTC_LINUX', '-DLINUX', '-DNOLINUXIF', '-DNO_REG_RPC=1', '-DHAVE_VFPRINTF=1', '-DRETSIGTYPE=void', '-DNEW_STDIO', '-DHAVE_STRDUP=1', '-DHAVE_STRLCPY=1', '-DHAVE_LIBM=1', '-DHAVE_SYS_TIME_H=1', '-DTIME_WITH_SYS_TIME_H=1'],
'include_dirs': [
"<!(node -e \"require('nan')\")",
'conn_handler',
'erizo/src/erizo',
'erizo/src/erizo/lib',
'erizo/src/erizo/dtls',
'erizo/src/erizo/pipeline',
'erizo/src/erizo/rtp',
'erizo/src/erizo/thread',
'erizo/src/erizo/stats',
'<(source_rel_dir)/core/common',
'<(source_rel_dir)/core/owt_base',
'$(DEFAULT_DEPENDENCY_PATH)/include',
'$(CUSTOM_INCLUDE_PATH)',
'<!@(pkg-config glib-2.0 --cflags-only-I | sed s/-I//g)',
],
'libraries': [
'-L$(DEFAULT_DEPENDENCY_PATH)/lib',
'-L$(CUSTOM_LIBRARY_PATH)',
'-lsrtp2',
'-lssl',
'-ldl',
'-lcrypto',
'-llog4cxx',
'-lboost_thread',
'-lboost_system',
'-lnice',
#'-L<(webrtc_abs_dir)', '-lwebrtc',
],
'conditions': [
[ 'OS=="mac"', {
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES', # -fno-exceptions
'MACOSX_DEPLOYMENT_TARGET': '10.7', # from MAC OS 10.7
'OTHER_CFLAGS': ['-g -O$(OPTIMIZATION_LEVEL) -stdlib=libc++']
},
}, { # OS!="mac"
'cflags!' : ['-fno-exceptions'],
'cflags' : ['-D__STDC_CONSTANT_MACROS'],
'cflags_cc' : ['-Wall', '-O3', '-g' , '-std=c++11', '-fexceptions'],
'cflags_cc!' : ['-fno-exceptions'],
'cflags_cc!' : ['-fno-rtti']
}],
]
}]
}
|
fbrp/tests/fbrp/runtime/test_base.py | ali-senguel/fairo | 669 | 11131828 | <reponame>ali-senguel/fairo
import asyncio
import unittest
from fbrp.life_cycle import Ask, ProcInfo, State
from fbrp.process import ProcDef
from fbrp.runtime.base import BaseLauncher
from unittest import IsolatedAsyncioTestCase
from unittest.mock import MagicMock, Mock, patch
class AsyncIter:
def __init__(self, items):
self.items = items
async def __aiter__(self):
for item in self.items:
yield item
def async_return(result):
f = asyncio.Future()
f.set_result(result)
return f
class TestBaseLauncher(IsolatedAsyncioTestCase):
@patch("argparse.Namespace")
async def test_run(self, mock_namespace):
mock_proc_def = Mock(spec=ProcDef(None, None, None, None, None, None, None))
base_launcher = BaseLauncher()
with self.assertRaises(NotImplementedError):
await base_launcher.run('', mock_proc_def, mock_namespace)
def test_get_pid(self):
base_launcher = BaseLauncher()
with self.assertRaises(NotImplementedError):
base_launcher.get_pid()
@patch("fbrp.life_cycle.aio_proc_info_watcher")
async def test_down_watcher_1(self, mock_proc_info_watcher):
mock_ondown = MagicMock(return_value=async_return("on down called"))
proc_info = ProcInfo(Ask.DOWN, State.STARTED, 0, True)
mock_proc_info_watcher.return_value = AsyncIter([proc_info])
base_launcher = BaseLauncher()
base_launcher.name = 'TEST_BASE'
await base_launcher.down_watcher(mock_ondown)
mock_proc_info_watcher.assert_called_once()
mock_ondown.assert_called_once()
@patch("fbrp.life_cycle.aio_proc_info_watcher")
async def test_down_watcher_2(self, mock_proc_info_watcher):
mock_ondown = MagicMock(return_value=async_return("on down called"))
proc_info = ProcInfo(Ask.UP, State.STOPPED, 0, False)
mock_proc_info_watcher.return_value = AsyncIter([proc_info])
base_launcher = BaseLauncher()
base_launcher.name = 'TEST_BASE'
await base_launcher.down_watcher(mock_ondown)
mock_proc_info_watcher.assert_called_once()
mock_ondown.assert_not_called()
@patch("fbrp.life_cycle.aio_proc_info_watcher")
async def test_down_watcher_3(self, mock_proc_info_watcher):
mock_ondown = MagicMock(return_value=async_return("on down called"))
proc_info_1 = ProcInfo(Ask.UP, State.STOPPED, 0, False)
proc_info_2 = ProcInfo(Ask.DOWN, State.STARTED, 0, True)
mock_proc_info_watcher.return_value = AsyncIter([proc_info_1, proc_info_2])
base_launcher = BaseLauncher()
base_launcher.name = 'TEST_BASE'
await base_launcher.down_watcher(mock_ondown)
mock_proc_info_watcher.assert_called_once()
mock_ondown.assert_called_once()
if __name__ == '__main__':
unittest.main() |
slovnet/visitor.py | harri-pltr/slovnet | 147 | 11131833 |
class Visitor(object):
def resolve_method(self, item):
for cls in item.__class__.__mro__:
name = 'visit_' + cls.__name__
method = getattr(self, name, None)
if method:
return method
raise ValueError('no method for {type!r}'.format(
type=type(item)
))
def visit(self, item):
return self.resolve_method(item)(item)
def __call__(self, item):
return self.visit(item)
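# Illustrative sketch, not part of the original module: dispatch walks the
# item's MRO looking for a `visit_<ClassName>` method, so a handler registered
# for a base class also covers its subclasses. The classes below are examples.
if __name__ == "__main__":
    class Animal:
        pass

    class Dog(Animal):
        pass

    class NameVisitor(Visitor):
        def visit_Animal(self, item):
            return type(item).__name__

    print(NameVisitor()(Dog()))  # -> "Dog", resolved via visit_Animal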
|
etc/pending_ugens/PartConv.py | butayama/supriya | 191 | 11131835 | <reponame>butayama/supriya
import collections
from supriya.enums import CalculationRate
from supriya.synthdefs import UGen
class PartConv(UGen):
"""
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> part_conv = supriya.ugens.PartConv.ar(
... fftsize=fftsize,
... irbufnum=irbufnum,
... source=source,
... )
>>> part_conv
PartConv.ar()
"""
### CLASS VARIABLES ###
    _ordered_input_names = collections.OrderedDict([
        ('source', None),
        ('fftsize', None),
        ('irbufnum', None),
    ])
_valid_calculation_rates = None
### INITIALIZER ###
def __init__(
self,
calculation_rate=None,
fftsize=None,
irbufnum=None,
source=None,
):
UGen.__init__(
self,
calculation_rate=calculation_rate,
fftsize=fftsize,
irbufnum=irbufnum,
source=source,
)
### PUBLIC METHODS ###
@classmethod
def ar(
cls,
fftsize=None,
irbufnum=None,
source=None,
):
"""
Constructs an audio-rate PartConv.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> part_conv = supriya.ugens.PartConv.ar(
... fftsize=fftsize,
... irbufnum=irbufnum,
... source=source,
... )
>>> part_conv
PartConv.ar()
Returns ugen graph.
"""
import supriya.synthdefs
calculation_rate = supriya.CalculationRate.AUDIO
ugen = cls._new_expanded(
calculation_rate=calculation_rate,
fftsize=fftsize,
irbufnum=irbufnum,
source=source,
)
return ugen
# def calcBufSize(): ...
# def calcNumPartitions(): ...
### PUBLIC PROPERTIES ###
@property
def fftsize(self):
"""
Gets `fftsize` input of PartConv.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> part_conv = supriya.ugens.PartConv.ar(
... fftsize=fftsize,
... irbufnum=irbufnum,
... source=source,
... )
>>> part_conv.fftsize
Returns ugen input.
"""
        index = tuple(self._ordered_input_names).index('fftsize')
return self._inputs[index]
@property
def irbufnum(self):
"""
Gets `irbufnum` input of PartConv.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> part_conv = supriya.ugens.PartConv.ar(
... fftsize=fftsize,
... irbufnum=irbufnum,
... source=source,
... )
>>> part_conv.irbufnum
Returns ugen input.
"""
        index = tuple(self._ordered_input_names).index('irbufnum')
return self._inputs[index]
@property
def source(self):
"""
Gets `source` input of PartConv.
::
>>> source = supriya.ugens.In.ar(bus=0)
>>> part_conv = supriya.ugens.PartConv.ar(
... fftsize=fftsize,
... irbufnum=irbufnum,
... source=source,
... )
>>> part_conv.source
OutputProxy(
source=In(
bus=0.0,
calculation_rate=CalculationRate.AUDIO,
channel_count=1
),
output_index=0
)
Returns ugen input.
"""
        index = tuple(self._ordered_input_names).index('source')
return self._inputs[index]
|
Document-Word-Detection/Word_detection.py | A-kriti/Amazing-Python-Scripts | 930 | 11131853 | <reponame>A-kriti/Amazing-Python-Scripts
'''
output : word detection on document (Simple OCR type of application)
'''
import cv2
import numpy as np
import imutils
# frame read
frame = cv2.imread('test.jpeg')
# resize
frame = cv2.resize(frame, (600, 600))
# grayscale
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# remove noise
blur = cv2.GaussianBlur(gray, (5, 5), 0)
# otsu thresh (bimodel thresold)
thresh = cv2.threshold(blur, 0, 255,
cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# get structuring element
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 1))
vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 25))
print('horizontal kernel : {}'.format(horizontal_kernel))
print('vertical kernel : {}'.format(vertical_kernel))
# opening (erosion followed by dilation)
horizontal_lines = cv2.morphologyEx(thresh,
cv2.MORPH_OPEN,
horizontal_kernel,
iterations=2)
vertical_lines = cv2.morphologyEx(thresh,
cv2.MORPH_OPEN,
vertical_kernel,
iterations=2)
# contours apply on detected lines
# First one is source image, second is contour retrieval mode, third is contour approximation method
cnts = cv2.findContours(horizontal_lines, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cntsv = cv2.findContours(vertical_lines, cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
# find contours
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
cntsv = cntsv[0] if len(cntsv) == 2 else cntsv[1]
for c in cnts:
cv2.drawContours(frame, [c], -1, (255, 255, 255), 2)
for c in cntsv:
cv2.drawContours(frame, [c], -1, (255, 255, 255), 2)
# imshow
cv2.imshow('thresh', thresh)
cv2.imshow('horizontal_lines', horizontal_lines)
cv2.imshow('vertical_lines', vertical_lines)
cv2.imshow('frame', frame)
# grayscale
gray1 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
thresh1 = cv2.adaptiveThreshold(gray1, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY, 23, 30)
canny = imutils.auto_canny(thresh1)
output = cv2.bitwise_not(canny)
kernel = np.ones((5, 5), np.uint8)
opening = cv2.morphologyEx(canny, cv2.MORPH_CLOSE, kernel)
dilation = cv2.dilate(canny, kernel, iterations=1)
contour, hierachy = cv2.findContours(dilation, cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
for i in contour:
area = cv2.contourArea(i)
if area > 20:
x, y, w, h = cv2.boundingRect(i)
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 120, 255), 2)
cv2.imshow('output', output)
cv2.imshow('dilate', dilation)
cv2.imshow('opening', opening)
cv2.imshow('original_frame', frame)
cv2.imshow('canny', canny)
cv2.imshow('thresh1', thresh1)
# Saving output image
cv2.imwrite('output.jpg', frame)
# destroy all window
cv2.waitKey(0)
cv2.destroyAllWindows()
|
droidbot/adapter/minicap.py | RRua/droidbot | 563 | 11131857 | import logging
import socket
import subprocess
import time
import os
from datetime import datetime
from .adapter import Adapter
MINICAP_REMOTE_ADDR = "localabstract:minicap"
ROTATION_CHECK_INTERVAL_S = 1 # Check rotation once per second
class MinicapException(Exception):
"""
Exception in minicap connection
"""
pass
class Minicap(Adapter):
"""
a connection with target device through minicap.
"""
def __init__(self, device=None):
"""
initiate a minicap connection
:param device: instance of Device
:return:
"""
self.logger = logging.getLogger(self.__class__.__name__)
self.host = "localhost"
if device is None:
from droidbot.device import Device
device = Device()
self.device = device
self.port = self.device.get_random_port()
self.remote_minicap_path = "/data/local/tmp/minicap-devel"
self.sock = None
self.connected = False
self.minicap_process = None
self.banner = None
self.width = -1
self.height = -1
self.orientation = -1
self.last_screen = None
self.last_screen_time = None
self.last_views = []
self.last_rotation_check_time = datetime.now()
def set_up(self):
device = self.device
try:
minicap_files = device.adb.shell("ls %s 2>/dev/null" % self.remote_minicap_path).split()
if "minicap.so" in minicap_files and ("minicap" in minicap_files or "minicap-nopie" in minicap_files):
self.logger.debug("minicap was already installed.")
return
except:
pass
if device is not None:
# install minicap
import pkg_resources
local_minicap_path = pkg_resources.resource_filename("droidbot", "resources/minicap")
try:
device.adb.shell("mkdir %s" % self.remote_minicap_path)
except Exception:
pass
abi = device.adb.get_property('ro.product.cpu.abi')
sdk = device.get_sdk_version()
if sdk >= 16:
minicap_bin = "minicap"
else:
minicap_bin = "minicap-nopie"
minicap_bin_path = os.path.join(local_minicap_path, 'libs', abi, minicap_bin)
device.push_file(local_file=minicap_bin_path, remote_dir=self.remote_minicap_path)
minicap_so_path = os.path.join(local_minicap_path, 'jni', 'libs', f'android-{sdk}', abi, 'minicap.so')
device.push_file(local_file=minicap_so_path, remote_dir=self.remote_minicap_path)
self.logger.debug("minicap installed.")
def tear_down(self):
try:
delete_minicap_cmd = "adb -s %s shell rm -r %s" % (self.device.serial, self.remote_minicap_path)
p = subprocess.Popen(delete_minicap_cmd.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = p.communicate()
except Exception:
pass
def connect(self):
device = self.device
display = device.get_display_info(refresh=True)
if 'width' not in display or 'height' not in display or 'orientation' not in display:
self.logger.warning("Cannot get the size of current device.")
return
w = display['width']
h = display['height']
if w > h:
temp = w
w = h
h = temp
o = display['orientation'] * 90
self.width = w
self.height = h
self.orientation = o
size_opt = "%dx%d@%dx%d/%d" % (w, h, w, h, o)
grant_minicap_perm_cmd = "adb -s %s shell chmod -R a+x %s" % \
(device.serial, self.remote_minicap_path)
start_minicap_cmd = "adb -s %s shell LD_LIBRARY_PATH=%s %s/minicap -P %s" % \
(device.serial, self.remote_minicap_path, self.remote_minicap_path, size_opt)
self.logger.debug("starting minicap: " + start_minicap_cmd)
p = subprocess.Popen(grant_minicap_perm_cmd.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = p.communicate()
self.minicap_process = subprocess.Popen(start_minicap_cmd.split(),
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        # Wait 2 seconds for minicap to start
time.sleep(2)
self.logger.debug("minicap started.")
try:
# forward host port to remote port
forward_cmd = "adb -s %s forward tcp:%d %s" % (device.serial, self.port, MINICAP_REMOTE_ADDR)
subprocess.check_call(forward_cmd.split())
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.host, self.port))
import threading
listen_thread = threading.Thread(target=self.listen_messages)
listen_thread.start()
except socket.error as e:
self.connected = False
self.logger.warning(e)
raise MinicapException()
def listen_messages(self):
self.logger.debug("start listening minicap images ...")
CHUNK_SIZE = 4096
readBannerBytes = 0
bannerLength = 2
readFrameBytes = 0
frameBodyLength = 0
frameBody = bytearray()
banner = {
"version": 0,
"length": 0,
"pid": 0,
"realWidth": 0,
"realHeight": 0,
"virtualWidth": 0,
"virtualHeight": 0,
"orientation": 0,
"quirks": 0,
}
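        # Minicap's global header, parsed byte-by-byte below (24 bytes total,
        # multi-byte fields little-endian): byte 0 = version, byte 1 = header
        # length, bytes 2-5 = pid, 6-9 = real width, 10-13 = real height,
        # 14-17 = virtual width, 18-21 = virtual height, byte 22 = orientation
        # (stored in quarter turns, converted to degrees here), byte 23 = quirks.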
self.connected = True
while self.connected:
chunk = bytearray(self.sock.recv(CHUNK_SIZE))
if not chunk:
continue
chunk_len = len(chunk)
cursor = 0
while cursor < chunk_len and self.connected:
if readBannerBytes < bannerLength:
if readBannerBytes == 0:
banner['version'] = chunk[cursor]
elif readBannerBytes == 1:
banner['length'] = bannerLength = chunk[cursor]
elif 2 <= readBannerBytes <= 5:
banner['pid'] += (chunk[cursor] << ((readBannerBytes - 2) * 8))
elif 6 <= readBannerBytes <= 9:
banner['realWidth'] += (chunk[cursor] << ((readBannerBytes - 6) * 8))
elif 10 <= readBannerBytes <= 13:
banner['realHeight'] += (chunk[cursor] << ((readBannerBytes - 10) * 8))
elif 14 <= readBannerBytes <= 17:
banner['virtualWidth'] += (chunk[cursor] << ((readBannerBytes - 14) * 8))
elif 18 <= readBannerBytes <= 21:
banner['virtualHeight'] += (chunk[cursor] << ((readBannerBytes - 18) * 8))
elif readBannerBytes == 22:
banner['orientation'] += chunk[cursor] * 90
elif readBannerBytes == 23:
banner['quirks'] = chunk[cursor]
cursor += 1
readBannerBytes += 1
if readBannerBytes == bannerLength:
self.banner = banner
self.logger.debug("minicap initialized: %s" % banner)
elif readFrameBytes < 4:
frameBodyLength += (chunk[cursor] << (readFrameBytes * 8))
cursor += 1
readFrameBytes += 1
else:
if chunk_len - cursor >= frameBodyLength:
frameBody += chunk[cursor: cursor + frameBodyLength]
self.handle_image(frameBody)
cursor += frameBodyLength
frameBodyLength = readFrameBytes = 0
frameBody = bytearray()
else:
frameBody += chunk[cursor:]
frameBodyLength -= chunk_len - cursor
readFrameBytes += chunk_len - cursor
cursor = chunk_len
print("[CONNECTION] %s is disconnected" % self.__class__.__name__)
def handle_image(self, frameBody):
# Sanity check for JPG header, only here for debugging purposes.
if frameBody[0] != 0xFF or frameBody[1] != 0xD8:
self.logger.warning("Frame body does not start with JPG header")
self.last_screen = frameBody
self.last_screen_time = datetime.now()
self.last_views = None
self.logger.debug("Received an image at %s" % self.last_screen_time)
self.check_rotation()
def check_rotation(self):
current_time = datetime.now()
if (current_time - self.last_rotation_check_time).total_seconds() < ROTATION_CHECK_INTERVAL_S:
return
display = self.device.get_display_info(refresh=True)
if 'orientation' in display:
cur_orientation = display['orientation'] * 90
if cur_orientation != self.orientation:
self.device.handle_rotation()
self.last_rotation_check_time = current_time
def check_connectivity(self):
"""
        check if minicap is connected
:return: True for connected
"""
if not self.connected:
return False
if self.last_screen_time is None:
return False
return True
def disconnect(self):
"""
        disconnect minicap
"""
self.connected = False
if self.sock is not None:
try:
self.sock.close()
except Exception as e:
print(e)
if self.minicap_process is not None:
try:
self.minicap_process.terminate()
except Exception as e:
print(e)
try:
forward_remove_cmd = "adb -s %s forward --remove tcp:%d" % (self.device.serial, self.port)
p = subprocess.Popen(forward_remove_cmd.split(), stderr=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = p.communicate()
except Exception as e:
print(e)
def get_views(self):
"""
get UI views using cv module
        opencv-python needs to be installed for this function
:return: a list of views
"""
if not self.last_screen:
self.logger.warning("last_screen is None")
return None
if self.last_views:
return self.last_views
from . import cv
img = cv.load_image_from_buf(self.last_screen)
view_bounds = cv.find_views(img)
root_view = {
"class": "CVViewRoot",
"bounds": [[0, 0], [self.width, self.height]],
"enabled": True,
"temp_id": 0
}
views = [root_view]
temp_id = 1
        for x, y, w, h in view_bounds:
view = {
"class": "CVView",
"bounds": [[x,y], [x+w, y+h]],
"enabled": True,
"temp_id": temp_id,
"signature": cv.calculate_dhash(img[y:y+h, x:x+w]),
"parent": 0,
"children": []
}
views.append(view)
temp_id += 1
root_view["children"] = list(range(1, temp_id))
self.last_views = views
return views
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
minicap = Minicap()
try:
minicap.set_up()
minicap.connect()
except:
minicap.disconnect()
minicap.tear_down()
minicap.device.disconnect()
|
src/virtual-wan/azext_vwan/vendored_sdks/v2018_08_01/v2018_08_01/models/available_providers_list_state.py | Mannan2812/azure-cli-extensions | 207 | 11131866 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AvailableProvidersListState(Model):
"""State details.
:param state_name: The state name.
:type state_name: str
:param providers: A list of Internet service providers.
:type providers: list[str]
:param cities: List of available cities or towns in the state.
:type cities:
list[~azure.mgmt.network.v2018_08_01.models.AvailableProvidersListCity]
"""
_attribute_map = {
'state_name': {'key': 'stateName', 'type': 'str'},
'providers': {'key': 'providers', 'type': '[str]'},
'cities': {'key': 'cities', 'type': '[AvailableProvidersListCity]'},
}
def __init__(self, **kwargs):
super(AvailableProvidersListState, self).__init__(**kwargs)
self.state_name = kwargs.get('state_name', None)
self.providers = kwargs.get('providers', None)
self.cities = kwargs.get('cities', None)
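# Illustrative construction (example values only, not taken from the service):
#   state = AvailableProvidersListState(state_name='washington',
#                                       providers=['Example ISP'],
#                                       cities=[])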
|
homeassistant/components/google_assistant/logbook.py | MrDelik/core | 22,481 | 11131872 | """Describe logbook events."""
from homeassistant.core import callback
from .const import DOMAIN, EVENT_COMMAND_RECEIVED, SOURCE_CLOUD
COMMON_COMMAND_PREFIX = "action.devices.commands."
@callback
def async_describe_events(hass, async_describe_event):
"""Describe logbook events."""
@callback
def async_describe_logbook_event(event):
"""Describe a logbook event."""
commands = []
for command_payload in event.data["execution"]:
command = command_payload["command"]
if command.startswith(COMMON_COMMAND_PREFIX):
command = command[len(COMMON_COMMAND_PREFIX) :]
commands.append(command)
message = f"sent command {', '.join(commands)}"
if event.data["source"] != SOURCE_CLOUD:
message += f" (via {event.data['source']})"
return {"name": "Google Assistant", "message": message}
async_describe_event(DOMAIN, EVENT_COMMAND_RECEIVED, async_describe_logbook_event)
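# Illustrative result, derived from the handler above: an event whose data is
#   {"execution": [{"command": "action.devices.commands.OnOff"}], "source": SOURCE_CLOUD}
# is described as {"name": "Google Assistant", "message": "sent command OnOff"}.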
|
generate-xml.py | canarduck/Fontawesome-Icons-for-Pencil | 293 | 11131875 | from lxml import etree
from pathlib import Path
import cairosvg
import os
import shutil
import re
sprite_files = sorted(list(Path('./MaterialDesign/icons/svg/').glob('*.svg')), key=lambda file: file.as_posix())
files_element = etree.Element("files")
# set up the output dir
out = Path('gen/')
icons_out = Path(out, 'icons/')
if out.exists() and out.is_dir():
shutil.rmtree(str(out))
icons_out.mkdir(parents=True)
for f in sprite_files:
element = etree.SubElement(files_element, 'file')
element.text = f.as_posix()
element.set('id', f.stem)
element.set('name', (f.stem.replace('-', ' ')).title())
icon_name = f.stem + '.png'
element.set('icon', icons_out.relative_to(out).joinpath(icon_name).as_posix()) # Pencil uses Unix-style paths for icons
with open(os.path.join(str(icons_out), icon_name),'wb+') as icon_out:
thumb = cairosvg.svg2png(file_obj=str(f), write_to=icon_out)
stylesheet = etree.parse('stylesheet.xsl')
transform = etree.XSLT(stylesheet)
result = transform(files_element)
result.write(os.path.join(str(out), 'Definition.xml'), pretty_print=True)
|
caliban/cli.py | Anon-Artist/caliban | 425 | 11131886 | #!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command line parser for the Caliban app."""
import argparse
import os
import sys
from argparse import REMAINDER
from typing import Any, Dict, List, Optional, Union
import google.auth._cloud_sdk as csdk
from absl.flags import argparse_flags
import caliban.config as conf
import caliban.config.experiment as ce
import caliban.docker.build as b
import caliban.platform.cloud.types as ct
import caliban.platform.gke as gke
import caliban.platform.gke.constants as gke_k
import caliban.platform.gke.types as gke_t
import caliban.platform.gke.util as gke_u
import caliban.util as u
import caliban.util.argparse as ua
import caliban.util.schema as us
from caliban import __version__
def _job_mode(use_gpu: bool, gpu_spec: Optional[ct.GPUSpec],
tpu_spec: Optional[ct.TPUSpec]) -> conf.JobMode:
"""Encapsulates the slightly-too-complicated logic around the default job mode
to choose based on the values of three incoming parameters.
"""
if not use_gpu and gpu_spec is not None:
# This should never happen, due to our CLI validation.
raise AssertionError("gpu_spec isn't allowed for CPU only jobs!")
# Base mode.
mode = conf.JobMode.GPU if use_gpu else conf.JobMode.CPU
# For the specific case where there's no GPU specified and a TPU is, set the
# mode back to CPU and don't attach a GPU.
if gpu_spec is None and tpu_spec is not None:
mode = conf.JobMode.CPU
return mode
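# Illustrative resolutions of the rules above (specs abbreviated; 'some_tpu' is a
# placeholder for any TPUSpec instance):
#   _job_mode(use_gpu=True,  gpu_spec=None, tpu_spec=None)     -> JobMode.GPU
#   _job_mode(use_gpu=False, gpu_spec=None, tpu_spec=None)     -> JobMode.CPU
#   _job_mode(use_gpu=True,  gpu_spec=None, tpu_spec=some_tpu) -> JobMode.CPU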
def resolve_job_mode(args: Dict[str, Any]) -> conf.JobMode:
"""Similar to job_mode above; plucks the values out of a parsed CLI arg map vs
  taking them directly.
"""
use_gpu = args.get("use_gpu", True)
gpu_spec = args.get("gpu_spec")
tpu_spec = args.get("tpu_spec")
return _job_mode(use_gpu, gpu_spec, tpu_spec)
def validate_script_args(argv: List[str], items: List[str]) -> List[str]:
"""This validation catches errors where argparse slurps up anything after the
required argument as a script_arg, EVEN if it's not separated by a --.
We do this instead of just parsing them directly so that we can still have a
nice help string provided by argparse.
"""
# items before the double-dashes, expected script_args after.
pre_args, expected = u.split_by(argv, "--")
if items == expected:
return items
# get the extra arguments parsed BEFORE the dash. These were probably meant
# to be options to caliban itself.
pre_dashes, _ = u.split_by(items, "--")
joined = " ".join(pre_dashes)
expected_s = " ".join(expected)
# caliban arguments before these unexpected arguments.
before_pre_dashes = pre_args[:-len(pre_dashes)]
pwas = "was" if len(pre_dashes) == 1 else "were"
parg = "argument" if len(pre_dashes) == 1 else "arguments"
u.err(
"""\nThe {} '{}' {} supplied after required arguments but before the '--' separator and {} not properly parsed.\n\n"""
.format(parg, joined, pwas, pwas))
u.err("if you meant to pass these as script_args, try "
"moving them after the --, like this:\n\n")
u.err("caliban {} -- {} {}\n\n".format(' '.join(before_pre_dashes), joined,
expected_s))
u.err("Otherwise, if these are in fact caliban keyword arguments, "
"please move them before the python script/module name argument.\n\n")
sys.exit(1)
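# Hypothetical invocation showing the accepted form this validation enforces:
# caliban options go before the module, script args go after the '--' separator,
# e.g. `caliban run --nogpu trainer.train -- --batch_size 64`.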
def add_script_args(parser):
"""Adds an argument group that, paired with the validation above, slurps up all
arguments provided after a '--'.
"""
parser.add_argument_group("pass-through arguments").add_argument(
"script_args",
nargs=REMAINDER,
default=[],
metavar="-- YOUR_ARGS",
help=
"""This is a catch-all for arguments you want to pass through to your script.
any arguments after '--' will pass through.""")
def require_module(parser):
parser.add_argument(
"module",
type=ua.validated_package,
help=
"Code to execute, in either trainer.train' or 'trainer/train.py' format. "
"Accepts python scripts, modules or a path to an arbitrary script.")
def setup_extras(parser):
parser.add_argument("--extras",
action="append",
help="setup.py dependency keys.")
def no_cache_arg(parser):
parser.add_argument("--no_cache",
help="Disable Docker's caching mechanism and force"
"a rebuild of the container from scratch.",
action="store_true")
def docker_run_arg(parser):
"""Adds a command that accepts arguments to pass directly to `docker run`."""
parser.add_argument("--docker_run_args",
type=lambda s: s.split(),
help="String of args to add to Docker.")
def extra_dirs(parser):
parser.add_argument(
"-d",
"--dir",
action="append",
type=ua.argparse_schema(us.Directory),
help="Extra directories to include. List these from large to small "
"to take full advantage of Docker's build cache.")
def no_gpu_flag(parser):
parser.add_argument("--nogpu",
dest="use_gpu",
help="Disable GPU mode and force CPU-only.",
action="store_false")
def project_id_arg(parser):
parser.add_argument(
"--project_id",
help="ID of the GCloud AI Platform/GKE project to use for Cloud job "
"submission and image persistence. (Defaults to $PROJECT_ID; errors if "
"both the argument and $PROJECT_ID are empty.)")
def region_arg(parser):
regions = u.enum_vals(ct.valid_regions())
parser.add_argument(
"--region",
type=ct.parse_region,
help="Region to use for Cloud job submission and image persistence. " +
"Must be one of {}. ".format(regions) +
"(Defaults to $REGION or '{}'.)".format(conf.DEFAULT_REGION.value))
def cloud_key_arg(parser):
parser.add_argument("--cloud_key",
type=ua.argparse_schema(us.File),
help="Path to GCloud service account key. "
"(Defaults to $GOOGLE_APPLICATION_CREDENTIALS.)")
def image_id_arg(parser):
parser.add_argument(
"--image_id",
help="Docker image ID accessible in the local Docker registry. "
"If supplied, Caliban will skip the 'docker build' step and use this image."
)
def image_tag_arg(parser):
parser.add_argument(
"--image_tag",
help="Docker image tag accessible via Container Registry. If supplied, "
"Caliban will skip the build and push steps and use this image tag.")
def machine_type_arg(parser):
machine_types = u.enum_vals(ct.MachineType)
cpu_default = conf.DEFAULT_MACHINE_TYPE[conf.JobMode.CPU].value
gpu_default = conf.DEFAULT_MACHINE_TYPE[conf.JobMode.GPU].value
parser.add_argument("--machine_type",
type=ct.parse_machine_type,
help="Cloud machine type to request. Must be one of " +
"{}. Defaults to '{}' in GPU mode, or '{}' ".format(
machine_types, gpu_default, cpu_default) +
"if --nogpu is passed.")
# Parsers for each command supported by Caliban.
def base_parser(base):
"Configures options that every command needs."
no_gpu_flag(base)
cloud_key_arg(base)
setup_extras(base)
no_cache_arg(base)
def building_parser(base):
"""Augments the supplied base with options required by any parser that builds a
docker image.
"""
base_parser(base)
require_module(base)
extra_dirs(base)
def executing_parser(base):
"""Augments the supplied base with options required by any parser that executes
code vs running some interactive process.
"""
building_parser(base)
add_script_args(base)
experiment_config_arg(base)
dry_run_arg(base)
def shell_parser(base):
"""Configure the Shell subparser."""
parser = base.add_parser(
"shell", help="Start an interactive shell with this dir mounted.")
base_parser(parser)
image_id_arg(parser)
docker_run_arg(parser)
parser.add_argument(
"--shell",
choices=b.Shell,
type=b.Shell,
help=
"""This argument sets the shell used inside the container to one of Caliban's
supported shells. Defaults to the shell specified by the $SHELL environment
variable, or 'bash' if your shell isn't supported.""")
parser.add_argument(
"--bare",
action="store_true",
help="Skip mounting the $HOME directory; load a bare shell.")
def notebook_parser(base):
"""Configure the notebook subparser."""
parser = base.add_parser("notebook",
help="Run a local Jupyter notebook instance.")
base_parser(parser)
docker_run_arg(parser)
# Custom notebook arguments.
parser.add_argument(
"-p",
"--port",
type=int,
help="Port to use for Jupyter, inside container and locally.")
parser.add_argument("-jv",
"--jupyter_version",
help="Jupyter or Jupyterlab version to install via pip.")
parser.add_argument(
"--lab",
action="store_true",
help="run 'jupyter lab', vs the default 'jupyter notebook'.")
parser.add_argument(
"--bare",
action="store_true",
help="Skip mounting the $HOME directory; run an isolated Jupyter lab.")
def local_build_parser(base):
"""Configure the subparser for `caliban run`."""
parser = base.add_parser(
"build",
help="Build a Docker image without submitting or running any code.")
building_parser(parser)
def local_run_parser(base):
"""Configure the subparser for `caliban run`."""
parser = base.add_parser("run", help="Run a job inside a Docker container.")
executing_parser(parser)
image_id_arg(parser)
docker_run_arg(parser)
xgroup_submit_arg(parser)
def gpu_spec_arg(parser, validate_count: bool = False):
parser.add_argument(
"--gpu_spec",
metavar=ct.GPUSpec.METAVAR,
type=lambda x: ct.GPUSpec.parse_arg(x, validate_count=validate_count),
help="Type and number of GPUs to use for each AI Platform/GKE " +
"submission. Defaults to 1x{} in GPU mode ".format(
conf.DEFAULT_GPU.name) + "or None if --nogpu is passed.")
def tpu_spec_arg(parser, validate_count: bool = True):
parser.add_argument(
"--tpu_spec",
metavar=ct.TPUSpec.METAVAR,
type=lambda x: ct.TPUSpec.parse_arg(x, validate_count=validate_count),
help="Type and number of TPUs to request for each "
"AI Platform/GKE submission. Defaults to None.")
def force_arg(parser):
parser.add_argument(
"--force",
action="store_true",
help="Force past validations and submit the job as specified.")
def job_name_arg(parser):
parser.add_argument("--name",
help="Set a job name for AI Platform or GKE jobs.")
def experiment_config_arg(parser):
parser.add_argument(
"--experiment_config",
type=ce.load_experiment_config,
help="Path to an experiment config, or 'stdin' to read from stdin.")
def label_arg(parser):
parser.add_argument("-l",
"--label",
metavar="KEY=VALUE",
action="append",
type=ua.parse_kv_pair,
help="Extra label k=v pair to submit to Cloud.")
def dry_run_arg(parser):
parser.add_argument(
conf.DRY_RUN_FLAG,
action="store_true",
help="Don't actually submit; log everything that's going to happen.")
def container_parser(parser):
executing_parser(parser)
image_tag_arg(parser)
project_id_arg(parser)
region_arg(parser)
machine_type_arg(parser)
gpu_spec_arg(parser)
tpu_spec_arg(parser)
force_arg(parser)
job_name_arg(parser)
label_arg(parser)
xgroup_submit_arg(parser)
def cloud_parser(base):
parser = base.add_parser("cloud", help="Submit AI platform jobs to Cloud.")
container_parser(parser)
return
def caliban_parser():
"""Creates and returns the argparse instance for the entire Caliban app."""
parser = argparse_flags.ArgumentParser(description="""Docker and AI
Platform model training and development script. For detailed
documentation, visit https://github.com/google/caliban""",
prog="caliban")
parser.add_argument('--version',
action='version',
version="%(prog)s {}".format(__version__))
subparser = parser.add_subparsers(dest="command")
subparser.required = True
shell_parser(subparser)
notebook_parser(subparser)
local_build_parser(subparser)
local_run_parser(subparser)
cloud_parser(subparser)
cluster_parser(subparser)
status_parser(subparser)
stop_parser(subparser)
resubmit_parser(subparser)
return parser
# Validations that require access to multiple arguments at once.
def mac_gpu_check(job_mode: conf.JobMode, command: str) -> None:
"""If the command depends on 'docker run' and is running on a Mac, fail fast."""
if conf.gpu(job_mode) and command in ("shell", "notebook", "run"):
u.err("\n'caliban {}' doesn't support GPU usage on Macs! Please pass ".
format(command) + "--nogpu to use this command.\n\n")
u.err(
"(GPU mode is fine for 'caliban cloud' from a Mac; just nothing that runs "
"locally.)\n\n")
sys.exit(1)
def _validate_no_gpu_type(use_gpu: bool, gpu_spec: Optional[ct.GPUSpec]):
"""Prevents a user from submitting a Cloud job using a CPU image when they've
explicitly attempted to set a GPU spec.
"""
gpu_disabled = not use_gpu
if gpu_disabled and gpu_spec is not None:
u.err("\n'--nogpu' is incompatible with an explicit --gpu_spec option. "
"Please remove one or the other!\n\n")
sys.exit(1)
def _validate_machine_type(gpu_spec: Optional[ct.GPUSpec],
machine_type: Optional[ct.MachineType]):
"""If both args are provided,makes sure that Cloud supports this particular
combination of GPU count, type and machine type.
"""
if gpu_spec is not None and machine_type is not None:
if not gpu_spec.valid_machine_type(machine_type):
# Show a list of the allowed types, sorted so that at least the machine
# prefixes stick together.
allowed = u.enum_vals(gpu_spec.allowed_machine_types())
allowed.sort()
u.err(f"\n'{machine_type.value}' isn't a valid machine type " +
f"for {gpu_spec.count} {gpu_spec.gpu.name} GPUs.\n\n")
u.err(ct.with_advice_suffix("gpu", f"Try one of these: {allowed}"))
u.err("\n")
sys.exit(1)
def _validate_accelerator_region(spec: Optional[Union[ct.GPUSpec, ct.TPUSpec]],
region: ct.Region):
"""Check that the supplied region is valid for the accelerator specification,
if supplied.
"""
if spec is not None:
accel = spec.accelerator_type
if not spec.valid_region(region):
# Show a list of the allowed types, sorted so that at least the machine
# prefixes stick together.
allowed = u.enum_vals(spec.allowed_regions())
allowed.sort()
u.err("\n'{}' isn't a valid region ".format(region.value) +
"for {}s of type {}.\n\n".format(accel, spec.name))
u.err("Try one of these: {}\n\n".format(allowed))
u.err("See this page for more info about regional " +
"support for {}s: https://cloud.google.com/ml-engine/docs/regions\n"
.format(accel))
sys.exit(1)
def validate_across_args(args) -> None:
"""Apply validations that need combinations of arguments to work."""
m = vars(args)
command = m["command"]
if u.is_mac():
job_mode = resolve_job_mode(m)
mac_gpu_check(job_mode, command)
if command == "cloud" and not m.get("force"):
use_gpu = m.get("use_gpu")
region = conf.extract_region(vars(args))
gpu_spec = args.gpu_spec
tpu_spec = args.tpu_spec
_validate_no_gpu_type(use_gpu, gpu_spec)
# A TPU is valid with or without an attached GPU.
_validate_accelerator_region(tpu_spec, region)
if use_gpu:
_validate_machine_type(gpu_spec, args.machine_type)
_validate_accelerator_region(gpu_spec, region)
return args
def parse_flags(argv):
"""Function required by absl.app.run.
Internally generates a parser and returns
the results of parsing caliban arguments.
"""
args = argv[1:]
ret = caliban_parser().parse_args(args)
# Validate that extra script args were properly parsed.
validate_script_args(args, vars(ret).get("script_args", []))
return validate_across_args(ret)
def generate_docker_args(job_mode: conf.JobMode,
args: Dict[str, Any]) -> Dict[str, Any]:
"""gemerate docker args from args and job mode"""
# Get extra dependencies in case you want to install your requirements via a
# setup.py file.
setup_extras = b.base_extras(job_mode, "setup.py", args.get("extras"))
# Google application credentials, from the CLI or from an env variable.
creds_path = conf.extract_cloud_key(args)
# Application default credentials location.
adc_loc = csdk.get_application_default_credentials_path()
adc_path = adc_loc if os.path.isfile(adc_loc) else None
# TODO we may want to take custom paths, here, in addition to detecting them.
reqs = "requirements.txt"
conda_env = "environment.yml"
# Arguments that make their way down to caliban.docker.build.build_image.
docker_args = {
"extra_dirs": args.get("dir"),
"requirements_path": reqs if os.path.exists(reqs) else None,
"conda_env_path": conda_env if os.path.exists(conda_env) else None,
"caliban_config": conf.caliban_config(),
"credentials_path": creds_path,
"adc_path": adc_path,
"setup_extras": setup_extras,
"no_cache": args.get("no_cache", False),
'build_path': os.getcwd(),
}
return docker_args
# ----------------------------------------------------------------------------
def cluster_parser(base):
"""cli parser for cluster commands"""
parser = base.add_parser("cluster",
description="cluster commands",
help="cluster-related commands")
subparser = parser.add_subparsers(dest="cluster_cmd")
cluster_ls_cmd(subparser)
cluster_pod_parser(subparser)
cluster_job_parser(subparser)
cluster_node_pool_parser(subparser)
cluster_create_cmd(subparser)
cluster_delete_cmd(subparser)
# ----------------------------------------------------------------------------
def cluster_ls_cmd(base):
"""caliban cluster ls"""
parser = base.add_parser(
"ls",
description="list clusters",
help="list clusters",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
project_id_arg(parser)
cloud_key_arg(parser)
zone_arg(parser)
# ----------------------------------------------------------------------------
def cluster_name_arg(parser):
parser.add_argument("--cluster_name", help="cluster name", type=str)
# ----------------------------------------------------------------------------
def zone_arg(parser, default=None, help='zone'):
parser.add_argument("--zone", help=help, type=str, default=default)
# ----------------------------------------------------------------------------
def cluster_pod_parser(base):
parser = base.add_parser(
"pod",
description="pod commands",
help="pod commands",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
subparser = parser.add_subparsers(dest="pod_cmd")
cluster_pod_ls_cmd(subparser)
# ----------------------------------------------------------------------------
def cluster_pod_ls_cmd(base):
parser = base.add_parser("ls", description="list pods", help="list pods")
project_id_arg(parser)
cloud_key_arg(parser)
cluster_name_arg(parser)
zone_arg(parser)
# ----------------------------------------------------------------------------
def cluster_job_parser(base):
parser = base.add_parser(
"job",
description="job commands",
help="job commands",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
subparser = parser.add_subparsers(dest="job_cmd")
cluster_job_ls_cmd(subparser)
cluster_job_submit_cmd(subparser)
cluster_job_submit_file_cmd(subparser)
# ----------------------------------------------------------------------------
def cluster_job_ls_cmd(base):
parser = base.add_parser(
"ls",
description="list jobs",
help="list jobs",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
project_id_arg(parser)
cloud_key_arg(parser)
cluster_name_arg(parser)
zone_arg(parser)
# ----------------------------------------------------------------------------
def cluster_job_submit_cmd(base):
parser = base.add_parser(
"submit",
description="submit cluster job(s)",
help="submit cluster job(s)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
cluster_name_arg(parser)
no_gpu_flag(parser)
cloud_key_arg(parser)
setup_extras(parser)
extra_dirs(parser)
image_tag_arg(parser)
project_id_arg(parser)
min_cpu_arg(parser)
min_mem_arg(parser)
gpu_spec_arg(parser, validate_count=False)
tpu_spec_arg(parser, validate_count=False)
tpu_driver_arg(parser)
nonpreemptible_tpu_arg(parser)
force_arg(parser)
job_name_arg(parser)
experiment_config_arg(parser)
label_arg(parser)
nonpreemptible_arg(parser)
dry_run_arg(parser)
job_export_arg(parser)
xgroup_submit_arg(parser)
require_module(parser)
add_script_args(parser)
# ----------------------------------------------------------------------------
def job_file_arg(parser):
parser.add_argument('job_file',
type=gke_u.validate_job_filename,
help='kubernetes k8s job file {}'.format(
gke_k.VALID_JOB_FILE_EXT))
# ----------------------------------------------------------------------------
def job_export_arg(parser):
parser.add_argument(
'--export',
type=gke_u.validate_job_filename,
help=('Export job spec(s) to file, extension must be one of ' +
'{} (for example: --export my-job-spec.yaml) '.format(
gke_k.VALID_JOB_FILE_EXT) +
'For multiple jobs (i.e. in an experiment config scenario), ' +
'multiple files will be generated with an index inserted ' +
'(for example: --export my-job-spec.yaml would yield ' +
'my-job-spec_0.yaml, my-job-spec_1.yaml...)'))
# ----------------------------------------------------------------------------
def cluster_job_submit_file_cmd(base):
parser = base.add_parser(
"submit_file",
description='submit gke job from yaml/json file',
help='submit gke job from yaml/json file',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
cluster_name_arg(parser)
cloud_key_arg(parser)
project_id_arg(parser)
dry_run_arg(parser)
job_file_arg(parser)
# ----------------------------------------------------------------------------
def tpu_driver_arg(parser):
parser.add_argument("--tpu_driver",
type=str,
help="tpu driver",
default=gke.constants.DEFAULT_TPU_DRIVER)
# ----------------------------------------------------------------------------
def nonpreemptible_tpu_arg(parser):
parser.add_argument(
"--nonpreemptible_tpu",
action="store_true",
help=("use non-preemptible tpus: "
"note this only applies to v2-8 and v3-8 tpus currently, see: "
"https://cloud.google.com/tpu/docs/preemptible"))
# ----------------------------------------------------------------------------
def nonpreemptible_arg(parser):
parser.add_argument(
"--nonpreemptible",
action="store_true",
help=
("use non-preemptible VM instance: "
"please note that you may need to upgrade your "
"cluster to a recent version/use the rapid release "
"channel for preemptible VMs to be supported with node autoprovisioning: "
"https://cloud.google.com/kubernetes-engine/docs/release-notes-rapid#december_13_2019"
))
# ----------------------------------------------------------------------------
def cluster_node_pool_parser(base):
parser = base.add_parser(
"node_pool",
description="node pool commands",
help="node pool commands",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
subparser = parser.add_subparsers(dest="node_pool_cmd")
cluster_node_pool_ls_cmd(subparser)
# ----------------------------------------------------------------------------
def cluster_node_pool_ls_cmd(base):
parser = base.add_parser(
"ls",
description="list node pools",
help="list node pools",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
project_id_arg(parser)
cloud_key_arg(parser)
cluster_name_arg(parser)
zone_arg(parser)
# ----------------------------------------------------------------------------
def cluster_create_cmd(base):
"""caliban cluster create"""
parser = base.add_parser(
"create",
description="create cluster",
help="create cluster",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
project_id_arg(parser)
cloud_key_arg(parser)
cluster_name_arg(parser)
zone_arg(
parser,
help='for a single-zone cluster, this specifies the zone '
'for the cluster control plane and all worker nodes, while for a '
'multi-zone cluster this specifies only the zone for the control plane, '
'while worker nodes may be created in any zone within the same region as '
'the control plane. The single_zone argument specifies whether to create '
'a single- or multi- zone cluster.')
dry_run_arg(parser)
release_channel_arg(parser)
single_zone_arg(parser)
# ----------------------------------------------------------------------------
def cluster_delete_cmd(base):
"""caliban cluster delete"""
parser = base.add_parser(
"delete",
description="delete cluster",
help="delete cluster",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
project_id_arg(parser)
cloud_key_arg(parser)
cluster_name_arg(parser)
zone_arg(parser)
# ----------------------------------------------------------------------------
def release_channel_arg(parser):
parser.add_argument(
"--release_channel",
metavar=[x.value for x in gke_t.ReleaseChannel],
choices=gke_t.ReleaseChannel,
type=gke_t.ReleaseChannel,
help="cluster release channel, see "
"https://cloud.google.com/kubernetes-engine/docs/concepts/release-channels",
default=gke.constants.DEFAULT_RELEASE_CHANNEL.value)
# ----------------------------------------------------------------------------
def single_zone_arg(parser):
parser.add_argument(
"--single_zone",
action="store_true",
help=
('create a single-zone cluster if set, otherwise create a multi-zone '
'cluster: see https://cloud.google.com/kubernetes-engine/docs/concepts/'
'types-of-clusters#cluster_availability_choices'))
# ----------------------------------------------------------------------------
def min_cpu_arg(parser):
parser.add_argument(
'--min_cpu',
type=int,
help='Minimum cpu needed by job, in milli-cpus. If not specified, then '
'this value defaults to {} for gpu/tpu jobs, and {} for cpu jobs. Please '
'note that gke daemon processes utilize a small amount of cpu on each node, '
'so if you want to have your job run on a specific machine type, say a 2-cpu '
'machine, then if you specify a minimum cpu of 2000, then your job will '
'not be schedulable on a 2-cpu machine as the daemon processes will push '
'the total cpu needed to more than two full cpus.'.format(
gke_k.DEFAULT_MIN_CPU_ACCEL, gke_k.DEFAULT_MIN_CPU_CPU))
# ----------------------------------------------------------------------------
def min_mem_arg(parser):
parser.add_argument(
'--min_mem',
type=int,
help='Minimum memory needed by job, in MB. Please note that gke '
'daemon processes utilize a small amount of memory on each node, so if '
'you want to have your job run on a specific machine type, say a machine '
'with 8GB total memory, then if you specify a minimum memory of 8000MB, '
'then your job will not be schedulable on a 8GB machine as the daemon '
'processes will push the total memory needed to more than 8GB.')
# ----------------------------------------------------------------------------
def xgroup_arg(parser, helpstr: str):
parser.add_argument(
'--xgroup',
type=str,
help=helpstr,
)
def xgroup_submit_arg(parser):
xgroup_arg(
parser,
helpstr=
f'This specifies an experiment group, which ties experiments and job '
f'instances together. If you do not specify a group, then a new one will be '
f'created. If you specify an existing experiment group here, then new '
f'experiments and jobs you create will be added to the group you specify.',
)
# ----------------------------------------------------------------------------
def status_parser(base):
'''cli parser for status command'''
parser = base.add_parser("status", help="get status for caliban jobs")
xgroup_arg(parser, helpstr='experiment group')
max_jobs_arg(parser)
# ----------------------------------------------------------------------------
def stop_parser(base):
'''cli parser for stop command'''
parser = base.add_parser('stop', help='stop running caliban jobs')
xgroup_arg(parser, helpstr='experiment group')
dry_run_arg(parser)
# ----------------------------------------------------------------------------
def all_jobs_arg(parser):
parser.add_argument(
'--all_jobs',
action='store_true',
help=(f'resubmit all jobs regardless of current state, otherwise only '
f'jobs that are in FAILED or STOPPED state will be resubmitted'))
# ----------------------------------------------------------------------------
def resubmit_parser(base):
'''cli parser for resubmit command'''
parser = base.add_parser('resubmit', help='resubmit caliban jobs')
xgroup_arg(parser, helpstr='experiment group')
dry_run_arg(parser)
all_jobs_arg(parser)
project_id_arg(parser)
cloud_key_arg(parser)
# ----------------------------------------------------------------------------
def max_jobs_arg(parser):
parser.add_argument(
'--max_jobs',
type=int,
help=(f'Maximum number of jobs to view. If you specify an experiment '
f'group, then this specifies the maximum number of jobs per '
f'experiment to view. If you do not specify an experiment group, '
f'then this specifies the total number of jobs to return, ordered '
f'by creation date, or all jobs if max_jobs==0.'),
)
|
release/stubs.min/Grasshopper/Kernel/Undo/Actions.py | htlcnn/ironpython-stubs | 182 | 11131894 | # encoding: utf-8
# module Grasshopper.Kernel.Undo.Actions calls itself Actions
# from Grasshopper,Version=1.0.0.20,Culture=neutral,PublicKeyToken=dda4f5ec2cd80803
# by generator 1.145
""" NamespaceTracker represent a CLS namespace. """
# no imports
# no functions
# classes
class GH_AddObjectAction(GH_UndoAction,IGH_UndoAction,GH_ISerializable):
""" GH_AddObjectAction(obj: IGH_DocumentObject) """
def Internal_Redo(self,*args):
""" Internal_Redo(self: GH_AddObjectAction,doc: GH_Document) """
pass
def Internal_Undo(self,*args):
""" Internal_Undo(self: GH_AddObjectAction,doc: GH_Document) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,obj):
""" __new__(cls: type,obj: IGH_DocumentObject) """
pass
ExpiresSolution=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: ExpiresSolution(self: GH_AddObjectAction) -> bool
"""
class GH_AddStateAction(GH_ArchivedUndoAction,IGH_UndoAction,GH_ISerializable):
""" GH_AddStateAction(index: int,state: GH_State) """
def Deserialize(self,*args):
""" Deserialize(self: GH_ArchivedUndoAction,obj: GH_ISerializable) """
pass
def Internal_Redo(self,*args):
""" Internal_Redo(self: GH_AddStateAction,doc: GH_Document) """
pass
def Internal_Undo(self,*args):
""" Internal_Undo(self: GH_AddStateAction,doc: GH_Document) """
pass
def Read(self,reader):
""" Read(self: GH_AddStateAction,reader: GH_IReader) -> bool """
pass
def Serialize(self,*args):
""" Serialize(self: GH_ArchivedUndoAction,obj: GH_ISerializable) """
pass
def SerializeToByteArray(self,*args):
""" SerializeToByteArray(self: GH_ArchivedUndoAction,obj: GH_ISerializable) -> Array[Byte] """
pass
def Write(self,writer):
""" Write(self: GH_AddStateAction,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,index,state):
""" __new__(cls: type,index: int,state: GH_State) """
pass
m_data=None
class GH_DataMatchingAction(GH_ObjectUndoAction,IGH_UndoAction,GH_ISerializable):
""" GH_DataMatchingAction(obj: IGH_Component) """
def Internal_Redo(self,*args):
""" Internal_Redo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Internal_Undo(self,*args):
""" Internal_Undo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Object_Redo(self,*args):
""" Object_Redo(self: GH_DataMatchingAction,doc: GH_Document,obj: IGH_DocumentObject) """
pass
def Object_Undo(self,*args):
""" Object_Undo(self: GH_DataMatchingAction,doc: GH_Document,obj: IGH_DocumentObject) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,obj):
""" __new__(cls: type,obj: IGH_Component) """
pass
ExpiresSolution=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: ExpiresSolution(self: GH_DataMatchingAction) -> bool
"""
class GH_DataModificationAction(GH_ObjectUndoAction,IGH_UndoAction,GH_ISerializable):
""" GH_DataModificationAction(obj: IGH_Param) """
def Internal_Redo(self,*args):
""" Internal_Redo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Internal_Undo(self,*args):
""" Internal_Undo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Object_Redo(self,*args):
""" Object_Redo(self: GH_DataModificationAction,doc: GH_Document,obj: IGH_DocumentObject) """
pass
def Object_Undo(self,*args):
""" Object_Undo(self: GH_DataModificationAction,doc: GH_Document,obj: IGH_DocumentObject) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,obj):
""" __new__(cls: type,obj: IGH_Param) """
pass
ExpiresSolution=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: ExpiresSolution(self: GH_DataModificationAction) -> bool
"""
class GH_GenericObjectAction(GH_ArchivedUndoAction,IGH_UndoAction,GH_ISerializable):
""" GH_GenericObjectAction(obj: IGH_DocumentObject) """
def Deserialize(self,*args):
""" Deserialize(self: GH_ArchivedUndoAction,obj: GH_ISerializable) """
pass
def Internal_Redo(self,*args):
""" Internal_Redo(self: GH_GenericObjectAction,doc: GH_Document) """
pass
def Internal_Undo(self,*args):
""" Internal_Undo(self: GH_GenericObjectAction,doc: GH_Document) """
pass
def Serialize(self,*args):
""" Serialize(self: GH_ArchivedUndoAction,obj: GH_ISerializable) """
pass
def SerializeToByteArray(self,*args):
""" SerializeToByteArray(self: GH_ArchivedUndoAction,obj: GH_ISerializable) -> Array[Byte] """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,obj):
""" __new__(cls: type,obj: IGH_DocumentObject) """
pass
ExpiresSolution=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: ExpiresSolution(self: GH_GenericObjectAction) -> bool
"""
m_data=None
class GH_HiddenAction(GH_ObjectUndoAction,IGH_UndoAction,GH_ISerializable):
""" GH_HiddenAction(obj: IGH_ActiveObject) """
def Internal_Redo(self,*args):
""" Internal_Redo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Internal_Undo(self,*args):
""" Internal_Undo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Object_Redo(self,*args):
""" Object_Redo(self: GH_HiddenAction,doc: GH_Document,obj: IGH_DocumentObject) """
pass
def Object_Undo(self,*args):
""" Object_Undo(self: GH_HiddenAction,doc: GH_Document,obj: IGH_DocumentObject) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,obj):
""" __new__(cls: type,obj: IGH_ActiveObject) """
pass
ExpiresDisplay=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: ExpiresDisplay(self: GH_HiddenAction) -> bool
"""
class GH_IconDisplayAction(GH_ObjectUndoAction,IGH_UndoAction,GH_ISerializable):
""" GH_IconDisplayAction(obj: IGH_DocumentObject) """
def Internal_Redo(self,*args):
""" Internal_Redo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Internal_Undo(self,*args):
""" Internal_Undo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Object_Redo(self,*args):
""" Object_Redo(self: GH_IconDisplayAction,doc: GH_Document,obj: IGH_DocumentObject) """
pass
def Object_Undo(self,*args):
""" Object_Undo(self: GH_IconDisplayAction,doc: GH_Document,obj: IGH_DocumentObject) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,obj):
""" __new__(cls: type,obj: IGH_DocumentObject) """
pass
class GH_IconOverrideAction(GH_ObjectUndoAction,IGH_UndoAction,GH_ISerializable):
""" GH_IconOverrideAction(obj: IGH_DocumentObject) """
def Internal_Redo(self,*args):
""" Internal_Redo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Internal_Undo(self,*args):
""" Internal_Undo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Object_Redo(self,*args):
""" Object_Redo(self: GH_IconOverrideAction,doc: GH_Document,obj: IGH_DocumentObject) """
pass
def Object_Undo(self,*args):
""" Object_Undo(self: GH_IconOverrideAction,doc: GH_Document,obj: IGH_DocumentObject) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,obj):
""" __new__(cls: type,obj: IGH_DocumentObject) """
pass
class GH_LayoutAction(GH_ObjectUndoAction,IGH_UndoAction,GH_ISerializable):
""" GH_LayoutAction(obj: IGH_DocumentObject) """
def Internal_Redo(self,*args):
""" Internal_Redo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Internal_Undo(self,*args):
""" Internal_Undo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Object_Redo(self,*args):
""" Object_Redo(self: GH_LayoutAction,doc: GH_Document,obj: IGH_DocumentObject) """
pass
def Object_Undo(self,*args):
""" Object_Undo(self: GH_LayoutAction,doc: GH_Document,obj: IGH_DocumentObject) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,obj):
""" __new__(cls: type,obj: IGH_DocumentObject) """
pass
class GH_LockedAction(GH_ObjectUndoAction,IGH_UndoAction,GH_ISerializable):
""" GH_LockedAction(obj: IGH_ActiveObject) """
def Internal_Redo(self,*args):
""" Internal_Redo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Internal_Undo(self,*args):
""" Internal_Undo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Object_Redo(self,*args):
""" Object_Redo(self: GH_LockedAction,doc: GH_Document,obj: IGH_DocumentObject) """
pass
def Object_Undo(self,*args):
""" Object_Undo(self: GH_LockedAction,doc: GH_Document,obj: IGH_DocumentObject) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,obj):
""" __new__(cls: type,obj: IGH_ActiveObject) """
pass
ExpiresSolution=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: ExpiresSolution(self: GH_LockedAction) -> bool
"""
class GH_NickNameAction(GH_ObjectUndoAction,IGH_UndoAction,GH_ISerializable):
""" GH_NickNameAction(obj: IGH_DocumentObject) """
def Internal_Redo(self,*args):
""" Internal_Redo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Internal_Undo(self,*args):
""" Internal_Undo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Object_Redo(self,*args):
""" Object_Redo(self: GH_NickNameAction,doc: GH_Document,obj: IGH_DocumentObject) """
pass
def Object_Undo(self,*args):
""" Object_Undo(self: GH_NickNameAction,doc: GH_Document,obj: IGH_DocumentObject) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,obj):
""" __new__(cls: type,obj: IGH_DocumentObject) """
pass
class GH_PersistentDataAction(GH_ObjectUndoAction,IGH_UndoAction,GH_ISerializable):
""" GH_PersistentDataAction[T](obj: GH_PersistentParam[T]) """
def Internal_Redo(self,*args):
""" Internal_Redo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Internal_Undo(self,*args):
""" Internal_Undo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Object_Redo(self,*args):
""" Object_Redo(self: GH_PersistentDataAction[T],doc: GH_Document,obj: IGH_DocumentObject) """
pass
def Object_Undo(self,*args):
""" Object_Undo(self: GH_PersistentDataAction[T],doc: GH_Document,obj: IGH_DocumentObject) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,obj):
""" __new__(cls: type,obj: GH_PersistentParam[T]) """
pass
ExpiresSolution=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: ExpiresSolution(self: GH_PersistentDataAction[T]) -> bool
"""
class GH_PivotAction(GH_ObjectUndoAction,IGH_UndoAction,GH_ISerializable):
""" GH_PivotAction(obj: IGH_DocumentObject) """
def Internal_Redo(self,*args):
""" Internal_Redo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Internal_Undo(self,*args):
""" Internal_Undo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Object_Redo(self,*args):
""" Object_Redo(self: GH_PivotAction,doc: GH_Document,obj: IGH_DocumentObject) """
pass
def Object_Undo(self,*args):
""" Object_Undo(self: GH_PivotAction,doc: GH_Document,obj: IGH_DocumentObject) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,obj):
""" __new__(cls: type,obj: IGH_DocumentObject) """
pass
class GH_RemoveObjectAction(GH_ArchivedUndoAction,IGH_UndoAction,GH_ISerializable):
""" GH_RemoveObjectAction(obj: IGH_DocumentObject) """
def Deserialize(self,*args):
""" Deserialize(self: GH_ArchivedUndoAction,obj: GH_ISerializable) """
pass
def Internal_Redo(self,*args):
""" Internal_Redo(self: GH_RemoveObjectAction,doc: GH_Document) """
pass
def Internal_Undo(self,*args):
""" Internal_Undo(self: GH_RemoveObjectAction,doc: GH_Document) """
pass
def Serialize(self,*args):
""" Serialize(self: GH_ArchivedUndoAction,obj: GH_ISerializable) """
pass
def SerializeToByteArray(self,*args):
""" SerializeToByteArray(self: GH_ArchivedUndoAction,obj: GH_ISerializable) -> Array[Byte] """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,obj):
""" __new__(cls: type,obj: IGH_DocumentObject) """
pass
ExpiresSolution=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: ExpiresSolution(self: GH_RemoveObjectAction) -> bool
"""
m_data=None
class GH_RemoveStateAction(GH_ArchivedUndoAction,IGH_UndoAction,GH_ISerializable):
""" GH_RemoveStateAction(index: int,state: GH_State) """
def Deserialize(self,*args):
""" Deserialize(self: GH_ArchivedUndoAction,obj: GH_ISerializable) """
pass
def Internal_Redo(self,*args):
""" Internal_Redo(self: GH_RemoveStateAction,doc: GH_Document) """
pass
def Internal_Undo(self,*args):
""" Internal_Undo(self: GH_RemoveStateAction,doc: GH_Document) """
pass
def Read(self,reader):
""" Read(self: GH_RemoveStateAction,reader: GH_IReader) -> bool """
pass
def Serialize(self,*args):
""" Serialize(self: GH_ArchivedUndoAction,obj: GH_ISerializable) """
pass
def SerializeToByteArray(self,*args):
""" SerializeToByteArray(self: GH_ArchivedUndoAction,obj: GH_ISerializable) -> Array[Byte] """
pass
def Write(self,writer):
""" Write(self: GH_RemoveStateAction,writer: GH_IWriter) -> bool """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,index,state):
""" __new__(cls: type,index: int,state: GH_State) """
pass
m_data=None
class GH_WireAction(GH_UndoAction,IGH_UndoAction,GH_ISerializable):
""" GH_WireAction(param: IGH_Param) """
def Internal_Redo(self,*args):
""" Internal_Redo(self: GH_WireAction,doc: GH_Document) """
pass
def Internal_Undo(self,*args):
""" Internal_Undo(self: GH_WireAction,doc: GH_Document) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,param):
""" __new__(cls: type,param: IGH_Param) """
pass
ExpiresSolution=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: ExpiresSolution(self: GH_WireAction) -> bool
"""
class GH_WireDisplayAction(GH_ObjectUndoAction,IGH_UndoAction,GH_ISerializable):
""" GH_WireDisplayAction(obj: IGH_Param) """
def Internal_Redo(self,*args):
""" Internal_Redo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Internal_Undo(self,*args):
""" Internal_Undo(self: GH_ObjectUndoAction,doc: GH_Document) """
pass
def Object_Redo(self,*args):
""" Object_Redo(self: GH_WireDisplayAction,doc: GH_Document,obj: IGH_DocumentObject) """
pass
def Object_Undo(self,*args):
""" Object_Undo(self: GH_WireDisplayAction,doc: GH_Document,obj: IGH_DocumentObject) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self,obj):
""" __new__(cls: type,obj: IGH_Param) """
pass
|
docs/10/recursion.py | 15921483570/python_data_structures_and_algorithms | 2,468 | 11131895 | # -*- coding: utf-8 -*-
def fact(n):
if n == 0:
return 1
else:
return n * fact(n - 1)
def print_num(n):
    for i in range(1, n + 1):  # note: many languages use half-open intervals starting at 0, and Python is no exception
print(i)
def print_num_recursive(n):
if n > 0:
print_num_recursive(n - 1)
print(n)
def print_num_recursive_revserve(n):
if n > 0:
print(n)
print_num_recursive_revserve(n - 1)
from collections import deque
class Stack(object):
def __init__(self):
self._deque = deque()
def push(self, value):
return self._deque.append(value)
def pop(self):
return self._deque.pop()
def is_empty(self):
return len(self._deque) == 0
def print_num_use_stack(n):
s = Stack()
while n > 0: # 不断将参数入栈
s.push(n)
n -= 1
while not s.is_empty(): # 参数弹出
print(s.pop())
def hanoi_move(n, source, dest, intermediate):
    if n >= 1:  # recursion exit: stop once there are no disks left to move
hanoi_move(n - 1, source, intermediate, dest)
print("Move %s -> %s" % (source, dest))
hanoi_move(n - 1, intermediate, dest, source)
def flatten(rec_list):
for i in rec_list:
if isinstance(i, list):
for i in flatten(i):
yield i
else:
yield i
def test_flatten():
assert list(flatten([[[1], 2, 3], [1, 2, 3]])) == [1, 2, 3, 1, 2, 3]
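if __name__ == "__main__":
    # Illustrative driver for the examples above (example usage only).
    print(fact(5))                # 120
    print_num_use_stack(3)        # prints 1, 2, 3 via the explicit stack
    hanoi_move(3, "A", "C", "B")  # prints the 7 moves for three disks
    test_flatten()                # passes silently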
|
inclearn/convnet/cifar_resnet.py | Zotkin/incremental_learning.pytorch | 277 | 11131899 | ''' Incremental-Classifier Learning
Authors : <NAME>, <NAME>
Maintainer : <NAME>
Lab : TUKL-SEECS R&D Lab
Email : <EMAIL> '''
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
class DownsampleA(nn.Module):
def __init__(self, nIn, nOut, stride):
super(DownsampleA, self).__init__()
assert stride == 2
self.avg = nn.AvgPool2d(kernel_size=1, stride=stride)
def forward(self, x):
x = self.avg(x)
return torch.cat((x, x.mul(0)), 1)
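    # Note: this is the parameter-free "option A" shortcut from the ResNet paper:
    # downsample spatially with average pooling, then double the channel count by
    # concatenating a zeroed copy of the activations (x.mul(0)) along dim 1.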
class ResNetBasicblock(nn.Module):
expansion = 1
"""
    ResNet basicblock (https://github.com/facebook/fb.resnet.torch/blob/master/models/resnet.lua)
"""
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(ResNetBasicblock, self).__init__()
self.conv_a = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn_a = nn.BatchNorm2d(planes)
self.conv_b = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn_b = nn.BatchNorm2d(planes)
self.downsample = downsample
self.featureSize = 64
def forward(self, x):
residual = x
basicblock = self.conv_a(x)
basicblock = self.bn_a(basicblock)
basicblock = F.relu(basicblock, inplace=True)
basicblock = self.conv_b(basicblock)
basicblock = self.bn_b(basicblock)
if self.downsample is not None:
residual = self.downsample(x)
return F.relu(residual + basicblock, inplace=True)
class CifarResNet(nn.Module):
"""
ResNet optimized for the Cifar Dataset, as specified in
https://arxiv.org/abs/1512.03385.pdf
"""
def __init__(self, block, depth, num_classes, channels=3):
""" Constructor
Args:
depth: number of layers.
num_classes: number of classes
base_width: base width
"""
super(CifarResNet, self).__init__()
self.featureSize = 64
# Model type specifies number of layers for CIFAR-10 and CIFAR-100 model
assert (depth - 2) % 6 == 0, 'depth should be one of 20, 32, 44, 56, 110'
layer_blocks = (depth - 2) // 6
self.num_classes = num_classes
self.conv_1_3x3 = nn.Conv2d(channels, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn_1 = nn.BatchNorm2d(16)
self.inplanes = 16
self.stage_1 = self._make_layer(block, 16, layer_blocks, 1)
self.stage_2 = self._make_layer(block, 32, layer_blocks, 2)
self.stage_3 = self._make_layer(block, 64, layer_blocks, 2)
self.avgpool = nn.AvgPool2d(8)
self.out_dim = 64 * block.expansion
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
# m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
                init.kaiming_normal_(m.weight)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = DownsampleA(self.inplanes, planes * block.expansion, stride)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x, feature=False, T=1, labels=False, scale=None, keep=None):
x = self.conv_1_3x3(x)
x = F.relu(self.bn_1(x), inplace=True)
x = self.stage_1(x)
x = self.stage_2(x)
x = self.stage_3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return x
def forwardFeature(self, x):
pass
def resnet20(num_classes=10):
"""Constructs a ResNet-20 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 20, num_classes)
return model
def resnet10mnist(num_classes=10):
"""Constructs a ResNet-20 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 10, num_classes, 1)
return model
def resnet20mnist(num_classes=10):
"""Constructs a ResNet-20 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 20, num_classes, 1)
return model
def resnet32mnist(num_classes=10, channels=1):
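    """Constructs a ResNet-32 model for MNIST (by default)
    Args:
      num_classes (uint): number of classes
      channels (uint): number of input image channels
    """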
model = CifarResNet(ResNetBasicblock, 32, num_classes, channels)
return model
def resnet32(num_classes=10):
"""Constructs a ResNet-32 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 32, num_classes)
return model
def resnet44(num_classes=10):
"""Constructs a ResNet-44 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 44, num_classes)
return model
def resnet56(num_classes=10):
"""Constructs a ResNet-56 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 56, num_classes)
return model
def resnet110(num_classes=10):
"""Constructs a ResNet-110 model for CIFAR-10 (by default)
Args:
num_classes (uint): number of classes
"""
model = CifarResNet(ResNetBasicblock, 110, num_classes)
return model
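# Minimal usage sketch (not part of the original file): build a CIFAR model and run
# a dummy forward pass; shapes assume the standard 3x32x32 CIFAR input format.
if __name__ == '__main__':
    net = resnet32(num_classes=100)
    dummy = torch.randn(4, 3, 32, 32)   # batch of 4 CIFAR-sized images
    features = net(dummy)               # forward() returns the pooled 64-d features
    print(features.shape)               # torch.Size([4, 64])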
|
crawl.py | aosp-caf-upstream/platform_external_python_futures | 244 | 11131918 | <filename>crawl.py
"""Compare the speed of downloading URLs sequentially vs. using futures."""
import functools
import time
import timeit
import sys
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
from concurrent.futures import (as_completed, ThreadPoolExecutor,
ProcessPoolExecutor)
URLS = ['http://www.google.com/',
'http://www.apple.com/',
'http://www.ibm.com',
'http://www.thisurlprobablydoesnotexist.com',
'http://www.slashdot.org/',
'http://www.python.org/',
'http://www.bing.com/',
'http://www.facebook.com/',
'http://www.yahoo.com/',
'http://www.youtube.com/',
'http://www.blogger.com/']
def load_url(url, timeout):
kwargs = {'timeout': timeout} if sys.version_info >= (2, 6) else {}
return urlopen(url, **kwargs).read()
def download_urls_sequential(urls, timeout=60):
url_to_content = {}
for url in urls:
try:
url_to_content[url] = load_url(url, timeout=timeout)
        except Exception:
pass
return url_to_content
def download_urls_with_executor(urls, executor, timeout=60):
try:
url_to_content = {}
future_to_url = dict((executor.submit(load_url, url, timeout), url)
for url in urls)
for future in as_completed(future_to_url):
try:
url_to_content[future_to_url[future]] = future.result()
            except Exception:
pass
return url_to_content
finally:
executor.shutdown()
def main():
for name, fn in [('sequential',
functools.partial(download_urls_sequential, URLS)),
('processes',
functools.partial(download_urls_with_executor,
URLS,
ProcessPoolExecutor(10))),
('threads',
functools.partial(download_urls_with_executor,
URLS,
ThreadPoolExecutor(10)))]:
sys.stdout.write('%s: ' % name.ljust(12))
start = time.time()
url_map = fn()
sys.stdout.write('%.2f seconds (%d of %d downloaded)\n' %
(time.time() - start, len(url_map), len(URLS)))
if __name__ == '__main__':
main()
|
tutorials/SnippetsForStaticAndSkeletalMeshes_Assets/threejs_importer.py | avivazran/UnrealEnginePython | 2,350 | 11131954 | <filename>tutorials/SnippetsForStaticAndSkeletalMeshes_Assets/threejs_importer.py
import json
import unreal_engine as ue
from unreal_engine.classes import Skeleton, AnimSequence, SkeletalMesh, Material, MorphTarget, AnimSequence, AnimSequenceFactory
from unreal_engine import FTransform, FVector, FRotator, FQuat, FSoftSkinVertex, FMorphTargetDelta, FRawAnimSequenceTrack
from unreal_engine.structs import SkeletalMaterial, MeshUVChannelInfo, FloatCurve, RichCurve, RichCurveKey, SmartName, RawCurveTracks
from collections import OrderedDict
import numpy
class ThreeJSLoader:
def __init__(self, filename, scale=1.0):
# we need ordered json dictionaries
with open(filename) as json_file:
self.model = json.load(json_file, object_pairs_hook=OrderedDict)
self.scale = scale
# ask the user where to generate the new assets
new_path = ue.create_modal_save_asset_dialog('Choose destination path')
package_name = ue.object_path_to_package_name(new_path)
object_name = ue.get_base_filename(new_path)
self.skeleton = self.build_skeleton(package_name, object_name)
# this is the list of soft skin vertices (they contains mesh data as well as bone influences)
self.vertices = []
# this contain mapping between the soft skin vertices and the json file vertex index (this is required for building morph targets)
self.vertex_map = []
self.mesh = self.build_mesh(package_name, object_name)
self.curves = self.build_morph_targets()
self.animation = self.build_animation(package_name, object_name)
def build_skeleton(self, pkg_name, obj_name):
pkg = ue.get_or_create_package('{0}_Skeleton'.format(pkg_name))
skel = Skeleton('{0}_Skeleton'.format(obj_name), pkg)
# add a root bone from which all of the others will descend
# (this trick will avoid generating an invalid skeleton [and a crash], as in UE4 only one root can exist)
skel.skeleton_add_bone('root', -1, FTransform())
# iterate bones in the json file, note that we move from opengl axis to UE4
# (y on top, x right, z forward right-handed) to (y right, x forward left-handed, z on top)
for bone in self.model['bones']:
# assume no rotation
quat = FQuat()
# give priority to quaternions
# remember to negate x and y axis, as we invert z on position
if 'rotq' in bone:
quat = FQuat(bone['rotq'][2], bone['rotq'][0] * -1,
bone['rotq'][1] * -1, bone['rotq'][3])
elif 'rot' in bone:
quat = FRotator(bone['rot'][2], bone['rot'][0] - 180
, bone['rot'][1] - 180).quaternion()
pos = FVector(bone['pos'][2] * -1, bone['pos'][0],
bone['pos'][1]) * self.scale
# always set parent+1 as we added the root bone before
skel.skeleton_add_bone(
bone['name'], bone['parent'] + 1, FTransform(pos, quat))
skel.save_package()
return skel
def build_soft_vertex(self, index):
# create a new soft skin vertex, holding tangents, normal, uvs, influences...
v = FSoftSkinVertex()
v_index = self.model['faces'][index] * 3
# here we assume 2 bone influences, technically we should honour what the json influencesPerVertex field exposes
b_index = self.model['faces'][index] * 2
v.position = FVector(self.model['vertices'][v_index + 2] * -1, self.model['vertices']
[v_index], self.model['vertices'][v_index + 1]) * self.scale
v.influence_weights = (
self.model['skinWeights'][b_index], self.model['skinWeights'][b_index + 1])
v.influence_bones = (
self.model['skinIndices'][b_index] + 1, self.model['skinIndices'][b_index + 1] + 1)
# return the json index too, as we will need it later for computing morph targets
return (v, v_index)
def get_normals(self, index):
n_index = self.model['faces'][index] * 3
return FVector(self.model['normals'][n_index + 2] * -1, self.model['normals'][n_index], self.model['normals'][n_index + 1])
def build_mesh(self, pkg_name, obj_name):
index = 0
# this supports only format 3 (now deprecated, https://github.com/mrdoob/three.js/wiki/JSON-Model-format-3)
while index < len(self.model['faces']):
face = self.model['faces'][index]
index += 1
points = 3
v0 = v1 = v2 = v3 = None
if face & 1 == 0:
# triangle
v0, v0_index = self.build_soft_vertex(index)
v1, v1_index = self.build_soft_vertex(index + 1)
v2, v2_index = self.build_soft_vertex(index + 2)
else:
# quad
v0, v0_index = self.build_soft_vertex(index)
v1, v1_index = self.build_soft_vertex(index + 1)
v2, v2_index = self.build_soft_vertex(index + 2)
v3, v3_index = self.build_soft_vertex(index + 3)
if v3:
points = 4
index += points
if face & 2:
index += 1
if face & 4:
index += 1
if face & 8:
index += points
if face & 16:
index += 1
if face & 32:
v0.tangent_z = self.get_normals(index)
v1.tangent_z = self.get_normals(index + 1)
v2.tangent_z = self.get_normals(index + 2)
if v3:
v3.tangent_z = self.get_normals(index + 3)
index += points
if face & 64:
index += 1
if face & 128:
index += points
if points == 3:
# we need to fix winding, from OpenGL (counterwise) to UE4 (clockwise)
self.vertices.append(v2)
self.vertex_map.append(v2_index)
self.vertices.append(v0)
self.vertex_map.append(v0_index)
self.vertices.append(v1)
self.vertex_map.append(v1_index)
else:
# we have a quad, generate two triangles
# we need to fix winding, from OpenGL (counterwise) to UE4 (clockwise)
self.vertices.append(v3)
self.vertex_map.append(v3_index)
self.vertices.append(v0)
self.vertex_map.append(v0_index)
self.vertices.append(v1)
self.vertex_map.append(v1_index)
self.vertices.append(v2)
self.vertex_map.append(v2_index)
self.vertices.append(v3)
self.vertex_map.append(v3_index)
self.vertices.append(v1)
self.vertex_map.append(v1_index)
pkg = ue.get_or_create_package(pkg_name)
sm = SkeletalMesh(obj_name, pkg)
sm.skeletal_mesh_set_skeleton(self.skeleton)
# generate the LOD from the list of soft skin vertices
sm.skeletal_mesh_build_lod(self.vertices)
sm.save_package()
return sm
def build_morph_targets(self):
# when we build the skeletal mesh LOD by passing soft skin vertices
# UE4 will internally optimize the vertices to reduce duplicates
# for this reason the vertex index we built is different from the one stored into UE4
# the skeletal_mesh_to_import_vertex_map() returns the original mapping given the new one
import_map = self.mesh.skeletal_mesh_to_import_vertex_map()
# we will fill animation curves for later usage
curves = []
for morph_item in self.model['morphTargets']:
# ensure the MorphTarget has the SkeletalMesh as outer
morph = MorphTarget('', self.mesh)
deltas = []
for idx, import_index in enumerate(import_map):
# get the original json vertex index
vertex_index = self.vertex_map[import_index]
# get the original soft skin vertex
vdata = self.vertices[import_index]
x = morph_item['vertices'][vertex_index + 2] * -1
y = morph_item['vertices'][vertex_index]
z = morph_item['vertices'][vertex_index + 1]
delta = FMorphTargetDelta()
delta.source_idx = idx
# store the difference between original vertex position and the morph target one
delta.position_delta = (
FVector(x, y, z) * self.scale) - vdata.position
deltas.append(delta)
# check for the return value, as sometimes morph targets
# in json files do not generate any kind of modification
# so unreal will skip it
if morph.morph_target_populate_deltas(deltas):
# register the morph target
self.mesh.skeletal_mesh_register_morph_target(morph)
# add curve, not required, we can use it later for skeletal-based animations
curves.append(FloatCurve(Name=SmartName(DisplayName=morph.get_name()), FloatCurve=RichCurve(
Keys=[RichCurveKey(Time=0.0, Value=0.0), RichCurveKey(Time=1.0, Value=1.0)])))
self.mesh.save_package()
return curves
def build_animation(self, pkg_name, obj_name):
factory = AnimSequenceFactory()
factory.TargetSkeleton = self.skeleton
new_anim = factory.factory_create_new('{0}_Animation'.format(pkg_name))
new_anim.NumFrames = self.model['animation']['length'] * \
self.model['animation']['fps']
new_anim.SequenceLength = self.model['animation']['length']
# each bone maps to a track in UE4 animations
for bone_index, track in enumerate(self.model['animation']['hierarchy']):
# retrieve the bone/track name from the index (remember to add 1 as we have the additional root bone)
bone_name = self.skeleton.skeleton_get_bone_name(bone_index + 1)
positions = []
rotations = []
scales = []
for key in track['keys']:
t = key['time']
if 'pos' in key:
positions.append(
(t, FVector(key['pos'][2] * -1, key['pos'][0], key['pos'][1]) * 100))
if 'rotq' in key:
rotations.append((t, FQuat(
key['rotq'][2], key['rotq'][0] * -1, key['rotq'][1] * -1, key['rotq'][3])))
elif 'rot' in key:
# is it a quaternion ?
if len(key['rot']) == 4:
rotations.append(
(t, FQuat(key['rot'][2], key['rot'][0] * -1, key['rot'][1] * -1, key['rot'][3])))
else:
rotations.append(
(t, FRotator(key['rot'][2], key['rot'][0] - 180, key['rot'][1] - 180).quaternion()))
pos_keys = []
rot_keys = []
# generate the right number of frames
for t in numpy.arange(0, self.model['animation']['length'], 1.0 / self.model['animation']['fps']):
pos_keys.append(self.interpolate_vector(positions, t))
rot_keys.append(self.interpolate_quaternion(
rotations, t).get_normalized())
track_data = FRawAnimSequenceTrack()
track_data.pos_keys = pos_keys
track_data.rot_keys = rot_keys
new_anim.add_new_raw_track(bone_name, track_data)
# if we have curves, just add them to the animation
if self.curves:
new_anim.RawCurveData = RawCurveTracks(FloatCurves=self.curves)
new_anim.save_package()
return new_anim
def interpolate_vector(self, timeline, t):
keys = []
x_values = []
y_values = []
z_values = []
for key, value in timeline:
keys.append(key)
x_values.append(value[0])
y_values.append(value[1])
z_values.append(value[2])
x = numpy.interp(t, keys, x_values)
y = numpy.interp(t, keys, y_values)
z = numpy.interp(t, keys, z_values)
return FVector(x, y, z)
def interpolate_quaternion(self, timeline, t):
keys = []
x_values = []
y_values = []
z_values = []
w_values = []
for key, value in timeline:
keys.append(key)
x_values.append(value[0])
y_values.append(value[1])
z_values.append(value[2])
w_values.append(value[3])
x = numpy.interp(t, keys, x_values)
y = numpy.interp(t, keys, y_values)
z = numpy.interp(t, keys, z_values)
w = numpy.interp(t, keys, w_values)
return FQuat(x, y, z, w)
filename = ue.open_file_dialog('Choose a three.js file')[0]
threejs = ThreeJSLoader(filename, 100)
ue.open_editor_for_asset(threejs.animation)
|
AppServer/lib/django-1.2/tests/regressiontests/delete_regress/models.py | loftwah/appscale | 790 | 11131988 | from django.db import models
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
class Award(models.Model):
name = models.CharField(max_length=25)
object_id = models.PositiveIntegerField()
content_type = models.ForeignKey(ContentType)
content_object = generic.GenericForeignKey()
class AwardNote(models.Model):
award = models.ForeignKey(Award)
note = models.CharField(max_length=100)
class Person(models.Model):
name = models.CharField(max_length=25)
awards = generic.GenericRelation(Award)
class Book(models.Model):
pagecount = models.IntegerField()
class Toy(models.Model):
name = models.CharField(max_length=50)
class Child(models.Model):
name = models.CharField(max_length=50)
toys = models.ManyToManyField(Toy, through='PlayedWith')
class PlayedWith(models.Model):
child = models.ForeignKey(Child)
toy = models.ForeignKey(Toy)
date = models.DateField(db_column='date_col')
class PlayedWithNote(models.Model):
played = models.ForeignKey(PlayedWith)
note = models.TextField()
|
test/python/server_port/asgi.py | afxcn/unit | 2,633 | 11131990 | <reponame>afxcn/unit
async def application(scope, receive, send):
assert scope['type'] == 'http'
await send(
{
'type': 'http.response.start',
'status': 200,
'headers': [
(b'content-length', b'0'),
(b'server-port', str(scope['server'][1]).encode()),
],
}
)
|
3d-tracking/model/model_cen.py | vietnhatthai/3d-vehicle-tracking | 603 | 11132003 | import numpy as np
import torch
import torch.nn as nn
import utils.network_utils as nu
import utils.tracking_utils as tu
from model import dla_up
from lib.model.roi_layers import ROIAlign, ROIPool
class Model(nn.Module):
def __init__(self, arch_name, roi_name, down_ratio, roi_kernel):
super(Model, self).__init__()
self.base = dla_up.__dict__[arch_name](
pretrained_base='imagenet', down_ratio=down_ratio)
num_channel = self.base.channels[int(np.log2(down_ratio))]
# We use roialign with kernel size = 7 in our experiments
assert ('align' in roi_name or 'pool' in roi_name)
assert (roi_kernel == 7)
if 'align' in roi_name:
print('Using RoIAlign')
self.roi_pool = ROIAlign(
(roi_kernel, roi_kernel),
1.0 / down_ratio,
0)
elif 'pool' in roi_name:
print('Using RoIPool')
self.roi_pool = ROIPool(
(roi_kernel, roi_kernel),
1.0 / down_ratio)
self.dim = nn.Sequential(
nn.Conv2d(num_channel, num_channel,
kernel_size=3, stride=1, padding=0, bias=False),
nn.BatchNorm2d(num_channel),
nn.ReLU(inplace=True),
nn.Conv2d(num_channel, num_channel,
kernel_size=3, stride=1, padding=0, bias=False),
nn.BatchNorm2d(num_channel),
nn.ReLU(inplace=True),
nn.Conv2d(num_channel, num_channel,
kernel_size=3, stride=1, padding=0, bias=False),
nn.BatchNorm2d(num_channel),
nn.ReLU(inplace=True),
nn.Conv2d(num_channel, 3, kernel_size=1,
stride=1, padding=0, bias=True)) # 3 dim
self.rot = nn.Sequential(
nn.Conv2d(num_channel, num_channel,
kernel_size=3, stride=1, padding=0, bias=False),
nn.BatchNorm2d(num_channel),
nn.ReLU(inplace=True),
nn.Conv2d(num_channel, num_channel,
kernel_size=3, stride=1, padding=0, bias=False),
nn.BatchNorm2d(num_channel),
nn.ReLU(inplace=True),
nn.Conv2d(num_channel, num_channel,
kernel_size=3, stride=1, padding=0, bias=False),
nn.BatchNorm2d(num_channel),
nn.ReLU(inplace=True),
nn.Conv2d(num_channel, 8, kernel_size=1,
stride=1, padding=0, bias=True)) # 1 + 1 + 2
self.dep = nn.Sequential(
nn.Conv2d(num_channel, num_channel,
kernel_size=3, stride=1, padding=0, bias=False),
nn.BatchNorm2d(num_channel),
nn.ReLU(inplace=True),
nn.Conv2d(num_channel, num_channel,
kernel_size=3, stride=1, padding=0, bias=False),
nn.BatchNorm2d(num_channel),
nn.ReLU(inplace=True),
nn.Conv2d(num_channel, num_channel,
kernel_size=3, stride=1, padding=0, bias=False),
nn.BatchNorm2d(num_channel),
nn.ReLU(inplace=True),
nn.Conv2d(num_channel, 1, kernel_size=1,
stride=1, padding=0, bias=True),
nn.Sigmoid())
self.cen = nn.Sequential(
nn.Conv2d(num_channel, num_channel,
kernel_size=3, stride=1, padding=0, bias=False),
nn.BatchNorm2d(num_channel),
nn.ReLU(inplace=True),
nn.Conv2d(num_channel, num_channel,
kernel_size=3, stride=1, padding=0, bias=False),
nn.BatchNorm2d(num_channel),
nn.ReLU(inplace=True),
nn.Conv2d(num_channel, num_channel,
kernel_size=3, stride=1, padding=0, bias=False),
nn.BatchNorm2d(num_channel),
nn.ReLU(inplace=True),
nn.Conv2d(num_channel, 2, kernel_size=1,
stride=1, padding=0, bias=True))
nu.init_module(self.base)
nu.init_module(self.dim)
nu.init_module(self.rot)
nu.init_module(self.dep)
nu.init_module(self.cen)
def forward(self, image, box_info, device, phase):
# for 3D
rois = box_info['rois_pd']
# Get box info
num_imgs = image.size(0)
n_gt_box = box_info['n_box'].cpu().numpy()
n_pd_box = torch.sum(rois[:, :, 4] > 0, dim=1).cpu().numpy()
# Check number of boxes
num_rois = int(np.sum(n_gt_box)) # get n_gt_box of this frame
if (n_gt_box == 0).any(): print("GT is empty")
num_det = int(np.sum(n_pd_box)) # get n_pd_box of this frame
if (n_pd_box == 0).any(): print("Prediction is empty")
        # Make sure n_pd_box and n_gt_box match during training
        if phase in ['train', 'val']:
            assert (n_pd_box == n_gt_box).all(), \
                "Number of pred. bbox ({}) does not equal gt ({})".format(
                    n_pd_box, n_gt_box)
# Init
image = image.to(device)
boxes = torch.zeros([num_det, 5]).to(device)
cen_pd = torch.zeros([num_det, 2]).to(device)
rois_pd = torch.zeros([num_det, 5]).to(device)
rois_gt = torch.zeros([num_rois, 5]).to(device)
dim_gt = torch.zeros([num_rois, 3]).to(device)
dep_gt = torch.zeros([num_rois]).to(device)
cen_gt = torch.zeros([num_rois, 2]).to(device)
loc_gt = torch.zeros([num_rois, 3]).to(device)
tid_gt = torch.zeros([num_rois]).to(device)
if phase == 'train':
bin_gt = torch.zeros([num_rois, 2]).to(device).long()
res_gt = torch.zeros([num_rois, 2]).to(device)
else:
alpha_gt = torch.zeros([num_rois]).to(device)
ignore = torch.zeros([num_rois]).to(device)
# Feed valid info to gpu
sum_gt = 0
sum_det = 0
for idx in range(num_imgs):
if n_pd_box[idx] > 0:
# indicate which image to get feature
boxes[sum_det:sum_det + n_pd_box[idx], 0] = idx
boxes[sum_det:sum_det + n_pd_box[idx], 1:5] = rois[idx,
:n_pd_box[idx],
0:4] # box
cen_pd[sum_det:sum_det + n_pd_box[idx]] = box_info['cen_pd'][idx,
:n_pd_box[idx]]
rois_pd[sum_det:sum_det + n_pd_box[idx]] = rois[idx,
:n_pd_box[idx],
:] # for tracking
if n_gt_box[idx] > 0:
dim_gt[sum_gt:sum_gt + n_gt_box[idx]] = box_info['dim_gt'][idx,
:n_gt_box[idx]]
dep_gt[sum_gt:sum_gt + n_gt_box[idx]] = box_info['depth_gt'][
idx, :n_gt_box[idx]]
cen_gt[sum_gt:sum_gt + n_gt_box[idx]] = box_info['cen_gt'][idx,
:n_gt_box[idx]]
loc_gt[sum_gt:sum_gt + n_gt_box[idx]] = box_info['loc_gt'][idx,
:n_gt_box[idx]]
tid_gt[sum_gt:sum_gt + n_gt_box[idx]] = box_info['tid_gt'][idx,
:n_gt_box[idx]]
rois_gt[sum_gt:sum_gt + n_gt_box[idx]] = box_info['rois_gt'][
idx, :n_gt_box[idx]]
if phase == 'train':
bin_gt[sum_gt:sum_gt + n_gt_box[idx]] = box_info[
'bin_cls_gt'][
idx, :n_gt_box[idx]]
res_gt[sum_gt:sum_gt + n_gt_box[idx]] = box_info[
'bin_res_gt'][
idx, :n_gt_box[idx]]
else:
alpha_gt[sum_gt:sum_gt + n_gt_box[idx]] = box_info[
'alpha_gt'][
idx,
:n_gt_box[idx]]
ignore[sum_gt:sum_gt + n_gt_box[idx]] = box_info['ignore'][
idx, :n_gt_box[idx]]
sum_gt += n_gt_box[idx]
sum_det += n_pd_box[idx]
# Inference of 3D estimation
img_feat = self.base(image)
if num_det > 0:
pooled_feat = self.roi_pool(img_feat, boxes)
dim = self.dim(pooled_feat).flatten(start_dim=1)
cen = self.cen(pooled_feat).flatten(start_dim=1) + cen_pd
orient_ = self.rot(pooled_feat).flatten(start_dim=1)
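            # 8-dim multibin orientation encoding: [:, 0:2] and [:, 4:6] hold the two
            # bin confidences, while [:, 2:4] and [:, 6:8] are (sin, cos) residuals
            # that get normalized to unit length below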
# bin 1
divider1 = torch.sqrt(orient_[:, 2:3] ** 2 + orient_[:, 3:4] ** 2)
b1sin = orient_[:, 2:3] / divider1
b1cos = orient_[:, 3:4] / divider1
# bin 2
divider2 = torch.sqrt(orient_[:, 6:7] ** 2 + orient_[:, 7:8] ** 2)
b2sin = orient_[:, 6:7] / divider2
b2cos = orient_[:, 7:8] / divider2
rot = torch.cat(
[orient_[:, 0:2], b1sin, b1cos, orient_[:, 4:6], b2sin, b2cos],
1)
dep = nu.get_pred_depth(self.dep(pooled_feat).flatten())
loc_pd = []
sum_l = 0
for l_idx in range(num_imgs):
if n_pd_box[l_idx] == 0:
continue
cam_calib = box_info['cam_calib'][l_idx]
position = box_info['cam_loc'][l_idx]
rotation = box_info['cam_rot'][l_idx]
loc_pd.append(tu.point3dcoord_torch(
cen[sum_l:sum_l + n_pd_box[l_idx]],
dep[sum_l:sum_l + n_pd_box[l_idx]],
cam_calib,
position,
rotation))
sum_l += n_pd_box[l_idx]
loc_pd = torch.cat(loc_pd)
else:
pooled_feat = image.new_zeros(1, 128, 7, 7)
dim = image.new_ones(1, 3)
rot = image.new_ones(1, 8)
dep = image.new_zeros(1)
cen = image.new_zeros(1, 2)
loc_pd = image.new_zeros(1, 3)
# Pack infos
box_output = {'rois': rois_pd,
'feat': pooled_feat.detach(),
'dim': dim.detach(),
'rot': rot.detach(),
'dep': dep.detach(),
'cen': cen.detach(),
'loc': loc_pd.detach(),
}
if phase == 'train':
loss_dim = nu.compute_dim_loss(dim, dim_gt).unsqueeze(0)
loss_rot = nu.compute_rot_loss(rot, bin_gt, res_gt).unsqueeze(0)
loss_dep = nu.compute_dep_loss(dep, dep_gt).unsqueeze(0)
loss_dep += nu.compute_dep_loss(loc_pd, loc_gt).unsqueeze(0)
loss_cen = nu.compute_cen_loss(cen, cen_gt).unsqueeze(0)
targets = (loss_dim, loss_rot, loss_dep, loss_cen)
else:
targets = (rois_gt,
dim_gt,
alpha_gt,
dep_gt,
cen_gt,
loc_gt,
ignore,
tid_gt)
return box_output, targets
|
hata/discord/application/preinstanced.py | Multiface24111/hata | 173 | 11132011 | __all__ = ('EntitlementType', 'SKUAccessType', 'SKUFeatureType', 'SKUGenre', 'SKUType', 'TeamMembershipState', )
from ..bases import PreinstancedBase, Preinstance as P
class TeamMembershipState(PreinstancedBase):
"""
Represents a ``TeamMember``'s state at a ``Team``.
Attributes
----------
name : `str`
The name of state.
value : `int`
The Discord side identifier value of the team membership state.
Class Attributes
----------------
INSTANCES : `dict` of (`int`, ``TeamMembershipState``) items
Stores the created team membership state instances. This container is accessed when translating a Discord
        team membership state's value to its representation.
VALUE_TYPE : `type` = `int`
The team membership states' values' type.
DEFAULT_NAME : `str` = `'Undefined'`
The default name of the team membership states.
Every predefined team membership state can be accessed as class attribute as well:
+-----------------------+-----------+-------+
| Class attribute name | name | value |
+=======================+===========+=======+
| none | none | 0 |
+-----------------------+-----------+-------+
| invited | invited | 1 |
+-----------------------+-----------+-------+
| accepted | accepted | 2 |
+-----------------------+-----------+-------+
"""
INSTANCES = {}
VALUE_TYPE = int
__slots__ = ()
# predefined
none = P(0, 'none')
invited = P(1, 'invited')
accepted = P(2, 'accepted')
class SKUFeatureType(PreinstancedBase):
"""
Represents an SKU's feature type.
Attributes
----------
name : `str`
The name of the feature type.
value : `int`
The Discord side identifier value of the SKU feature type.
Class Attributes
----------------
INSTANCES : `dict` of (`int`, ``SKUFeatureType``) items
Stores the created SKU feature type instances. This container is accessed when translating a Discord
        SKU feature type's value to its representation.
VALUE_TYPE : `type` = `int`
The SKU feature types' values' type.
DEFAULT_NAME : `str` = `'Undefined'`
The default name of the SKU feature types.
Every predefined SKU feature type can be accessed as class attribute as well:
+-----------------------+-----------------------+-------+
| Class attribute name | name | value |
+=======================+=======================+=======+
| none | none | 0 |
+-----------------------+-----------------------+-------+
| single_player | single_player | 1 |
+-----------------------+-----------------------+-------+
| online_multiplayer | online_multiplayer | 2 |
+-----------------------+-----------------------+-------+
| local_multiplayer | local_multiplayer | 3 |
+-----------------------+-----------------------+-------+
| pvp | pvp | 4 |
+-----------------------+-----------------------+-------+
| local_coop | local_coop | 5 |
+-----------------------+-----------------------+-------+
| cross_platform | cross_platform | 6 |
+-----------------------+-----------------------+-------+
| rich_presence | rich_presence | 7 |
+-----------------------+-----------------------+-------+
| discord_game_invites | discord_game_invites | 8 |
+-----------------------+-----------------------+-------+
| spectator_mode | spectator_mode | 9 |
+-----------------------+-----------------------+-------+
| controller_support | controller_support | 10 |
+-----------------------+-----------------------+-------+
| cloud_saves | cloud_saves | 11 |
+-----------------------+-----------------------+-------+
| online_coop | online_coop | 12 |
+-----------------------+-----------------------+-------+
| secure_networking | secure_networking | 13 |
+-----------------------+-----------------------+-------+
"""
INSTANCES = {}
VALUE_TYPE = int
__slots__ = ()
# predefined
none = P(0, 'none')
single_player = P(1, 'single_player')
online_multiplayer = P(2, 'online_multiplayer')
local_multiplayer = P(3, 'local_multiplayer')
pvp = P(4, 'pvp')
local_coop = P(5, 'local_coop')
cross_platform = P(6, 'cross_platform')
rich_presence = P(7, 'rich_presence')
discord_game_invites = P(8, 'discord_game_invites')
spectator_mode = P(9, 'spectator_mode')
controller_support = P(10, 'controller_support')
cloud_saves = P(11, 'cloud_saves')
online_coop = P(12, 'online_coop')
secure_networking = P(13, 'secure_networking')
class SKUGenre(PreinstancedBase):
"""
Represents an SKU's feature type.
Attributes
----------
name : `str`
The name of the feature type.
value : `int`
The Discord side identifier value of the SKU genre.
Class Attributes
----------------
INSTANCES : `dict` of (`int`, ``SKUGenre``) items
Stores the created SKU genre instances. This container is accessed when translating a Discord
        SKU genre's value to its representation.
VALUE_TYPE : `type` = `int`
The SKU genres' values' type.
DEFAULT_NAME : `str` = `'Undefined'`
The default name of the SKU genres.
Every predefined SKU genre can be accessed as class attribute as well:
+-----------------------+-----------------------+-------+
| Class attribute name | name | value |
+=======================+=======================+=======+
| none | none | 0 |
+-----------------------+-----------------------+-------+
| action | action | 1 |
+-----------------------+-----------------------+-------+
| action_rpg | action_rpg | 2 |
+-----------------------+-----------------------+-------+
| brawler | brawler | 3 |
+-----------------------+-----------------------+-------+
| hack_and_slash | hack_and_slash | 4 |
+-----------------------+-----------------------+-------+
| platformer | platformer | 5 |
+-----------------------+-----------------------+-------+
| stealth | stealth | 6 |
+-----------------------+-----------------------+-------+
| survival | survival | 7 |
+-----------------------+-----------------------+-------+
| adventure | adventure | 8 |
+-----------------------+-----------------------+-------+
| action_adventure | action_adventure | 9 |
+-----------------------+-----------------------+-------+
| metroidvania | metroidvania | 10 |
+-----------------------+-----------------------+-------+
| open_world | open_world | 11 |
+-----------------------+-----------------------+-------+
| psychological_horror | psychological_horror | 12 |
+-----------------------+-----------------------+-------+
| sandbox | sandbox | 13 |
+-----------------------+-----------------------+-------+
| survival_horror | survival_horror | 14 |
+-----------------------+-----------------------+-------+
| visual_novel | visual_novel | 15 |
+-----------------------+-----------------------+-------+
| driving_racing | driving_racing | 16 |
+-----------------------+-----------------------+-------+
| vehicular_combat | vehicular_combat | 17 |
+-----------------------+-----------------------+-------+
| massively_multiplayer | massively_multiplayer | 18 |
+-----------------------+-----------------------+-------+
| mmorpg | mmorpg | 19 |
+-----------------------+-----------------------+-------+
| role_playing | role_playing | 20 |
+-----------------------+-----------------------+-------+
| dungeon_crawler | dungeon_crawler | 21 |
+-----------------------+-----------------------+-------+
| roguelike | roguelike | 22 |
+-----------------------+-----------------------+-------+
| shooter | shooter | 23 |
+-----------------------+-----------------------+-------+
| light_gun | light_gun | 24 |
+-----------------------+-----------------------+-------+
| shoot_em_up | shoot_em_up | 25 |
+-----------------------+-----------------------+-------+
| fps | fps | 26 |
+-----------------------+-----------------------+-------+
| dual_joystick_shooter | dual_joystick_shooter | 27 |
+-----------------------+-----------------------+-------+
| simulation | simulation | 28 |
+-----------------------+-----------------------+-------+
| flight_simulation | flight_simulation | 29 |
+-----------------------+-----------------------+-------+
| train_simulation | train_simulation | 30 |
+-----------------------+-----------------------+-------+
| life_simulation | life_simulation | 31 |
+-----------------------+-----------------------+-------+
| fishing | fishing | 32 |
+-----------------------+-----------------------+-------+
| sports | sports | 33 |
+-----------------------+-----------------------+-------+
| baseball | baseball | 34 |
+-----------------------+-----------------------+-------+
| basketball | basketball | 35 |
+-----------------------+-----------------------+-------+
| billiards | billiards | 36 |
+-----------------------+-----------------------+-------+
| bowling | bowling | 37 |
+-----------------------+-----------------------+-------+
| boxing | boxing | 38 |
+-----------------------+-----------------------+-------+
| football | football | 39 |
+-----------------------+-----------------------+-------+
| golf | golf | 40 |
+-----------------------+-----------------------+-------+
| hockey | hockey | 41 |
+-----------------------+-----------------------+-------+
| skateboarding_skating | skateboarding_skating | 42 |
+-----------------------+-----------------------+-------+
| snowboarding_skiing | snowboarding_skiing | 43 |
+-----------------------+-----------------------+-------+
| soccer | soccer | 44 |
+-----------------------+-----------------------+-------+
| track_field | track_field | 45 |
+-----------------------+-----------------------+-------+
| surfing_wakeboarding | surfing_wakeboarding | 46 |
+-----------------------+-----------------------+-------+
| wrestling | wrestling | 47 |
+-----------------------+-----------------------+-------+
| strategy | strategy | 48 |
+-----------------------+-----------------------+-------+
| four_x | four_x | 49 |
+-----------------------+-----------------------+-------+
| artillery | artillery | 50 |
+-----------------------+-----------------------+-------+
| rts | rts | 51 |
+-----------------------+-----------------------+-------+
| tower_defense | tower_defense | 52 |
+-----------------------+-----------------------+-------+
| turn_based_strategy | turn_based_strategy | 53 |
+-----------------------+-----------------------+-------+
| wargame | wargame | 54 |
+-----------------------+-----------------------+-------+
| moba | moba | 55 |
+-----------------------+-----------------------+-------+
| fighting | fighting | 56 |
+-----------------------+-----------------------+-------+
| puzzle | puzzle | 57 |
+-----------------------+-----------------------+-------+
| card_game | card_game | 58 |
+-----------------------+-----------------------+-------+
| education | education | 59 |
+-----------------------+-----------------------+-------+
| fitness | fitness | 60 |
+-----------------------+-----------------------+-------+
| gambling | gambling | 61 |
+-----------------------+-----------------------+-------+
| music_rhythm | music_rhythm | 62 |
+-----------------------+-----------------------+-------+
| party_mini_game | party_mini_game | 63 |
+-----------------------+-----------------------+-------+
| pinball | pinball | 64 |
+-----------------------+-----------------------+-------+
| trivia_board_game | trivia_board_game | 65 |
+-----------------------+-----------------------+-------+
"""
INSTANCES = {}
VALUE_TYPE = int
__slots__ = ()
none = P(0, 'none')
action = P(1, 'action')
action_rpg = P(2, 'action_rpg')
brawler = P(3, 'brawler')
hack_and_slash = P(4, 'hack_and_slash')
platformer = P(5, 'platformer')
stealth = P(6, 'stealth')
survival = P(7, 'survival')
adventure = P(8, 'adventure')
action_adventure = P(9, 'action_adventure')
metroidvania = P(10, 'metroidvania')
open_world = P(11, 'open_world')
psychological_horror = P(12, 'psychological_horror')
sandbox = P(13, 'sandbox')
survival_horror = P(14, 'survival_horror')
visual_novel = P(15, 'visual_novel')
driving_racing = P(16, 'driving_racing')
vehicular_combat = P(17, 'vehicular_combat')
massively_multiplayer = P(18, 'massively_multiplayer')
mmorpg = P(19, 'mmorpg')
role_playing = P(20, 'role_playing')
dungeon_crawler = P(21, 'dungeon_crawler')
roguelike = P(22, 'roguelike')
shooter = P(23, 'shooter')
light_gun = P(24, 'light_gun')
shoot_em_up = P(25, 'shoot_em_up')
fps = P(26, 'fps')
dual_joystick_shooter = P(27, 'dual_joystick_shooter')
simulation = P(28, 'simulation')
flight_simulation = P(29, 'flight_simulation')
train_simulation = P(30, 'train_simulation')
life_simulation = P(31, 'life_simulation')
fishing = P(32, 'fishing')
sports = P(33, 'sports')
baseball = P(34, 'baseball')
basketball = P(35, 'basketball')
billiards = P(36, 'billiards')
bowling = P(37, 'bowling')
boxing = P(38, 'boxing')
football = P(39, 'football')
golf = P(40, 'golf')
hockey = P(41, 'hockey')
skateboarding_skating = P(42, 'skateboarding_skating')
snowboarding_skiing = P(43, 'snowboarding_skiing')
soccer = P(44, 'soccer')
track_field = P(45, 'track_field')
surfing_wakeboarding = P(46, 'surfing_wakeboarding')
wrestling = P(47, 'wrestling')
strategy = P(48, 'strategy')
four_x = P(49, 'four_x')
artillery = P(50, 'artillery')
rts = P(51, 'rts')
tower_defense = P(52, 'tower_defense')
turn_based_strategy = P(53, 'turn_based_strategy')
wargame = P(54, 'wargame')
moba = P(55, 'moba')
fighting = P(56, 'fighting')
puzzle = P(57, 'puzzle')
card_game = P(58, 'card_game')
education = P(59, 'education')
fitness = P(60, 'fitness')
gambling = P(61, 'gambling')
music_rhythm = P(62, 'music_rhythm')
party_mini_game = P(63, 'party_mini_game')
pinball = P(64, 'pinball')
trivia_board_game = P(65, 'trivia_board_game')
class SKUAccessType(PreinstancedBase):
"""
Represents an SKU's access type.
Attributes
----------
name : `str`
The name of state.
value : `int`
The Discord side identifier value of the SKU access type.
Class Attributes
----------------
INSTANCES : `dict` of (`int`, ``SKUAccessType``) items
Stores the created SKU access type instances. This container is accessed when translating a Discord
        SKU access type's value to its representation.
VALUE_TYPE : `type` = `int`
The SKU access types' values' type.
DEFAULT_NAME : `str` = `'Undefined'`
The default name of the SKU access types.
Every predefined SKU access type can be accessed as class attribute as well:
+-----------------------+---------------+-------+
| Class attribute name | name | value |
+=======================+===============+=======+
| none | none | 0 |
+-----------------------+---------------+-------+
| full | full | 1 |
+-----------------------+---------------+-------+
| early_access | early_access | 2 |
+-----------------------+---------------+-------+
| vip_access | vip_access | 3 |
+-----------------------+---------------+-------+
"""
INSTANCES = {}
VALUE_TYPE = int
__slots__ = ()
# predefined
none = P(0, 'none')
full = P(1, 'full')
early_access = P(2, 'early_access')
vip_access = P(3, 'vip_access')
class SKUType(PreinstancedBase):
"""
Represents an SKU's type.
Attributes
----------
name : `str`
The name of state.
value : `int`
The Discord side identifier value of the SKU type.
Class Attributes
----------------
INSTANCES : `dict` of (`int`, ``SKUType``) items
Stores the created SKU type instances. This container is accessed when translating a Discord
        SKU type's value to its representation.
VALUE_TYPE : `type` = `int`
The SKU types' values' type.
DEFAULT_NAME : `str` = `'Undefined'`
The default name of the SKU types.
Every predefined SKU type can be accessed as class attribute as well:
+-----------------------+-------------------+-------+
| Class attribute name | name | value |
+=======================+===================+=======+
| none | none | 0 |
+-----------------------+-------------------+-------+
| durable_primary | durable_primary | 1 |
+-----------------------+-------------------+-------+
| durable | durable | 2 |
+-----------------------+-------------------+-------+
| consumable | consumable | 3 |
+-----------------------+-------------------+-------+
| bundle | bundle | 4 |
+-----------------------+-------------------+-------+
| subscription | subscription | 5 |
+-----------------------+-------------------+-------+
"""
INSTANCES = {}
VALUE_TYPE = int
__slots__ = ()
# predefined
none = P(0, 'none')
durable_primary = P(1, 'durable_primary')
durable = P(2, 'durable')
consumable = P(3, 'consumable')
bundle = P(4, 'bundle')
subscription = P(5, 'subscription')
class EntitlementType(PreinstancedBase):
"""
Represents an entitlement's type.
Attributes
----------
name : `str`
The name of state.
value : `int`
The Discord side identifier value of the entitlement type.
Class Attributes
----------------
INSTANCES : `dict` of (`int`, ``EntitlementType``) items
Stores the created entitlement type instances. This container is accessed when translating a Discord
        entitlement type's value to its representation.
VALUE_TYPE : `type` = `int`
The entitlement types' values' type.
DEFAULT_NAME : `str` = `'Undefined'`
The default name of the entitlement types.
Every predefined entitlement type can be accessed as class attribute as well:
+-----------------------+-----------------------+-------+
| Class attribute name | name | value |
+=======================+=======================+=======+
| none | none | 0 |
+-----------------------+-----------------------+-------+
| purchase | purchase | 1 |
+-----------------------+-----------------------+-------+
| premium_subscription | premium_subscription | 2 |
+-----------------------+-----------------------+-------+
| developer_gift | developer_gift | 3 |
+-----------------------+-----------------------+-------+
| test_mode_purchase | test_mode_purchase | 4 |
+-----------------------+-----------------------+-------+
| free_purchase | free_purchase | 5 |
+-----------------------+-----------------------+-------+
| user_gift | user_gift | 6 |
+-----------------------+-----------------------+-------+
"""
INSTANCES = {}
VALUE_TYPE = int
__slots__ = ()
# predefined
none = P(0, 'none')
purchase = P(1, 'purchase')
premium_subscription = P(2, 'premium_subscription')
developer_gift = P(3, 'developer_gift')
test_mode_purchase = P(4, 'test_mode_purchase')
free_purchase = P(5, 'free_purchase')
user_gift = P(6, 'user_gift')
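# Minimal usage sketch (illustrative, not part of the original module; it assumes the
# PreinstancedBase base class registers every predefined instance in INSTANCES keyed
# by value, as the docstrings above describe):
#
#     state = TeamMembershipState.INSTANCES.get(2, TeamMembershipState.none)
#     assert state is TeamMembershipState.accepted
#     assert SKUType.subscription.value == 5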
|
tools/apilint/apilint_test.py | rio-31/android_frameworks_base-1 | 164 | 11132030 | #!/usr/bin/env python
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import apilint
def cls(pkg, name):
return apilint.Class(apilint.Package(999, "package %s {" % pkg, None), 999,
"public final class %s {" % name, None)
_ri = apilint._retry_iterator
c1 = cls("android.app", "ActivityManager")
c2 = cls("android.app", "Notification")
c3 = cls("android.app", "Notification.Action")
c4 = cls("android.graphics", "Bitmap")
class UtilTests(unittest.TestCase):
def test_retry_iterator(self):
it = apilint._retry_iterator([1, 2, 3, 4])
self.assertEqual(it.next(), 1)
self.assertEqual(it.next(), 2)
self.assertEqual(it.next(), 3)
it.send("retry")
self.assertEqual(it.next(), 3)
self.assertEqual(it.next(), 4)
with self.assertRaises(StopIteration):
it.next()
def test_retry_iterator_one(self):
it = apilint._retry_iterator([1])
self.assertEqual(it.next(), 1)
it.send("retry")
self.assertEqual(it.next(), 1)
with self.assertRaises(StopIteration):
it.next()
def test_skip_to_matching_class_found(self):
it = _ri([c1, c2, c3, c4])
self.assertEquals(apilint._skip_to_matching_class(it, c3),
c3)
self.assertEqual(it.next(), c4)
def test_skip_to_matching_class_not_found(self):
it = _ri([c1, c2, c3, c4])
self.assertEquals(apilint._skip_to_matching_class(it, cls("android.content", "ContentProvider")),
None)
self.assertEqual(it.next(), c4)
def test_yield_until_matching_class_found(self):
it = _ri([c1, c2, c3, c4])
self.assertEquals(list(apilint._yield_until_matching_class(it, c3)),
[c1, c2])
self.assertEqual(it.next(), c4)
def test_yield_until_matching_class_not_found(self):
it = _ri([c1, c2, c3, c4])
self.assertEquals(list(apilint._yield_until_matching_class(it, cls("android.content", "ContentProvider"))),
[c1, c2, c3])
self.assertEqual(it.next(), c4)
def test_yield_until_matching_class_None(self):
it = _ri([c1, c2, c3, c4])
self.assertEquals(list(apilint._yield_until_matching_class(it, None)),
[c1, c2, c3, c4])
faulty_current_txt = """
// Signature format: 2.0
package android.app {
public final class Activity {
}
public final class WallpaperColors implements android.os.Parcelable {
ctor public WallpaperColors(@NonNull android.os.Parcel);
method public int describeContents();
method public void writeToParcel(@NonNull android.os.Parcel, int);
field @NonNull public static final android.os.Parcelable.Creator<android.app.WallpaperColors> CREATOR;
}
}
""".strip().split('\n')
ok_current_txt = """
// Signature format: 2.0
package android.app {
public final class Activity {
}
public final class WallpaperColors implements android.os.Parcelable {
ctor public WallpaperColors();
method public int describeContents();
method public void writeToParcel(@NonNull android.os.Parcel, int);
field @NonNull public static final android.os.Parcelable.Creator<android.app.WallpaperColors> CREATOR;
}
}
""".strip().split('\n')
system_current_txt = """
// Signature format: 2.0
package android.app {
public final class WallpaperColors implements android.os.Parcelable {
method public int getSomething();
}
}
""".strip().split('\n')
class BaseFileTests(unittest.TestCase):
def test_base_file_avoids_errors(self):
failures, _ = apilint.examine_stream(system_current_txt, ok_current_txt)
self.assertEquals(failures, {})
def test_class_with_base_finds_same_errors(self):
failures_with_classes_with_base, _ = apilint.examine_stream("", faulty_current_txt,
in_classes_with_base=[cls("android.app", "WallpaperColors")])
failures_with_system_txt, _ = apilint.examine_stream(system_current_txt, faulty_current_txt)
self.assertEquals(failures_with_classes_with_base.keys(), failures_with_system_txt.keys())
    def test_classes_with_base_is_emitted(self):
classes_with_base = []
_, _ = apilint.examine_stream(system_current_txt, faulty_current_txt,
out_classes_with_base=classes_with_base)
self.assertEquals(map(lambda x: x.fullname, classes_with_base), ["android.app.WallpaperColors"])
class ParseV2Stream(unittest.TestCase):
def test_field_kinds(self):
api = apilint._parse_stream("""
// Signature format: 2.0
package android {
public enum SomeEnum {
enum_constant public static final android.SomeEnum ENUM_CONST;
field public static final int FIELD_CONST;
property public final int someProperty;
ctor public SomeEnum();
method public Object? getObject();
}
}
""".strip().split('\n'))
self.assertEquals(api['android.SomeEnum'].fields[0].split[0], 'enum_constant')
self.assertEquals(api['android.SomeEnum'].fields[1].split[0], 'field')
self.assertEquals(api['android.SomeEnum'].fields[2].split[0], 'property')
self.assertEquals(api['android.SomeEnum'].ctors[0].split[0], 'ctor')
self.assertEquals(api['android.SomeEnum'].methods[0].split[0], 'method')
class ParseV3Stream(unittest.TestCase):
def test_field_kinds(self):
api = apilint._parse_stream("""
// Signature format: 3.0
package a {
public final class ContextKt {
method public static inline <reified T> T! getSystemService(android.content.Context);
method public static inline void withStyledAttributes(android.content.Context, android.util.AttributeSet? set = null, int[] attrs, @AttrRes int defStyleAttr = 0, @StyleRes int defStyleRes = 0, kotlin.jvm.functions.Function1<? super android.content.res.TypedArray,kotlin.Unit> block);
}
}
""".strip().split('\n'))
self.assertEquals(api['a.ContextKt'].methods[0].name, 'getSystemService')
self.assertEquals(api['a.ContextKt'].methods[0].split[:4], ['method', 'public', 'static', 'inline'])
self.assertEquals(api['a.ContextKt'].methods[1].name, 'withStyledAttributes')
self.assertEquals(api['a.ContextKt'].methods[1].split[:4], ['method', 'public', 'static', 'inline'])
class V2TokenizerTests(unittest.TestCase):
def _test(self, raw, expected):
self.assertEquals(apilint.V2Tokenizer(raw).tokenize(), expected)
def test_simple(self):
self._test(" method public some.Type someName(some.Argument arg, int arg);",
['method', 'public', 'some.Type', 'someName', '(', 'some.Argument',
'arg', ',', 'int', 'arg', ')', ';'])
self._test("class Some.Class extends SomeOther {",
['class', 'Some.Class', 'extends', 'SomeOther', '{'])
def test_varargs(self):
self._test("name(String...)",
['name', '(', 'String', '...', ')'])
def test_kotlin(self):
self._test("String? name(String!...)",
['String', '?', 'name', '(', 'String', '!', '...', ')'])
def test_annotation(self):
self._test("method @Nullable public void name();",
['method', '@', 'Nullable', 'public', 'void', 'name', '(', ')', ';'])
def test_annotation_args(self):
self._test("@Some(val=1, other=2) class Class {",
['@', 'Some', '(', 'val', '=', '1', ',', 'other', '=', '2', ')',
'class', 'Class', '{'])
def test_comment(self):
self._test("some //comment", ['some'])
def test_strings(self):
self._test(r'"" "foo" "\"" "\\"', ['""', '"foo"', r'"\""', r'"\\"'])
def test_at_interface(self):
self._test("public @interface Annotation {",
['public', '@interface', 'Annotation', '{'])
def test_array_type(self):
self._test("int[][]", ['int', '[]', '[]'])
def test_generics(self):
self._test("<>foobar<A extends Object>",
['<', '>', 'foobar', '<', 'A', 'extends', 'Object', '>'])
class V2ParserTests(unittest.TestCase):
def _cls(self, raw):
pkg = apilint.Package(999, "package pkg {", None)
return apilint.Class(pkg, 1, raw, '', sig_format=2)
def _method(self, raw, cls=None):
if not cls:
cls = self._cls("class Class {")
return apilint.Method(cls, 1, raw, '', sig_format=2)
def _field(self, raw):
cls = self._cls("class Class {")
return apilint.Field(cls, 1, raw, '', sig_format=2)
def test_parse_package(self):
pkg = apilint.Package(999, "package wifi.p2p {", None)
self.assertEquals("wifi.p2p", pkg.name)
def test_class(self):
cls = self._cls("@Deprecated @IntRange(from=1, to=2) public static abstract class Some.Name extends Super<Class> implements Interface<Class> {")
self.assertTrue('deprecated' in cls.split)
self.assertTrue('static' in cls.split)
self.assertTrue('abstract' in cls.split)
self.assertTrue('class' in cls.split)
self.assertEquals('Super', cls.extends)
self.assertEquals('Interface', cls.implements)
self.assertEquals('pkg.Some.Name', cls.fullname)
def test_enum(self):
cls = self._cls("public enum Some.Name {")
self._field("enum_constant public static final android.ValueType COLOR;")
def test_interface(self):
cls = self._cls("@Deprecated @IntRange(from=1, to=2) public interface Some.Name extends Interface<Class> {")
self.assertTrue('deprecated' in cls.split)
self.assertTrue('interface' in cls.split)
self.assertEquals('Interface', cls.extends)
self.assertEquals('Interface', cls.implements)
self.assertEquals('pkg.Some.Name', cls.fullname)
def test_at_interface(self):
cls = self._cls("@java.lang.annotation.Target({java.lang.annotation.ElementType.TYPE, java.lang.annotation.ElementType.FIELD, java.lang.annotation.ElementType.METHOD, java.lang.annotation.ElementType.PARAMETER, java.lang.annotation.ElementType.CONSTRUCTOR, java.lang.annotation.ElementType.LOCAL_VARIABLE}) @java.lang.annotation.Retention(java.lang.annotation.RetentionPolicy.CLASS) public @interface SuppressLint {")
self.assertTrue('@interface' in cls.split)
self.assertEquals('pkg.SuppressLint', cls.fullname)
def test_parse_method(self):
m = self._method("method @Deprecated public static native <T> Class<T>[][] name("
+ "Class<T[]>[][], Class<T[][][]>[][]...) throws Exception, T;")
self.assertTrue('static' in m.split)
self.assertTrue('public' in m.split)
self.assertTrue('method' in m.split)
self.assertTrue('native' in m.split)
self.assertTrue('deprecated' in m.split)
self.assertEquals('java.lang.Class[][]', m.typ)
self.assertEquals('name', m.name)
self.assertEquals(['java.lang.Class[][]', 'java.lang.Class[][]...'], m.args)
self.assertEquals(['java.lang.Exception', 'T'], m.throws)
def test_ctor(self):
m = self._method("ctor @Deprecated <T> ClassName();")
self.assertTrue('ctor' in m.split)
self.assertTrue('deprecated' in m.split)
self.assertEquals('ctor', m.typ)
self.assertEquals('ClassName', m.name)
def test_parse_annotation_method(self):
cls = self._cls("@interface Annotation {")
self._method('method abstract String category() default "";', cls=cls)
self._method('method abstract boolean deepExport() default false;', cls=cls)
self._method('method abstract ViewDebug.FlagToString[] flagMapping() default {};', cls=cls)
self._method('method abstract ViewDebug.FlagToString[] flagMapping() default (double)java.lang.Float.NEGATIVE_INFINITY;', cls=cls)
def test_parse_string_field(self):
f = self._field('field @Deprecated public final String SOME_NAME = "value";')
self.assertTrue('field' in f.split)
self.assertTrue('deprecated' in f.split)
self.assertTrue('final' in f.split)
self.assertEquals('java.lang.String', f.typ)
self.assertEquals('SOME_NAME', f.name)
self.assertEquals('value', f.value)
def test_parse_field(self):
f = self._field('field public Object SOME_NAME;')
self.assertTrue('field' in f.split)
self.assertEquals('java.lang.Object', f.typ)
self.assertEquals('SOME_NAME', f.name)
self.assertEquals(None, f.value)
def test_parse_int_field(self):
f = self._field('field public int NAME = 123;')
self.assertTrue('field' in f.split)
self.assertEquals('int', f.typ)
self.assertEquals('NAME', f.name)
self.assertEquals('123', f.value)
def test_parse_quotient_field(self):
f = self._field('field public int NAME = (0.0/0.0);')
self.assertTrue('field' in f.split)
self.assertEquals('int', f.typ)
self.assertEquals('NAME', f.name)
self.assertEquals('( 0.0 / 0.0 )', f.value)
def test_kotlin_types(self):
self._field('field public List<Integer[]?[]!>?[]![]? NAME;')
self._method("method <T?> Class<T!>?[]![][]? name(Type!, Type argname,"
+ "Class<T?>[][]?[]!...!) throws Exception, T;")
self._method("method <T> T name(T a = 1, T b = A(1), Lambda f = { false }, N? n = null, "
+ """double c = (1/0), float d = 1.0f, String s = "heyo", char c = 'a');""")
def test_kotlin_operator(self):
self._method('method public operator void unaryPlus(androidx.navigation.NavDestination);')
self._method('method public static operator androidx.navigation.NavDestination get(androidx.navigation.NavGraph, @IdRes int id);')
self._method('method public static operator <T> T get(androidx.navigation.NavigatorProvider, kotlin.reflect.KClass<T> clazz);')
def test_kotlin_property(self):
self._field('property public VM value;')
self._field('property public final String? action;')
def test_kotlin_varargs(self):
self._method('method public void error(int p = "42", Integer int2 = "null", int p1 = "42", vararg String args);')
def test_kotlin_default_values(self):
self._method('method public void foo(String! = null, String! = "Hello World", int = 42);')
self._method('method void method(String, String firstArg = "hello", int secondArg = "42", String thirdArg = "world");')
self._method('method void method(String, String firstArg = "hello", int secondArg = "42");')
self._method('method void method(String, String firstArg = "hello");')
self._method('method void edit(android.Type, boolean commit = false, Function1<? super Editor,kotlin.Unit> action);')
self._method('method <K, V> LruCache<K,V> lruCache(int maxSize, Function2<? super K,? super V,java.lang.Integer> sizeOf = { _, _ -> 1 }, Function1<? extends V> create = { (V)null }, Function4<kotlin.Unit> onEntryRemoved = { _, _, _, _ -> });')
self._method('method android.Bitmap? drawToBitmap(android.View, android.Config config = android.graphics.Bitmap.Config.ARGB_8888);')
self._method('method void emptyLambda(Function0<kotlin.Unit> sizeOf = {});')
self._method('method void method1(int p = 42, Integer? int2 = null, int p1 = 42, String str = "hello world", java.lang.String... args);')
self._method('method void method2(int p, int int2 = (2 * int) * some.other.pkg.Constants.Misc.SIZE);')
self._method('method void method3(String str, int p, int int2 = double(int) + str.length);')
self._method('method void print(test.pkg.Foo foo = test.pkg.Foo());')
def test_type_use_annotation(self):
self._method('method public static int codePointAt(char @NonNull [], int);')
self._method('method @NonNull public java.util.Set<java.util.Map.@NonNull Entry<K,V>> entrySet();')
m = self._method('method @NonNull public java.lang.annotation.@NonNull Annotation @NonNull [] getAnnotations();')
self.assertEquals('java.lang.annotation.Annotation[]', m.typ)
m = self._method('method @NonNull public abstract java.lang.annotation.@NonNull Annotation @NonNull [] @NonNull [] getParameterAnnotations();')
self.assertEquals('java.lang.annotation.Annotation[][]', m.typ)
m = self._method('method @NonNull public @NonNull String @NonNull [] split(@NonNull String, int);')
self.assertEquals('java.lang.String[]', m.typ)
class PackageTests(unittest.TestCase):
def _package(self, raw):
return apilint.Package(123, raw, "blame")
def test_regular_package(self):
p = self._package("package an.pref.int {")
self.assertEquals('an.pref.int', p.name)
def test_annotation_package(self):
p = self._package("package @RestrictTo(a.b.C) an.pref.int {")
self.assertEquals('an.pref.int', p.name)
def test_multi_annotation_package(self):
p = self._package("package @Rt(a.b.L_G_P) @RestrictTo(a.b.C) an.pref.int {")
self.assertEquals('an.pref.int', p.name)
class FilterTests(unittest.TestCase):
def test_filter_match_prefix(self):
self.assertTrue(apilint.match_filter(["a"], "a.B"))
self.assertTrue(apilint.match_filter(["a.B"], "a.B.C"))
def test_filter_dont_match_prefix(self):
self.assertFalse(apilint.match_filter(["c"], "a.B"))
self.assertFalse(apilint.match_filter(["a."], "a.B"))
self.assertFalse(apilint.match_filter(["a.B."], "a.B.C"))
def test_filter_match_exact(self):
self.assertTrue(apilint.match_filter(["a.B"], "a.B"))
def test_filter_dont_match_exact(self):
self.assertFalse(apilint.match_filter([""], "a.B"))
self.assertFalse(apilint.match_filter(["a.C"], "a.B"))
self.assertFalse(apilint.match_filter(["a.C"], "a.B"))
if __name__ == "__main__":
unittest.main()
|
contrib/python/xapiand-py/xapiand/utils/client.py | Kronuz/Xapiand | 370 | 11132056 | <filename>contrib/python/xapiand-py/xapiand/utils/client.py
# Copyright (c) 2019 Dubalu LLC
# Copyright (c) 2017 Elasticsearch
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to you under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import weakref
from datetime import date, datetime
from functools import wraps
from ..compat import string_types, quote_plus, PY2
# parts of URL to be omitted
SKIP_IN_PATH = (None, '', b'', [], ())
def _escape(value):
"""
Escape a single value of a URL string or a query parameter. If it is a list
or tuple, turn it into a comma-separated string first.
"""
    # make sequences into comma-separated strings
if isinstance(value, (list, tuple)):
value = ','.join(value)
# dates and datetimes into isoformat
elif isinstance(value, (date, datetime)):
value = value.isoformat()
# make bools into true/false strings
elif isinstance(value, bool):
value = str(value).lower()
# don't decode bytestrings
elif isinstance(value, bytes):
return value
# encode strings to utf-8
if isinstance(value, string_types):
if PY2 and isinstance(value, unicode):
return value.encode('utf-8')
if not PY2 and isinstance(value, str):
return value.encode('utf-8')
return str(value)
def make_url(url, id=""):
"""
Create a normalized URL string.
"""
if isinstance(url, tuple):
url = list(url)
elif not isinstance(url, list):
url = url.split('/')
# preserve ',', '*' and '~' in url for nicer URLs in logs
url = [quote_plus(_escape(u), b',*~') for u in url if u not in SKIP_IN_PATH]
url.append(quote_plus(_escape(id), b',*~'))
return '/' + '/'.join(url)
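# Illustrative sketch (not part of the original module): make_url drops values
# listed in SKIP_IN_PATH, escapes each remaining part, and appends the optional
# id; ',', '*' and '~' are kept unescaped for readable logs.
def _example_make_url():
    # the None segment is skipped, the id goes last
    assert make_url(['twitter', None, 'tweet'], id='1') == '/twitter/tweet/1'
    return make_url(['twitter', 'tweet'], id='1')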
# parameters that apply to all methods
GLOBAL_PARAMS = ('pretty', 'human', 'routing')
def query_params(*accepted_params):
"""
Decorator that pops all accepted parameters from method's kwargs and puts
them in the params argument.
"""
def _wrapper(func):
@wraps(func)
def _wrapped(*args, **kwargs):
params = {}
if 'params' in kwargs:
params = kwargs.pop('params').copy()
for p in accepted_params + GLOBAL_PARAMS:
if p in kwargs:
value = kwargs.pop(p)
if value is not None:
if isinstance(value, (list, tuple)):
params[p] = [_escape(v) for v in value]
else:
params[p] = _escape(value)
# don't treat ignore and request_timeout as other params to avoid escaping
for p in ('ignore', 'request_timeout'):
if p in kwargs:
params[p] = kwargs.pop(p)
return func(*args, params=params, **kwargs)
return _wrapped
return _wrapper
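# Illustrative sketch (not part of the original module): a hypothetical
# function decorated with query_params. Accepted kwargs are popped into
# `params` and escaped, while 'ignore' and 'request_timeout' pass through
# untouched, e.g. _example_request(refresh=True, ignore=404) returns
# {'refresh': 'true', 'ignore': 404}.
@query_params('refresh', 'timeout')
def _example_request(params=None, **kwargs):
    # a real client method would forward `params` to its transport layer
    return params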
class NamespacedClient(object):
def __init__(self, client):
self.client = client
@property
def transport(self):
return self.client.transport
class AddonClient(NamespacedClient):
@classmethod
def infect_client(cls, client):
addon = cls(weakref.proxy(client))
setattr(client, cls.namespace, addon)
return client
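# Illustrative sketch (not part of the original module): a hypothetical addon
# that infect_client() would attach to a client instance as `client.indices`,
# holding only a weak proxy back to the client to avoid reference cycles.
class _ExampleIndicesAddon(AddonClient):
    namespace = 'indices'
    # after `_ExampleIndicesAddon.infect_client(client)`, methods defined here
    # are reachable as `client.indices.<method>` and may use self.transport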
|
tests/complex/nsaugment/tst-app.py | eLBati/pyxb | 123 | 11132076 | # -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import unittest
import app
import common
import pyxb.utils.domutils
from pyxb.utils import six
pyxb.utils.domutils.BindingDOMSupport.DeclareNamespace(app.Namespace, 'app')
pyxb.utils.domutils.BindingDOMSupport.DeclareNamespace(common.Namespace, 'common')
class Test (unittest.TestCase):
def testMissingApp (self):
# The app element is not in the base common, it's in the
# application-specific module that's private.
self.assertRaises(AttributeError, getattr, common, 'app')
def testApp (self):
instance = app.elt(base='hi', app='there')
xmlt = six.u('<app:elt xmlns:app="urn:app" xmlns:common="urn:common"><common:base><common:bstr>hi</common:bstr></common:base><common:app><common:astr>there</common:astr></common:app></app:elt>')
xmld = xmlt.encode('utf-8')
self.assertEqual(instance.toxml('utf-8', root_only=True), xmld)
if '__main__' == __name__:
unittest.main()
|
alipay/aop/api/domain/AlipayOpenMiniInnerversionUploadModel.py | antopen/alipay-sdk-python-all | 213 | 11132085 | <gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.MiniAppPluginReference import MiniAppPluginReference
class AlipayOpenMiniInnerversionUploadModel(object):
def __init__(self):
self._app_origin = None
self._build_app_type = None
self._build_extra_info = None
self._build_extra_mini_project_config = None
self._build_js_permission = None
self._build_main_url = None
self._build_max_android_client_version = None
self._build_max_ios_client_version = None
self._build_min_android_client_version = None
self._build_min_ios_client_version = None
self._build_package_md_5 = None
self._build_package_name = None
self._build_package_stream = None
self._build_qcloud_info = None
self._build_signed_pkg_url = None
self._build_source_pkg_size = None
self._build_source_pkg_url = None
self._build_sub_url = None
self._build_version = None
self._builded_package_size = None
self._builded_package_url = None
self._bundle_id = None
self._client_type = None
self._inst_code = None
self._mini_app_id = None
self._plugin_refs = None
@property
def app_origin(self):
return self._app_origin
@app_origin.setter
def app_origin(self, value):
self._app_origin = value
@property
def build_app_type(self):
return self._build_app_type
@build_app_type.setter
def build_app_type(self, value):
self._build_app_type = value
@property
def build_extra_info(self):
return self._build_extra_info
@build_extra_info.setter
def build_extra_info(self, value):
self._build_extra_info = value
@property
def build_extra_mini_project_config(self):
return self._build_extra_mini_project_config
@build_extra_mini_project_config.setter
def build_extra_mini_project_config(self, value):
self._build_extra_mini_project_config = value
@property
def build_js_permission(self):
return self._build_js_permission
@build_js_permission.setter
def build_js_permission(self, value):
self._build_js_permission = value
@property
def build_main_url(self):
return self._build_main_url
@build_main_url.setter
def build_main_url(self, value):
self._build_main_url = value
@property
def build_max_android_client_version(self):
return self._build_max_android_client_version
@build_max_android_client_version.setter
def build_max_android_client_version(self, value):
self._build_max_android_client_version = value
@property
def build_max_ios_client_version(self):
return self._build_max_ios_client_version
@build_max_ios_client_version.setter
def build_max_ios_client_version(self, value):
self._build_max_ios_client_version = value
@property
def build_min_android_client_version(self):
return self._build_min_android_client_version
@build_min_android_client_version.setter
def build_min_android_client_version(self, value):
self._build_min_android_client_version = value
@property
def build_min_ios_client_version(self):
return self._build_min_ios_client_version
@build_min_ios_client_version.setter
def build_min_ios_client_version(self, value):
self._build_min_ios_client_version = value
@property
def build_package_md_5(self):
return self._build_package_md_5
@build_package_md_5.setter
def build_package_md_5(self, value):
self._build_package_md_5 = value
@property
def build_package_name(self):
return self._build_package_name
@build_package_name.setter
def build_package_name(self, value):
self._build_package_name = value
@property
def build_package_stream(self):
return self._build_package_stream
@build_package_stream.setter
def build_package_stream(self, value):
self._build_package_stream = value
@property
def build_qcloud_info(self):
return self._build_qcloud_info
@build_qcloud_info.setter
def build_qcloud_info(self, value):
self._build_qcloud_info = value
@property
def build_signed_pkg_url(self):
return self._build_signed_pkg_url
@build_signed_pkg_url.setter
def build_signed_pkg_url(self, value):
self._build_signed_pkg_url = value
@property
def build_source_pkg_size(self):
return self._build_source_pkg_size
@build_source_pkg_size.setter
def build_source_pkg_size(self, value):
self._build_source_pkg_size = value
@property
def build_source_pkg_url(self):
return self._build_source_pkg_url
@build_source_pkg_url.setter
def build_source_pkg_url(self, value):
self._build_source_pkg_url = value
@property
def build_sub_url(self):
return self._build_sub_url
@build_sub_url.setter
def build_sub_url(self, value):
self._build_sub_url = value
@property
def build_version(self):
return self._build_version
@build_version.setter
def build_version(self, value):
self._build_version = value
@property
def builded_package_size(self):
return self._builded_package_size
@builded_package_size.setter
def builded_package_size(self, value):
self._builded_package_size = value
@property
def builded_package_url(self):
return self._builded_package_url
@builded_package_url.setter
def builded_package_url(self, value):
self._builded_package_url = value
@property
def bundle_id(self):
return self._bundle_id
@bundle_id.setter
def bundle_id(self, value):
self._bundle_id = value
@property
def client_type(self):
return self._client_type
@client_type.setter
def client_type(self, value):
self._client_type = value
@property
def inst_code(self):
return self._inst_code
@inst_code.setter
def inst_code(self, value):
self._inst_code = value
@property
def mini_app_id(self):
return self._mini_app_id
@mini_app_id.setter
def mini_app_id(self, value):
self._mini_app_id = value
@property
def plugin_refs(self):
return self._plugin_refs
@plugin_refs.setter
def plugin_refs(self, value):
if isinstance(value, list):
self._plugin_refs = list()
for i in value:
if isinstance(i, MiniAppPluginReference):
self._plugin_refs.append(i)
else:
self._plugin_refs.append(MiniAppPluginReference.from_alipay_dict(i))
def to_alipay_dict(self):
params = dict()
if self.app_origin:
if hasattr(self.app_origin, 'to_alipay_dict'):
params['app_origin'] = self.app_origin.to_alipay_dict()
else:
params['app_origin'] = self.app_origin
if self.build_app_type:
if hasattr(self.build_app_type, 'to_alipay_dict'):
params['build_app_type'] = self.build_app_type.to_alipay_dict()
else:
params['build_app_type'] = self.build_app_type
if self.build_extra_info:
if hasattr(self.build_extra_info, 'to_alipay_dict'):
params['build_extra_info'] = self.build_extra_info.to_alipay_dict()
else:
params['build_extra_info'] = self.build_extra_info
if self.build_extra_mini_project_config:
if hasattr(self.build_extra_mini_project_config, 'to_alipay_dict'):
params['build_extra_mini_project_config'] = self.build_extra_mini_project_config.to_alipay_dict()
else:
params['build_extra_mini_project_config'] = self.build_extra_mini_project_config
if self.build_js_permission:
if hasattr(self.build_js_permission, 'to_alipay_dict'):
params['build_js_permission'] = self.build_js_permission.to_alipay_dict()
else:
params['build_js_permission'] = self.build_js_permission
if self.build_main_url:
if hasattr(self.build_main_url, 'to_alipay_dict'):
params['build_main_url'] = self.build_main_url.to_alipay_dict()
else:
params['build_main_url'] = self.build_main_url
if self.build_max_android_client_version:
if hasattr(self.build_max_android_client_version, 'to_alipay_dict'):
params['build_max_android_client_version'] = self.build_max_android_client_version.to_alipay_dict()
else:
params['build_max_android_client_version'] = self.build_max_android_client_version
if self.build_max_ios_client_version:
if hasattr(self.build_max_ios_client_version, 'to_alipay_dict'):
params['build_max_ios_client_version'] = self.build_max_ios_client_version.to_alipay_dict()
else:
params['build_max_ios_client_version'] = self.build_max_ios_client_version
if self.build_min_android_client_version:
if hasattr(self.build_min_android_client_version, 'to_alipay_dict'):
params['build_min_android_client_version'] = self.build_min_android_client_version.to_alipay_dict()
else:
params['build_min_android_client_version'] = self.build_min_android_client_version
if self.build_min_ios_client_version:
if hasattr(self.build_min_ios_client_version, 'to_alipay_dict'):
params['build_min_ios_client_version'] = self.build_min_ios_client_version.to_alipay_dict()
else:
params['build_min_ios_client_version'] = self.build_min_ios_client_version
if self.build_package_md_5:
if hasattr(self.build_package_md_5, 'to_alipay_dict'):
params['build_package_md_5'] = self.build_package_md_5.to_alipay_dict()
else:
params['build_package_md_5'] = self.build_package_md_5
if self.build_package_name:
if hasattr(self.build_package_name, 'to_alipay_dict'):
params['build_package_name'] = self.build_package_name.to_alipay_dict()
else:
params['build_package_name'] = self.build_package_name
if self.build_package_stream:
if hasattr(self.build_package_stream, 'to_alipay_dict'):
params['build_package_stream'] = self.build_package_stream.to_alipay_dict()
else:
params['build_package_stream'] = self.build_package_stream
if self.build_qcloud_info:
if hasattr(self.build_qcloud_info, 'to_alipay_dict'):
params['build_qcloud_info'] = self.build_qcloud_info.to_alipay_dict()
else:
params['build_qcloud_info'] = self.build_qcloud_info
if self.build_signed_pkg_url:
if hasattr(self.build_signed_pkg_url, 'to_alipay_dict'):
params['build_signed_pkg_url'] = self.build_signed_pkg_url.to_alipay_dict()
else:
params['build_signed_pkg_url'] = self.build_signed_pkg_url
if self.build_source_pkg_size:
if hasattr(self.build_source_pkg_size, 'to_alipay_dict'):
params['build_source_pkg_size'] = self.build_source_pkg_size.to_alipay_dict()
else:
params['build_source_pkg_size'] = self.build_source_pkg_size
if self.build_source_pkg_url:
if hasattr(self.build_source_pkg_url, 'to_alipay_dict'):
params['build_source_pkg_url'] = self.build_source_pkg_url.to_alipay_dict()
else:
params['build_source_pkg_url'] = self.build_source_pkg_url
if self.build_sub_url:
if hasattr(self.build_sub_url, 'to_alipay_dict'):
params['build_sub_url'] = self.build_sub_url.to_alipay_dict()
else:
params['build_sub_url'] = self.build_sub_url
if self.build_version:
if hasattr(self.build_version, 'to_alipay_dict'):
params['build_version'] = self.build_version.to_alipay_dict()
else:
params['build_version'] = self.build_version
if self.builded_package_size:
if hasattr(self.builded_package_size, 'to_alipay_dict'):
params['builded_package_size'] = self.builded_package_size.to_alipay_dict()
else:
params['builded_package_size'] = self.builded_package_size
if self.builded_package_url:
if hasattr(self.builded_package_url, 'to_alipay_dict'):
params['builded_package_url'] = self.builded_package_url.to_alipay_dict()
else:
params['builded_package_url'] = self.builded_package_url
if self.bundle_id:
if hasattr(self.bundle_id, 'to_alipay_dict'):
params['bundle_id'] = self.bundle_id.to_alipay_dict()
else:
params['bundle_id'] = self.bundle_id
if self.client_type:
if hasattr(self.client_type, 'to_alipay_dict'):
params['client_type'] = self.client_type.to_alipay_dict()
else:
params['client_type'] = self.client_type
if self.inst_code:
if hasattr(self.inst_code, 'to_alipay_dict'):
params['inst_code'] = self.inst_code.to_alipay_dict()
else:
params['inst_code'] = self.inst_code
if self.mini_app_id:
if hasattr(self.mini_app_id, 'to_alipay_dict'):
params['mini_app_id'] = self.mini_app_id.to_alipay_dict()
else:
params['mini_app_id'] = self.mini_app_id
if self.plugin_refs:
if isinstance(self.plugin_refs, list):
for i in range(0, len(self.plugin_refs)):
element = self.plugin_refs[i]
if hasattr(element, 'to_alipay_dict'):
self.plugin_refs[i] = element.to_alipay_dict()
if hasattr(self.plugin_refs, 'to_alipay_dict'):
params['plugin_refs'] = self.plugin_refs.to_alipay_dict()
else:
params['plugin_refs'] = self.plugin_refs
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayOpenMiniInnerversionUploadModel()
if 'app_origin' in d:
o.app_origin = d['app_origin']
if 'build_app_type' in d:
o.build_app_type = d['build_app_type']
if 'build_extra_info' in d:
o.build_extra_info = d['build_extra_info']
if 'build_extra_mini_project_config' in d:
o.build_extra_mini_project_config = d['build_extra_mini_project_config']
if 'build_js_permission' in d:
o.build_js_permission = d['build_js_permission']
if 'build_main_url' in d:
o.build_main_url = d['build_main_url']
if 'build_max_android_client_version' in d:
o.build_max_android_client_version = d['build_max_android_client_version']
if 'build_max_ios_client_version' in d:
o.build_max_ios_client_version = d['build_max_ios_client_version']
if 'build_min_android_client_version' in d:
o.build_min_android_client_version = d['build_min_android_client_version']
if 'build_min_ios_client_version' in d:
o.build_min_ios_client_version = d['build_min_ios_client_version']
if 'build_package_md_5' in d:
o.build_package_md_5 = d['build_package_md_5']
if 'build_package_name' in d:
o.build_package_name = d['build_package_name']
if 'build_package_stream' in d:
o.build_package_stream = d['build_package_stream']
if 'build_qcloud_info' in d:
o.build_qcloud_info = d['build_qcloud_info']
if 'build_signed_pkg_url' in d:
o.build_signed_pkg_url = d['build_signed_pkg_url']
if 'build_source_pkg_size' in d:
o.build_source_pkg_size = d['build_source_pkg_size']
if 'build_source_pkg_url' in d:
o.build_source_pkg_url = d['build_source_pkg_url']
if 'build_sub_url' in d:
o.build_sub_url = d['build_sub_url']
if 'build_version' in d:
o.build_version = d['build_version']
if 'builded_package_size' in d:
o.builded_package_size = d['builded_package_size']
if 'builded_package_url' in d:
o.builded_package_url = d['builded_package_url']
if 'bundle_id' in d:
o.bundle_id = d['bundle_id']
if 'client_type' in d:
o.client_type = d['client_type']
if 'inst_code' in d:
o.inst_code = d['inst_code']
if 'mini_app_id' in d:
o.mini_app_id = d['mini_app_id']
if 'plugin_refs' in d:
o.plugin_refs = d['plugin_refs']
return o
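# Illustrative sketch (not part of the SDK): the model converts to and from
# plain dicts via to_alipay_dict()/from_alipay_dict(); the ids below are
# hypothetical placeholders.
def _example_roundtrip():
    model = AlipayOpenMiniInnerversionUploadModel()
    model.mini_app_id = '2021000000000000'
    model.build_version = '0.0.1'
    payload = model.to_alipay_dict()
    return AlipayOpenMiniInnerversionUploadModel.from_alipay_dict(payload)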
|
corehq/apps/smsforms/app.py | akashkj/commcare-hq | 471 | 11132096 | <filename>corehq/apps/smsforms/app.py
import re
from xml.etree.cElementTree import XML, tostring
from dimagi.utils.parsing import json_format_datetime
from corehq.apps.app_manager.util import get_cloudcare_session_data
from corehq.apps.cloudcare.touchforms_api import CaseSessionDataHelper
from corehq.apps.formplayer_api.smsforms import sms as tfsms
from corehq.apps.formplayer_api.smsforms.api import (
FormplayerInterface,
InvalidSessionIdException,
TouchformsError,
XFormsConfig,
)
from corehq.apps.receiverwrapper.util import submit_form_locally
from corehq.form_processor.utils import is_commcarecase
from corehq.messaging.scheduling.util import utcnow
from .models import SQLXFormsSession
COMMCONNECT_DEVICE_ID = "commconnect"
def start_session(session, domain, contact, app, form, case_id=None, yield_responses=False):
"""
Starts a session in touchforms and saves the record in the database.
Returns a tuple containing the session object and the (text-only)
list of generated questions/responses based on the form.
Special params:
yield_responses - If True, the list of xforms responses is returned, otherwise the text prompt for each is returned
"""
# NOTE: this call assumes that "contact" will expose three
# properties: .raw_username, .get_id, and .get_language_code
session_data = CaseSessionDataHelper(domain, contact, case_id, app, form).get_session_data(
COMMCONNECT_DEVICE_ID)
kwargs = {}
if is_commcarecase(contact):
kwargs['restore_as_case_id'] = contact.case_id
else:
kwargs['restore_as'] = contact.raw_username
if app and form:
session_data.update(get_cloudcare_session_data(domain, form, contact))
language = contact.get_language_code()
config = XFormsConfig(form_content=form.render_xform().decode('utf-8'),
language=language,
session_data=session_data,
domain=domain,
**kwargs)
session_start_info = tfsms.start_session(config)
session.session_id = session_start_info.session_id
session.save()
responses = session_start_info.first_responses
if len(responses) > 0 and responses[0].status == 'http-error':
session.mark_completed(False)
session.save()
raise TouchformsError('Cannot connect to touchforms.')
# Prevent future update conflicts by getting the session again from the db
# since the session could have been updated separately in the first_responses call
session = SQLXFormsSession.objects.get(pk=session.pk)
if yield_responses:
return (session, responses)
else:
return (session, _responses_to_text(responses))
def get_responses(domain, session_id, text):
"""
Try to process this message like a session-based submission against
an xform.
Returns a list of responses if there are any.
"""
return list(tfsms.next_responses(session_id, text, domain))
def _responses_to_text(responses):
return [r.text_prompt for r in responses if r.text_prompt]
def get_events_from_responses(responses):
return [r.event for r in responses if r.event]
def submit_unfinished_form(session):
"""
Gets the raw instance of the session's form and submits it. This is used with
sms and ivr surveys to save all questions answered so far in a session that
needs to close.
If session.include_case_updates_in_partial_submissions is False, no case
create / update / close actions will be performed, but the form will still be submitted.
The form is only submitted if the smsforms session has not yet completed.
"""
# Get and clean the raw xml
try:
response = FormplayerInterface(session.session_id, session.domain).get_raw_instance()
# Formplayer's ExceptionResponseBean includes the exception message,
        # status ("error"), url, and type ("text")
if response.get('status') == 'error':
raise TouchformsError(response.get('exception'))
xml = response['output']
except InvalidSessionIdException:
return
root = XML(xml)
case_tag_regex = re.compile(r"^(\{.*\}){0,1}case$") # Use regex in order to search regardless of namespace
meta_tag_regex = re.compile(r"^(\{.*\}){0,1}meta$")
timeEnd_tag_regex = re.compile(r"^(\{.*\}){0,1}timeEnd$")
    current_timestamp = json_format_datetime(utcnow())
for child in root:
if case_tag_regex.match(child.tag) is not None:
# Found the case tag
case_element = child
            case_element.set("date_modified", current_timestamp)
if not session.include_case_updates_in_partial_submissions:
# Remove case actions (create, update, close)
child_elements = [case_action for case_action in case_element]
for case_action in child_elements:
case_element.remove(case_action)
elif meta_tag_regex.match(child.tag) is not None:
# Found the meta tag, now set the value for timeEnd
for meta_child in child:
if timeEnd_tag_regex.match(meta_child.tag):
                    meta_child.text = current_timestamp
cleaned_xml = tostring(root)
# Submit the xml
result = submit_form_locally(cleaned_xml, session.domain, app_id=session.app_id, partial_submission=True)
session.submission_id = result.xform.form_id
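# Illustrative sketch (not part of the original module): the tag regexes above
# deliberately tolerate an optional XML namespace prefix, so they match the
# element tag whether or not the serialized form carries a namespace.
def _example_tag_matching():
    case_tag_regex = re.compile(r"^(\{.*\}){0,1}case$")
    assert case_tag_regex.match("case")
    assert case_tag_regex.match("{http://commcarehq.org/case/transaction/v2}case")
    assert not case_tag_regex.match("subcase")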
|
tests/engine/test_version_parsing.py | 0scarB/piccolo | 750 | 11132104 | from unittest import TestCase
from piccolo.engine.postgres import PostgresEngine
from ..base import postgres_only
@postgres_only
class TestVersionParsing(TestCase):
def test_version_parsing(self):
"""
Make sure the version number can correctly be parsed from a range
of known formats.
"""
self.assertEqual(
PostgresEngine._parse_raw_version_string(version_string="9.4"), 9.4
)
self.assertEqual(
PostgresEngine._parse_raw_version_string(version_string="9.4.1"),
9.4,
)
self.assertEqual(
PostgresEngine._parse_raw_version_string(
version_string="12.4 (Ubuntu 12.4-0ubuntu0.20.04.1)"
),
12.4,
)
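# Illustrative sketch (not the actual Piccolo implementation): one way such a
# raw version string could be reduced to a major.minor float, matching the
# expectations asserted above.
def _example_parse_version(version_string: str) -> float:
    number = version_string.split(" ")[0]      # "12.4 (Ubuntu ...)" -> "12.4"
    major, minor = number.split(".")[:2]       # "9.4.1" -> ("9", "4")
    return float("{}.{}".format(major, minor))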
|
open_spiel/python/bots/human.py | texasmichelle/open_spiel | 3,167 | 11132108 | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A bot that asks the user which action to play."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import pyspiel
_MAX_WIDTH = int(os.getenv("COLUMNS", 80)) # Get your TTY width.
def _print_columns(strings):
"""Prints a list of strings in columns."""
padding = 2
longest = max(len(s) for s in strings)
max_columns = math.floor((_MAX_WIDTH - 1) / (longest + 2 * padding))
rows = math.ceil(len(strings) / max_columns)
columns = math.ceil(len(strings) / rows) # Might not fill all max_columns.
for r in range(rows):
for c in range(columns):
i = r + c * rows
if i < len(strings):
print(" " * padding + strings[i].ljust(longest + padding), end="")
print()
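# Illustrative sketch (not part of the original module): _print_columns lays
# the strings out column-major, wrapping them into however many columns fit
# within _MAX_WIDTH.
def _example_print_columns():
  _print_columns(["{}: {}".format(i, a) for i, a in enumerate(["pass", "bet"])])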
class HumanBot(pyspiel.Bot):
"""Asks the user which action to play."""
def step_with_policy(self, state):
"""Returns the stochastic policy and selected action in the given state."""
legal_actions = state.legal_actions(state.current_player())
if not legal_actions:
return [], pyspiel.INVALID_ACTION
p = 1 / len(legal_actions)
policy = [(action, p) for action in legal_actions]
action_map = {
state.action_to_string(state.current_player(), action): action
for action in legal_actions
}
while True:
action_str = input("Choose an action (empty to print legal actions): ")
if not action_str:
print("Legal actions(s):")
longest_num = max(len(str(action)) for action in legal_actions)
_print_columns([
"{}: {}".format(str(action).rjust(longest_num), action_str)
for action_str, action in sorted(action_map.items())
])
continue
if action_str in action_map:
return policy, action_map[action_str]
try:
action = int(action_str)
except ValueError:
print("Could not parse the action:", action_str)
continue
if action in legal_actions:
return policy, action
print("Illegal action selected:", action_str)
def step(self, state):
return self.step_with_policy(state)[1]
def restart_at(self, state):
pass
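# Illustrative sketch (not part of the original module): driving a game to
# completion with the bot, assuming the standard pyspiel API and a game with
# no chance nodes (e.g. tic_tac_toe).
def _example_play(game_name="tic_tac_toe"):
  game = pyspiel.load_game(game_name)
  state = game.new_initial_state()
  bot = HumanBot()
  while not state.is_terminal():
    state.apply_action(bot.step(state))
  return state.returns()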
|
tests/pylon_tests/gigE/pylongigetestcase.py | matt-phair/pypylon | 358 | 11132115 | import unittest
from pypylon import pylon
def get_class_and_filter():
device_class = "BaslerGigE"
di = pylon.DeviceInfo()
di.SetDeviceClass(device_class)
return device_class, [di]
class PylonTestCase(unittest.TestCase):
device_class, device_filter = get_class_and_filter()
def create_first(self):
tlf = pylon.TlFactory.GetInstance()
return pylon.InstantCamera(tlf.CreateFirstDevice(self.device_filter[0]))
|
setup.py | alek5k/pytransform3d | 304 | 11132118 | <reponame>alek5k/pytransform3d
#!/usr/bin/env python
from setuptools import setup, find_packages
import pytransform3d
if __name__ == "__main__":
with open("README.md", "r") as f:
long_description = f.read()
setup(name="pytransform3d",
version=pytransform3d.__version__,
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/rock-learning/pytransform3d',
description='3D transformations for Python',
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Visualization",
],
license='BSD-3-Clause',
packages=find_packages(),
install_requires=["numpy", "scipy", "matplotlib", "lxml",
"beautifulsoup4"],
extras_require={
"all": ["pydot", "trimesh", "open3d"],
"doc": ["numpydoc", "sphinx", "sphinx-gallery", "sphinx-bootstrap-theme"],
"test": ["nose", "coverage"]
}
)
|
test/integration/test_tone_analyzer_v3.py | johann-petrak/python-sdk | 1,579 | 11132125 | <filename>test/integration/test_tone_analyzer_v3.py
# coding: utf-8
from unittest import TestCase
import os
import ibm_watson
import pytest
import json
import time
from os.path import join
from ibm_watson.tone_analyzer_v3 import ToneInput
@pytest.mark.skipif(os.getenv('TONE_ANALYZER_APIKEY') is None,
                    reason='requires TONE_ANALYZER_APIKEY')
class TestToneAnalyzerV3(TestCase):
def setUp(self):
self.tone_analyzer = ibm_watson.ToneAnalyzerV3(version='2017-09-21')
self.tone_analyzer.set_default_headers({
'X-Watson-Learning-Opt-Out': '1',
'X-Watson-Test': '1'
})
def test_tone_chat(self):
utterances = [{
'text': 'I am very happy.',
'user': 'glenn'
}, {
'text': 'It is a good day.',
'user': 'glenn'
}]
tone_chat = self.tone_analyzer.tone_chat(utterances).get_result()
assert tone_chat is not None
def test_tone1(self):
tone = self.tone_analyzer.tone(tone_input='I am very happy. It is a good day.', content_type="text/plain").get_result()
assert tone is not None
def test_tone2(self):
with open(join(os.getcwd(), 'resources/tone-example.json')) as tone_json:
tone = self.tone_analyzer.tone(json.load(tone_json)['text'], content_type="text/plain").get_result()
assert tone is not None
def test_tone3(self):
with open(join(os.getcwd(), 'resources/tone-example.json')) as tone_json:
tone = self.tone_analyzer.tone(tone_input=json.load(tone_json)['text'], content_type='text/plain', sentences=True).get_result()
assert tone is not None
def test_tone4(self):
with open(join(os.getcwd(), 'resources/tone-example.json')) as tone_json:
tone = self.tone_analyzer.tone(tone_input=json.load(tone_json), content_type='application/json').get_result()
assert tone is not None
def test_tone5(self):
with open(join(os.getcwd(), 'resources/tone-example-html.json')) as tone_html:
tone = self.tone_analyzer.tone(json.load(tone_html)['text'],content_type='text/html').get_result()
assert tone is not None
def test_tone6(self):
tone_input = ToneInput('I am very happy. It is a good day.')
tone = self.tone_analyzer.tone(tone_input=tone_input, content_type="application/json").get_result()
assert tone is not None |
openfold/model/model.py | aqlaboratory/openfold | 789 | 11132164 | # Copyright 2021 AlQuraishi Laboratory
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import weakref
import torch
import torch.nn as nn
from openfold.model.embedders import (
InputEmbedder,
RecyclingEmbedder,
TemplateAngleEmbedder,
TemplatePairEmbedder,
ExtraMSAEmbedder,
)
from openfold.model.evoformer import EvoformerStack, ExtraMSAStack
from openfold.model.heads import AuxiliaryHeads
from openfold.model.structure_module import StructureModule
from openfold.model.template import (
TemplatePairStack,
TemplatePointwiseAttention,
embed_templates_average,
embed_templates_offload,
)
import openfold.np.residue_constants as residue_constants
from openfold.utils.feats import (
pseudo_beta_fn,
build_extra_msa_feat,
build_template_angle_feat,
build_template_pair_feat,
atom14_to_atom37,
)
from openfold.utils.loss import (
compute_plddt,
)
from openfold.utils.tensor_utils import (
add,
dict_multimap,
tensor_tree_map,
)
class AlphaFold(nn.Module):
"""
Alphafold 2.
Implements Algorithm 2 (but with training).
"""
def __init__(self, config):
"""
Args:
config:
A dict-like config object (like the one in config.py)
"""
super(AlphaFold, self).__init__()
self.globals = config.globals
self.config = config.model
self.template_config = self.config.template
self.extra_msa_config = self.config.extra_msa
# Main trunk + structure module
self.input_embedder = InputEmbedder(
**self.config["input_embedder"],
)
self.recycling_embedder = RecyclingEmbedder(
**self.config["recycling_embedder"],
)
self.template_angle_embedder = TemplateAngleEmbedder(
**self.template_config["template_angle_embedder"],
)
self.template_pair_embedder = TemplatePairEmbedder(
**self.template_config["template_pair_embedder"],
)
self.template_pair_stack = TemplatePairStack(
**self.template_config["template_pair_stack"],
)
self.template_pointwise_att = TemplatePointwiseAttention(
**self.template_config["template_pointwise_attention"],
)
self.extra_msa_embedder = ExtraMSAEmbedder(
**self.extra_msa_config["extra_msa_embedder"],
)
self.extra_msa_stack = ExtraMSAStack(
**self.extra_msa_config["extra_msa_stack"],
)
self.evoformer = EvoformerStack(
**self.config["evoformer_stack"],
)
self.structure_module = StructureModule(
**self.config["structure_module"],
)
self.aux_heads = AuxiliaryHeads(
self.config["heads"],
)
def embed_templates(self, batch, z, pair_mask, templ_dim):
if(self.template_config.offload_templates):
return embed_templates_offload(
self, batch, z, pair_mask, templ_dim,
)
elif(self.template_config.average_templates):
return embed_templates_average(
self, batch, z, pair_mask, templ_dim
)
inplace_safe = not (self.training or torch.is_grad_enabled())
# Embed the templates one at a time (with a poor man's vmap)
pair_embeds = []
n = z.shape[-2]
n_templ = batch["template_aatype"].shape[templ_dim]
if(inplace_safe):
# We'll preallocate the full pair tensor now to avoid manifesting
# a second copy during the stack later on
t_pair = z.new_zeros(
z.shape[:-3] +
(n_templ, n, n, self.globals.c_t)
)
for i in range(n_templ):
idx = batch["template_aatype"].new_tensor(i)
single_template_feats = tensor_tree_map(
lambda t: torch.index_select(t, templ_dim, idx),
batch,
)
# [*, N, N, C_t]
t = build_template_pair_feat(
single_template_feats,
use_unit_vector=self.config.template.use_unit_vector,
inf=self.config.template.inf,
eps=self.config.template.eps,
**self.config.template.distogram,
).to(z.dtype)
t = self.template_pair_embedder(t)
if(inplace_safe):
t_pair[..., i, :, :, :] = t
else:
pair_embeds.append(t)
del t
if(not inplace_safe):
t_pair = torch.cat(pair_embeds, dim=templ_dim)
del pair_embeds
# [*, S_t, N, N, C_z]
t = self.template_pair_stack(
t_pair,
pair_mask.unsqueeze(-3).to(dtype=z.dtype),
chunk_size=self.globals.chunk_size,
use_lma=self.globals.use_lma,
_mask_trans=self.config._mask_trans,
)
del t_pair
# [*, N, N, C_z]
t = self.template_pointwise_att(
t,
z,
template_mask=batch["template_mask"].to(dtype=z.dtype),
use_lma=self.globals.use_lma,
)
if(inplace_safe):
t *= (torch.sum(batch["template_mask"], dim=-1) > 0)
else:
t = t * (torch.sum(batch["template_mask"], dim=-1) > 0)
ret = {}
if self.config.template.embed_angles:
template_angle_feat = build_template_angle_feat(
batch
)
# [*, S_t, N, C_m]
a = self.template_angle_embedder(template_angle_feat)
ret["template_angle_embedding"] = a
ret.update({"template_pair_embedding": t})
del t
return ret
def iteration(self, feats, prevs, _recycle=True):
# Primary output dictionary
outputs = {}
# This needs to be done manually for DeepSpeed's sake
dtype = next(self.parameters()).dtype
for k in feats:
if(feats[k].dtype == torch.float32):
feats[k] = feats[k].to(dtype=dtype)
# Grab some data about the input
batch_dims = feats["target_feat"].shape[:-2]
no_batch_dims = len(batch_dims)
n = feats["target_feat"].shape[-2]
n_seq = feats["msa_feat"].shape[-3]
device = feats["target_feat"].device
inplace_safe = not (self.training or torch.is_grad_enabled())
# Prep some features
seq_mask = feats["seq_mask"]
pair_mask = seq_mask[..., None] * seq_mask[..., None, :]
msa_mask = feats["msa_mask"]
## Initialize the MSA and pair representations
# m: [*, S_c, N, C_m]
# z: [*, N, N, C_z]
m, z = self.input_embedder(
feats["target_feat"],
feats["residue_index"],
feats["msa_feat"],
)
# Unpack the recycling embeddings. Removing them from the list allows
# them to be freed further down in this function.
m_1_prev, z_prev, x_prev = reversed([prevs.pop() for _ in range(3)])
# Initialize the recycling embeddings, if needs be
if None in [m_1_prev, z_prev, x_prev]:
# [*, N, C_m]
m_1_prev = m.new_zeros(
(*batch_dims, n, self.config.input_embedder.c_m),
requires_grad=False,
)
# [*, N, N, C_z]
z_prev = z.new_zeros(
(*batch_dims, n, n, self.config.input_embedder.c_z),
requires_grad=False,
)
# [*, N, 3]
x_prev = z.new_zeros(
(*batch_dims, n, residue_constants.atom_type_num, 3),
requires_grad=False,
)
x_prev = pseudo_beta_fn(
feats["aatype"], x_prev, None
).to(dtype=z.dtype)
# m_1_prev_emb: [*, N, C_m]
# z_prev_emb: [*, N, N, C_z]
m_1_prev_emb, z_prev_emb = self.recycling_embedder(
m_1_prev,
z_prev,
x_prev,
_inplace=not (self.training or torch.is_grad_enabled()),
)
# [*, S_c, N, C_m]
m[..., 0, :, :] += m_1_prev_emb
# [*, N, N, C_z]
z += z_prev_emb
# This matters during inference with large N
del m_1_prev, z_prev, x_prev, m_1_prev_emb, z_prev_emb
# Embed the templates + merge with MSA/pair embeddings
if self.config.template.enabled:
template_feats = {
k: v for k, v in feats.items() if k.startswith("template_")
}
template_embeds = self.embed_templates(
template_feats,
z,
pair_mask.to(dtype=z.dtype),
no_batch_dims,
)
# [*, N, N, C_z]
z = add(z,
template_embeds.pop("template_pair_embedding"),
inplace_safe,
)
if self.config.template.embed_angles:
# [*, S = S_c + S_t, N, C_m]
m = torch.cat(
[m, template_embeds["template_angle_embedding"]],
dim=-3
)
# [*, S, N]
torsion_angles_mask = feats["template_torsion_angles_mask"]
msa_mask = torch.cat(
[feats["msa_mask"], torsion_angles_mask[..., 2]],
dim=-2
)
# Embed extra MSA features + merge with pairwise embeddings
if self.config.extra_msa.enabled:
# [*, S_e, N, C_e]
a = self.extra_msa_embedder(build_extra_msa_feat(feats))
# [*, N, N, C_z]
z = self.extra_msa_stack(
a,
z,
msa_mask=feats["extra_msa_mask"].to(dtype=a.dtype),
chunk_size=self.globals.chunk_size,
use_lma=self.globals.use_lma,
pair_mask=pair_mask.to(dtype=z.dtype),
_mask_trans=self.config._mask_trans,
)
del a
# Run MSA + pair embeddings through the trunk of the network
# m: [*, S, N, C_m]
# z: [*, N, N, C_z]
# s: [*, N, C_s]
m, z, s = self.evoformer(
m,
z,
msa_mask=msa_mask.to(dtype=m.dtype),
pair_mask=pair_mask.to(dtype=z.dtype),
chunk_size=self.globals.chunk_size,
use_lma=self.globals.use_lma,
_mask_trans=self.config._mask_trans,
)
outputs["msa"] = m[..., :n_seq, :, :]
outputs["pair"] = z
outputs["single"] = s
# Predict 3D structure
outputs["sm"] = self.structure_module(
s,
z,
feats["aatype"],
mask=feats["seq_mask"].to(dtype=s.dtype),
)
outputs["final_atom_positions"] = atom14_to_atom37(
outputs["sm"]["positions"][-1], feats
)
outputs["final_atom_mask"] = feats["atom37_atom_exists"]
outputs["final_affine_tensor"] = outputs["sm"]["frames"][-1]
# Save embeddings for use during the next recycling iteration
# [*, N, C_m]
m_1_prev = m[..., 0, :, :]
# [*, N, N, C_z]
z_prev = z
# [*, N, 3]
x_prev = outputs["final_atom_positions"]
return outputs, m_1_prev, z_prev, x_prev
def forward(self, batch):
"""
Args:
batch:
Dictionary of arguments outlined in Algorithm 2. Keys must
include the official names of the features in the
supplement subsection 1.2.9.
The final dimension of each input must have length equal to
the number of recycling iterations.
Features (without the recycling dimension):
"aatype" ([*, N_res]):
Contrary to the supplement, this tensor of residue
indices is not one-hot.
"target_feat" ([*, N_res, C_tf])
One-hot encoding of the target sequence. C_tf is
config.model.input_embedder.tf_dim.
"residue_index" ([*, N_res])
Tensor whose final dimension consists of
consecutive indices from 0 to N_res.
"msa_feat" ([*, N_seq, N_res, C_msa])
MSA features, constructed as in the supplement.
C_msa is config.model.input_embedder.msa_dim.
"seq_mask" ([*, N_res])
1-D sequence mask
"msa_mask" ([*, N_seq, N_res])
MSA mask
"pair_mask" ([*, N_res, N_res])
2-D pair mask
"extra_msa_mask" ([*, N_extra, N_res])
Extra MSA mask
"template_mask" ([*, N_templ])
Template mask (on the level of templates, not
residues)
"template_aatype" ([*, N_templ, N_res])
Tensor of template residue indices (indices greater
than 19 are clamped to 20 (Unknown))
"template_all_atom_positions"
([*, N_templ, N_res, 37, 3])
Template atom coordinates in atom37 format
"template_all_atom_mask" ([*, N_templ, N_res, 37])
Template atom coordinate mask
"template_pseudo_beta" ([*, N_templ, N_res, 3])
Positions of template carbon "pseudo-beta" atoms
(i.e. C_beta for all residues but glycine, for
                        which C_alpha is used instead)
"template_pseudo_beta_mask" ([*, N_templ, N_res])
Pseudo-beta mask
"""
# Initialize recycling embeddings
m_1_prev, z_prev, x_prev = None, None, None
prevs = [m_1_prev, z_prev, x_prev]
is_grad_enabled = torch.is_grad_enabled()
# Main recycling loop
num_iters = batch["aatype"].shape[-1]
for cycle_no in range(num_iters):
# Select the features for the current recycling cycle
fetch_cur_batch = lambda t: t[..., cycle_no]
feats = tensor_tree_map(fetch_cur_batch, batch)
# Enable grad iff we're training and it's the final recycling layer
is_final_iter = cycle_no == (num_iters - 1)
with torch.set_grad_enabled(is_grad_enabled and is_final_iter):
if is_final_iter:
# Sidestep AMP bug (PyTorch issue #65766)
if torch.is_autocast_enabled():
torch.clear_autocast_cache()
# Run the next iteration of the model
outputs, m_1_prev, z_prev, x_prev = self.iteration(
feats,
prevs,
_recycle=(num_iters > 1)
)
if(not is_final_iter):
del outputs
prevs = [m_1_prev, z_prev, x_prev]
del m_1_prev, z_prev, x_prev
# Run auxiliary heads
outputs.update(self.aux_heads(outputs))
return outputs
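# Illustrative sketch (not part of the model): forward() consumes the trailing
# recycling dimension of every feature exactly as the fetch_cur_batch lambda
# above does; the helper below only demonstrates that slicing on an arbitrary
# dict of feature tensors.
def _example_select_recycling_cycle(batch, cycle_no):
    # batch: dict of tensors whose final dimension indexes recycling iterations
    return tensor_tree_map(lambda t: t[..., cycle_no], batch)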
|
Trakttv.bundle/Contents/Libraries/Shared/plugin/core/logger/filters/duplicate.py | disrupted/Trakttv.bundle | 1,346 | 11132176 | <reponame>disrupted/Trakttv.bundle<gh_stars>1000+
from logging import Filter
import logging
log = logging.getLogger(__name__)
class DuplicateReportFilter(Filter):
def filter(self, record):
if self.is_duplicate_message(record):
return False
return True
@classmethod
def is_duplicate_message(cls, record):
if record.levelno < logging.WARNING:
return False
if not record:
return False
# Try retrieve "duplicate" attribute from record
duplicate = getattr(record, 'duplicate', None)
# Convert to boolean
return bool(duplicate)
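# Illustrative sketch (not part of the original module): records at WARNING or
# above that carry a truthy `duplicate` attribute (passed via `extra`) are
# dropped by the filter; everything else passes through.
def _example_usage():
    logger = logging.getLogger('plugin.example')  # hypothetical logger name
    logger.addFilter(DuplicateReportFilter())
    logger.warning('reported once')                                 # kept
    logger.warning('already reported', extra={'duplicate': True})   # dropped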
|
gamestonk_terminal/stocks/fundamental_analysis/yahoo_finance_view.py | elan17/GamestonkTerminal | 1,835 | 11132203 | """ Yahoo Finance View """
__docformat__ = "numpy"
import os
import webbrowser
import matplotlib.pyplot as plt
import pandas as pd
from tabulate import tabulate
from gamestonk_terminal.stocks.fundamental_analysis import yahoo_finance_model
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal.helper_funcs import export_data, plot_autoscale
from gamestonk_terminal.config_plot import PLOT_DPI
from gamestonk_terminal.rich_config import console
def open_headquarters_map(ticker: str):
"""Headquarters location of the company
Parameters
----------
ticker : str
Fundamental analysis ticker symbol
"""
webbrowser.open(yahoo_finance_model.get_hq(ticker))
console.print("")
def open_web(ticker: str):
"""Website of the company
Parameters
----------
ticker : str
Fundamental analysis ticker symbol
"""
webbrowser.open(yahoo_finance_model.get_website(ticker))
console.print("")
def display_info(ticker: str):
"""Yahoo Finance ticker info
Parameters
----------
ticker : str
Fundamental analysis ticker symbol
"""
summary = ""
df_info = yahoo_finance_model.get_info(ticker)
if "Long business summary" in df_info.index:
summary = df_info.loc["Long business summary"].values[0]
df_info = df_info.drop(index=["Long business summary"])
if gtff.USE_TABULATE_DF:
print(tabulate(df_info, headers=[], showindex=True, tablefmt="fancy_grid"))
else:
console.print(df_info.to_string(header=False))
if summary:
console.print("Business Summary:")
console.print(summary)
console.print("")
def display_shareholders(ticker: str):
"""Yahoo Finance ticker shareholders
Parameters
----------
ticker : str
Fundamental analysis ticker symbol
"""
(
df_major_holders,
df_institutional_shareholders,
df_mutualfund_shareholders,
) = yahoo_finance_model.get_shareholders(ticker)
dfs = [df_major_holders, df_institutional_shareholders, df_mutualfund_shareholders]
titles = ["Major Holders:\n", "Institutuinal Holders:\n", "Mutual Fund Holders:\n"]
console.print("")
for df, title in zip(dfs, titles):
console.print(title)
if gtff.USE_TABULATE_DF:
print(
tabulate(df, headers=df.columns, tablefmt="fancy_grid", showindex=False)
)
else:
console.print(df.to_string(index=False))
console.print("")
def display_sustainability(ticker: str):
"""Yahoo Finance ticker sustainability
Parameters
----------
ticker : str
Fundamental analysis ticker symbol
"""
df_sustainability = yahoo_finance_model.get_sustainability(ticker)
if df_sustainability.empty:
console.print("No sustainability data found.", "\n")
return
if gtff.USE_TABULATE_DF:
print(
tabulate(
df_sustainability,
headers=[],
tablefmt="fancy_grid",
showindex=True,
)
)
else:
console.print(df_sustainability.to_string(index=True))
console.print("")
def display_calendar_earnings(ticker: str):
"""Yahoo Finance ticker calendar earnings
Parameters
----------
ticker : str
Fundamental analysis ticker symbol
"""
df_calendar = yahoo_finance_model.get_calendar_earnings(ticker).T
if df_calendar.empty:
console.print("No calendar events found.\n")
return
if gtff.USE_TABULATE_DF:
print(
tabulate(
df_calendar,
showindex=False,
headers=df_calendar.columns,
tablefmt="fancy_grid",
)
)
else:
console.print(df_calendar.to_string(index=False))
console.print("")
def display_dividends(
ticker: str, limit: int = 12, plot: bool = False, export: str = ""
):
"""Display historical dividends
Parameters
----------
ticker: str
Stock ticker
limit: int
Number to show
plot: bool
        Plots historical data
export: str
Format to export data
"""
div_history = yahoo_finance_model.get_dividends(ticker)
if div_history.empty:
console.print("No dividends found.\n")
return
div_history["Dif"] = div_history.diff()
div_history = div_history[::-1]
if plot:
fig, ax = plt.subplots(
figsize=plot_autoscale(), constrained_layout=False, dpi=PLOT_DPI
)
ax.plot(
div_history.index,
div_history["Dividends"],
ls="-",
linewidth=0.75,
marker=".",
markersize=4,
mfc="k",
mec="k",
c="k",
alpha=1,
)
ax.set_xlabel("Date")
ax.set_ylabel("Amount ($)")
ax.set_title(f"Dividend History for {ticker}")
ax.set_xlim(div_history.index[-1], div_history.index[0])
if gtff.USE_ION:
plt.ion()
fig.tight_layout()
plt.show()
else:
div_history.index = pd.to_datetime(div_history.index, format="%Y%m%d").strftime(
"%Y-%m-%d"
)
if gtff.USE_TABULATE_DF:
print(
tabulate(
div_history.head(limit),
tablefmt="fancy_grid",
headers=["Amount Paid ($)", "Change"],
floatfmt=".2f",
)
)
else:
console.print(div_history.to_string())
console.print("")
export_data(export, os.path.dirname(os.path.abspath(__file__)), "divs", div_history)
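# Illustrative sketch (not part of the original module): typical calls into the
# view layer from the terminal controller, using a hypothetical ticker.
def _example_dividend_views():
    display_dividends("AAPL", limit=5)     # tabulated amounts and changes
    display_dividends("AAPL", plot=True)   # historical matplotlib plot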
|
tests/integration/s3/mock_storage_service.py | mariocesar/boto | 2,906 | 11132206 | # Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Provides basic mocks of core storage service classes, for unit testing:
ACL, Key, Bucket, Connection, and StorageUri. We implement a subset of
the interfaces defined in the real boto classes, but don't handle most
of the optional params (which we indicate with the constant "NOT_IMPL").
"""
import copy
import boto
import base64
import re
from hashlib import md5
from boto.utils import compute_md5
from boto.utils import find_matching_headers
from boto.utils import merge_headers_by_name
from boto.s3.prefix import Prefix
from boto.compat import six
NOT_IMPL = None
class MockAcl(object):
def __init__(self, parent=NOT_IMPL):
pass
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
pass
def to_xml(self):
return '<mock_ACL_XML/>'
class MockKey(object):
def __init__(self, bucket=None, name=None):
self.bucket = bucket
self.name = name
self.data = None
self.etag = None
self.size = None
self.closed = True
self.content_encoding = None
self.content_language = None
self.content_type = None
self.last_modified = 'Wed, 06 Oct 2010 05:11:54 GMT'
self.BufferSize = 8192
def __repr__(self):
if self.bucket:
return '<MockKey: %s,%s>' % (self.bucket.name, self.name)
else:
return '<MockKey: %s>' % self.name
def get_contents_as_string(self, headers=NOT_IMPL,
cb=NOT_IMPL, num_cb=NOT_IMPL,
torrent=NOT_IMPL,
version_id=NOT_IMPL):
return self.data
def get_contents_to_file(self, fp, headers=NOT_IMPL,
cb=NOT_IMPL, num_cb=NOT_IMPL,
torrent=NOT_IMPL,
version_id=NOT_IMPL,
res_download_handler=NOT_IMPL):
fp.write(six.ensure_binary(self.data))
def get_file(self, fp, headers=NOT_IMPL, cb=NOT_IMPL, num_cb=NOT_IMPL,
torrent=NOT_IMPL, version_id=NOT_IMPL,
override_num_retries=NOT_IMPL):
fp.write(self.data)
def _handle_headers(self, headers):
if not headers:
return
if find_matching_headers('Content-Encoding', headers):
self.content_encoding = merge_headers_by_name('Content-Encoding',
headers)
if find_matching_headers('Content-Type', headers):
self.content_type = merge_headers_by_name('Content-Type', headers)
if find_matching_headers('Content-Language', headers):
self.content_language = merge_headers_by_name('Content-Language',
headers)
# Simplistic partial implementation for headers: Just supports range GETs
# of flavor 'Range: bytes=xyz-'.
def open_read(self, headers=None, query_args=NOT_IMPL,
override_num_retries=NOT_IMPL):
if self.closed:
self.read_pos = 0
self.closed = False
if headers and 'Range' in headers:
match = re.match('bytes=([0-9]+)-$', headers['Range'])
if match:
self.read_pos = int(match.group(1))
def close(self, fast=NOT_IMPL):
self.closed = True
def read(self, size=0):
self.open_read()
if size == 0:
data = self.data[self.read_pos:]
self.read_pos = self.size
else:
data = self.data[self.read_pos:self.read_pos+size]
self.read_pos += size
if not data:
self.close()
return data
def set_contents_from_file(self, fp, headers=None, replace=NOT_IMPL,
cb=NOT_IMPL, num_cb=NOT_IMPL,
policy=NOT_IMPL, md5=NOT_IMPL,
res_upload_handler=NOT_IMPL):
self.data = fp.read()
self.set_etag()
self.size = len(self.data)
self._handle_headers(headers)
def set_contents_from_stream(self, fp, headers=None, replace=NOT_IMPL,
cb=NOT_IMPL, num_cb=NOT_IMPL, policy=NOT_IMPL,
reduced_redundancy=NOT_IMPL, query_args=NOT_IMPL,
size=NOT_IMPL):
self.data = ''
chunk = fp.read(self.BufferSize)
while chunk:
self.data += chunk
chunk = fp.read(self.BufferSize)
self.set_etag()
self.size = len(self.data)
self._handle_headers(headers)
def set_contents_from_string(self, s, headers=NOT_IMPL, replace=NOT_IMPL,
cb=NOT_IMPL, num_cb=NOT_IMPL, policy=NOT_IMPL,
md5=NOT_IMPL, reduced_redundancy=NOT_IMPL):
self.data = copy.copy(s)
self.set_etag()
self.size = len(s)
self._handle_headers(headers)
def set_contents_from_filename(self, filename, headers=None,
replace=NOT_IMPL, cb=NOT_IMPL,
num_cb=NOT_IMPL, policy=NOT_IMPL,
md5=NOT_IMPL, res_upload_handler=NOT_IMPL):
fp = open(filename, 'rb')
self.set_contents_from_file(fp, headers, replace, cb, num_cb,
policy, md5, res_upload_handler)
fp.close()
def copy(self, dst_bucket_name, dst_key, metadata=NOT_IMPL,
reduced_redundancy=NOT_IMPL, preserve_acl=NOT_IMPL):
dst_bucket = self.bucket.connection.get_bucket(dst_bucket_name)
return dst_bucket.copy_key(dst_key, self.bucket.name,
self.name, metadata)
@property
def provider(self):
provider = None
if self.bucket and self.bucket.connection:
provider = self.bucket.connection.provider
return provider
def set_etag(self):
"""
Set etag attribute by generating hex MD5 checksum on current
contents of mock key.
"""
m = md5()
if not isinstance(self.data, bytes):
m.update(self.data.encode('utf-8'))
else:
m.update(self.data)
hex_md5 = m.hexdigest()
self.etag = hex_md5
def compute_md5(self, fp):
"""
:type fp: file
:param fp: File pointer to the file to MD5 hash. The file pointer
will be reset to the beginning of the file before the
method returns.
:rtype: tuple
:return: A tuple containing the hex digest version of the MD5 hash
as the first element and the base64 encoded version of the
plain digest as the second element.
"""
tup = compute_md5(fp)
# Returned values are MD5 hash, base64 encoded MD5 hash, and file size.
# The internal implementation of compute_md5() needs to return the
# file size but we don't want to return that value to the external
# caller because it changes the class interface (i.e. it might
# break some code) so we consume the third tuple value here and
# return the remainder of the tuple to the caller, thereby preserving
# the existing interface.
self.size = tup[2]
return tup[0:2]
class MockBucket(object):
def __init__(self, connection=None, name=None, key_class=NOT_IMPL):
self.name = name
self.keys = {}
self.acls = {name: MockAcl()}
# default object ACLs are one per bucket and not supported for keys
self.def_acl = MockAcl()
self.subresources = {}
self.connection = connection
self.logging = False
def __repr__(self):
return 'MockBucket: %s' % self.name
def copy_key(self, new_key_name, src_bucket_name,
src_key_name, metadata=NOT_IMPL, src_version_id=NOT_IMPL,
storage_class=NOT_IMPL, preserve_acl=NOT_IMPL,
encrypt_key=NOT_IMPL, headers=NOT_IMPL, query_args=NOT_IMPL):
new_key = self.new_key(key_name=new_key_name)
src_key = self.connection.get_bucket(
src_bucket_name).get_key(src_key_name)
new_key.data = copy.copy(src_key.data)
new_key.size = len(new_key.data)
return new_key
def disable_logging(self):
self.logging = False
def enable_logging(self, target_bucket_prefix):
self.logging = True
def get_logging_config(self):
return {"Logging": {}}
def get_versioning_status(self, headers=NOT_IMPL):
return False
def get_acl(self, key_name='', headers=NOT_IMPL, version_id=NOT_IMPL):
if key_name:
# Return ACL for the key.
return self.acls[key_name]
else:
# Return ACL for the bucket.
return self.acls[self.name]
def get_def_acl(self, key_name=NOT_IMPL, headers=NOT_IMPL,
version_id=NOT_IMPL):
# Return default ACL for the bucket.
return self.def_acl
def get_subresource(self, subresource, key_name=NOT_IMPL, headers=NOT_IMPL,
version_id=NOT_IMPL):
if subresource in self.subresources:
return self.subresources[subresource]
else:
return '<Subresource/>'
def new_key(self, key_name=None):
mock_key = MockKey(self, key_name)
self.keys[key_name] = mock_key
self.acls[key_name] = MockAcl()
return mock_key
def delete_key(self, key_name, headers=NOT_IMPL,
version_id=NOT_IMPL, mfa_token=NOT_IMPL):
if key_name not in self.keys:
raise boto.exception.StorageResponseError(404, 'Not Found')
del self.keys[key_name]
def get_all_keys(self, headers=NOT_IMPL):
return six.itervalues(self.keys)
def get_key(self, key_name, headers=NOT_IMPL, version_id=NOT_IMPL):
# Emulate behavior of boto when get_key called with non-existent key.
if key_name not in self.keys:
return None
return self.keys[key_name]
def list(self, prefix='', delimiter='', marker=NOT_IMPL,
headers=NOT_IMPL):
prefix = prefix or '' # Turn None into '' for prefix match.
# Return list instead of using a generator so we don't get
# 'dictionary changed size during iteration' error when performing
# deletions while iterating (e.g., during test cleanup).
result = []
key_name_set = set()
for k in six.itervalues(self.keys):
if k.name.startswith(prefix):
k_name_past_prefix = k.name[len(prefix):]
if delimiter:
pos = k_name_past_prefix.find(delimiter)
else:
pos = -1
if (pos != -1):
key_or_prefix = Prefix(
bucket=self, name=k.name[:len(prefix)+pos+1])
else:
key_or_prefix = MockKey(bucket=self, name=k.name)
if key_or_prefix.name not in key_name_set:
key_name_set.add(key_or_prefix.name)
result.append(key_or_prefix)
return result
def set_acl(self, acl_or_str, key_name='', headers=NOT_IMPL,
version_id=NOT_IMPL):
# We only handle setting ACL XML here; if you pass a canned ACL
# the get_acl call will just return that string name.
if key_name:
# Set ACL for the key.
self.acls[key_name] = MockAcl(acl_or_str)
else:
# Set ACL for the bucket.
self.acls[self.name] = MockAcl(acl_or_str)
def set_def_acl(self, acl_or_str, key_name=NOT_IMPL, headers=NOT_IMPL,
version_id=NOT_IMPL):
# We only handle setting ACL XML here; if you pass a canned ACL
# the get_acl call will just return that string name.
# Set default ACL for the bucket.
self.def_acl = acl_or_str
def set_subresource(self, subresource, value, key_name=NOT_IMPL,
headers=NOT_IMPL, version_id=NOT_IMPL):
self.subresources[subresource] = value
class MockProvider(object):
def __init__(self, provider):
self.provider = provider
def get_provider_name(self):
return self.provider
class MockConnection(object):
def __init__(self, aws_access_key_id=NOT_IMPL,
aws_secret_access_key=NOT_IMPL, is_secure=NOT_IMPL,
port=NOT_IMPL, proxy=NOT_IMPL, proxy_port=NOT_IMPL,
proxy_user=NOT_IMPL, proxy_pass=NOT_IMPL,
host=NOT_IMPL, debug=NOT_IMPL,
https_connection_factory=NOT_IMPL,
calling_format=NOT_IMPL,
path=NOT_IMPL, provider='s3',
bucket_class=NOT_IMPL):
self.buckets = {}
self.provider = MockProvider(provider)
def create_bucket(self, bucket_name, headers=NOT_IMPL, location=NOT_IMPL,
policy=NOT_IMPL, storage_class=NOT_IMPL):
if bucket_name in self.buckets:
raise boto.exception.StorageCreateError(
409, 'BucketAlreadyOwnedByYou',
"<Message>Your previous request to create the named bucket "
"succeeded and you already own it.</Message>")
mock_bucket = MockBucket(name=bucket_name, connection=self)
self.buckets[bucket_name] = mock_bucket
return mock_bucket
def delete_bucket(self, bucket, headers=NOT_IMPL):
if bucket not in self.buckets:
raise boto.exception.StorageResponseError(
404, 'NoSuchBucket', '<Message>no such bucket</Message>')
del self.buckets[bucket]
def get_bucket(self, bucket_name, validate=NOT_IMPL, headers=NOT_IMPL):
if bucket_name not in self.buckets:
raise boto.exception.StorageResponseError(404, 'NoSuchBucket',
'Not Found')
return self.buckets[bucket_name]
def get_all_buckets(self, headers=NOT_IMPL):
return six.itervalues(self.buckets)
# We only mock a single provider/connection.
mock_connection = MockConnection()
class MockBucketStorageUri(object):
delim = '/'
def __init__(self, scheme, bucket_name=None, object_name=None,
debug=NOT_IMPL, suppress_consec_slashes=NOT_IMPL,
version_id=None, generation=None, is_latest=False):
self.scheme = scheme
self.bucket_name = bucket_name
self.object_name = object_name
self.suppress_consec_slashes = suppress_consec_slashes
if self.bucket_name and self.object_name:
self.uri = ('%s://%s/%s' % (self.scheme, self.bucket_name,
self.object_name))
elif self.bucket_name:
self.uri = ('%s://%s/' % (self.scheme, self.bucket_name))
else:
self.uri = ('%s://' % self.scheme)
self.version_id = version_id
self.generation = generation and int(generation)
self.is_version_specific = (bool(self.generation)
or bool(self.version_id))
self.is_latest = is_latest
if bucket_name and object_name:
self.versionless_uri = '%s://%s/%s' % (scheme, bucket_name,
object_name)
def __repr__(self):
"""Returns string representation of URI."""
return self.uri
def acl_class(self):
return MockAcl
def canned_acls(self):
return boto.provider.Provider('aws').canned_acls
def clone_replace_name(self, new_name):
return self.__class__(self.scheme, self.bucket_name, new_name)
def clone_replace_key(self, key):
return self.__class__(
key.provider.get_provider_name(),
bucket_name=key.bucket.name,
object_name=key.name,
suppress_consec_slashes=self.suppress_consec_slashes,
version_id=getattr(key, 'version_id', None),
generation=getattr(key, 'generation', None),
is_latest=getattr(key, 'is_latest', None))
def connect(self, access_key_id=NOT_IMPL, secret_access_key=NOT_IMPL):
return mock_connection
def create_bucket(self, headers=NOT_IMPL, location=NOT_IMPL,
policy=NOT_IMPL, storage_class=NOT_IMPL):
return self.connect().create_bucket(self.bucket_name)
def delete_bucket(self, headers=NOT_IMPL):
return self.connect().delete_bucket(self.bucket_name)
def get_versioning_config(self, headers=NOT_IMPL):
self.get_bucket().get_versioning_status(headers)
def has_version(self):
return (issubclass(type(self), MockBucketStorageUri)
and ((self.version_id is not None)
or (self.generation is not None)))
def delete_key(self, validate=NOT_IMPL, headers=NOT_IMPL,
version_id=NOT_IMPL, mfa_token=NOT_IMPL):
self.get_bucket().delete_key(self.object_name)
def disable_logging(self, validate=NOT_IMPL, headers=NOT_IMPL,
version_id=NOT_IMPL):
self.get_bucket().disable_logging()
def enable_logging(self, target_bucket, target_prefix, validate=NOT_IMPL,
headers=NOT_IMPL, version_id=NOT_IMPL):
self.get_bucket().enable_logging(target_bucket)
def get_logging_config(self, validate=NOT_IMPL, headers=NOT_IMPL,
version_id=NOT_IMPL):
return self.get_bucket().get_logging_config()
def equals(self, uri):
return self.uri == uri.uri
def get_acl(self, validate=NOT_IMPL, headers=NOT_IMPL, version_id=NOT_IMPL):
return self.get_bucket().get_acl(self.object_name)
def get_def_acl(self, validate=NOT_IMPL, headers=NOT_IMPL,
version_id=NOT_IMPL):
return self.get_bucket().get_def_acl(self.object_name)
def get_subresource(self, subresource, validate=NOT_IMPL, headers=NOT_IMPL,
version_id=NOT_IMPL):
return self.get_bucket().get_subresource(subresource, self.object_name)
def get_all_buckets(self, headers=NOT_IMPL):
return self.connect().get_all_buckets()
def get_all_keys(self, validate=NOT_IMPL, headers=NOT_IMPL):
return self.get_bucket().get_all_keys(self)
def list_bucket(self, prefix='', delimiter='', headers=NOT_IMPL,
all_versions=NOT_IMPL):
return self.get_bucket().list(prefix=prefix, delimiter=delimiter)
def get_bucket(self, validate=NOT_IMPL, headers=NOT_IMPL):
return self.connect().get_bucket(self.bucket_name)
def get_key(self, validate=NOT_IMPL, headers=NOT_IMPL,
version_id=NOT_IMPL):
return self.get_bucket().get_key(self.object_name)
def is_file_uri(self):
return False
def is_cloud_uri(self):
return True
def names_container(self):
return bool(not self.object_name)
def names_singleton(self):
return bool(self.object_name)
def names_directory(self):
return False
def names_provider(self):
return bool(not self.bucket_name)
def names_bucket(self):
return self.names_container()
def names_file(self):
return False
def names_object(self):
return not self.names_container()
def is_stream(self):
return False
def new_key(self, validate=NOT_IMPL, headers=NOT_IMPL):
bucket = self.get_bucket()
return bucket.new_key(self.object_name)
def set_acl(self, acl_or_str, key_name='', validate=NOT_IMPL,
headers=NOT_IMPL, version_id=NOT_IMPL):
self.get_bucket().set_acl(acl_or_str, key_name)
def set_def_acl(self, acl_or_str, key_name=NOT_IMPL, validate=NOT_IMPL,
headers=NOT_IMPL, version_id=NOT_IMPL):
self.get_bucket().set_def_acl(acl_or_str)
def set_subresource(self, subresource, value, validate=NOT_IMPL,
headers=NOT_IMPL, version_id=NOT_IMPL):
self.get_bucket().set_subresource(subresource, value, self.object_name)
def copy_key(self, src_bucket_name, src_key_name, metadata=NOT_IMPL,
src_version_id=NOT_IMPL, storage_class=NOT_IMPL,
preserve_acl=NOT_IMPL, encrypt_key=NOT_IMPL, headers=NOT_IMPL,
query_args=NOT_IMPL, src_generation=NOT_IMPL):
dst_bucket = self.get_bucket()
return dst_bucket.copy_key(new_key_name=self.object_name,
src_bucket_name=src_bucket_name,
src_key_name=src_key_name)
def set_contents_from_string(self, s, headers=NOT_IMPL, replace=NOT_IMPL,
cb=NOT_IMPL, num_cb=NOT_IMPL, policy=NOT_IMPL,
md5=NOT_IMPL, reduced_redundancy=NOT_IMPL):
key = self.new_key()
key.set_contents_from_string(s)
def set_contents_from_file(self, fp, headers=None, replace=NOT_IMPL,
cb=NOT_IMPL, num_cb=NOT_IMPL, policy=NOT_IMPL,
md5=NOT_IMPL, size=NOT_IMPL, rewind=NOT_IMPL,
res_upload_handler=NOT_IMPL):
key = self.new_key()
return key.set_contents_from_file(fp, headers=headers)
def set_contents_from_stream(self, fp, headers=NOT_IMPL, replace=NOT_IMPL,
cb=NOT_IMPL, num_cb=NOT_IMPL, policy=NOT_IMPL,
reduced_redundancy=NOT_IMPL,
query_args=NOT_IMPL, size=NOT_IMPL):
        key = self.new_key()
        return key.set_contents_from_stream(fp)
def get_contents_to_file(self, fp, headers=NOT_IMPL, cb=NOT_IMPL,
num_cb=NOT_IMPL, torrent=NOT_IMPL,
version_id=NOT_IMPL, res_download_handler=NOT_IMPL,
response_headers=NOT_IMPL):
key = self.get_key()
key.get_contents_to_file(fp)
def get_contents_to_stream(self, fp, headers=NOT_IMPL, cb=NOT_IMPL,
num_cb=NOT_IMPL, version_id=NOT_IMPL):
key = self.get_key()
return key.get_contents_to_file(fp)
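# Minimal illustrative sketch exercising the mocks defined above; it only uses
# methods implemented in this module.
if __name__ == '__main__':
    uri = MockBucketStorageUri('gs', bucket_name='bucket', object_name='obj')
    uri.create_bucket()
    uri.set_contents_from_string('hello')
    assert uri.get_key().data == 'hello'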
|
torchelie/nn/graph.py | maxferrari/Torchelie | 117 | 11132219 | <gh_stars>100-1000
import torch.nn as nn
from typing import Union, Tuple, List
from torchelie.utils import experimental
def tup(x):
if isinstance(x, (tuple, list)):
return list(x)
return [x]
ArgNames = Union[str, List[str]]
NamedModule = Tuple[str, nn.Module]
class ModuleGraph(nn.Sequential):
"""
Allows description of networks as computation graphs. The graph is
constructed by labelling inputs and outputs of each node. Each node will be
ran in declaration order, fetching its input values from a pool of named
values populated from previous node's output values and keyword arguments
in forward.
Simple example:
>>> m = tnn.ModuleGraph(outputs='y')
>>> m.add_operation(
inputs=['x'],
operation=nn.Linear(10, 20),
name='linear',
outputs=['y'])
>>> m(x=torch.randn(1, 10))
<a bunch of numbers>
Multiple inputs example:
If a layer takes more than 1 input, labels can be a tuple or a list of
labels instead. The same applies if a module returns more than 1 output
values.
>>> m = tnn.ModuleGraph(outputs=['x1', 'y'])
    >>> m.add_operation(
            inputs=['x0'],
            operation=nn.Linear(10, 20),
            name='linear',
            outputs=['x1'])
    >>> m.add_operation(
            inputs=['x1', 'z'],
            operation=tnn.AdaIN2d(20, 3),
            name='adain',
            outputs=['y'])
>>> m(x0=torch.randn(1, 10), z=torch.randn(1, 3))['y']
<a bunch of numbers>
"""
def __init__(self, outputs: Union[str, List[str]]) -> None:
super().__init__()
self.ins: List[List[str]] = []
self.outs: List[List[str]] = []
self.outputs = outputs
def add_operation(self, inputs: List[str], outputs: List[str], name: str,
operation: nn.Module) -> 'ModuleGraph':
self.ins.append(inputs)
self.outs.append(outputs)
self.add_module(name, operation)
return self
def forward(self, **args):
variables = dict(args)
for i_names, f, o_names in zip(self.ins, self._modules.values(),
self.outs):
ins = [variables[k] for k in i_names]
outs = tup(f(*ins))
for o, k in zip(outs, o_names):
variables[k] = o
if isinstance(self.outputs, str):
return variables[self.outputs]
return {k: variables[k] for k in self.outputs}
@experimental
def to_dot(self) -> str:
txt = ''
for i_names, f_nm, o_names in zip(self.ins, self._modules.keys(),
self.outs):
for k in i_names:
txt += f'{k} -> {f_nm};\n'
for k in o_names:
txt += f'{f_nm} -> {k};\n'
txt += f'{f_nm} [shape=square];\n'
return txt
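# Minimal usage sketch (illustrative); it relies only on the API defined above
# and a plain torch.nn layer.
if __name__ == '__main__':
    import torch
    g = ModuleGraph(outputs='y')
    g.add_operation(inputs=['x'], operation=nn.Linear(10, 20), name='fc',
                    outputs=['y'])
    print(g(x=torch.randn(1, 10)).shape)  # torch.Size([1, 20])
    print(g.to_dot())  # graphviz edges: x -> fc; fc -> y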
|
benchexec/tools/goblint.py | SvenUmbricht/benchexec | 137 | 11132237 | <gh_stars>100-1000
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 <NAME> <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.tools.template
import benchexec.result as result
import re
class Tool(benchexec.tools.template.BaseTool2):
"""
Tool info for Goblint.
URL: https://goblint.in.tum.de/
"""
def executable(self, tool_locator):
return tool_locator.find_executable("goblint")
def version(self, executable):
return self._version_from_tool(executable, line_prefix="Goblint version: ")
def name(self):
return "Goblint"
_DATA_MODELS = {"ILP32": "32bit", "LP64": "64bit"}
def cmdline(self, executable, options, task, rlimits):
additional_options = []
if task.property_file:
additional_options += ["--sets", "ana.specification", task.property_file]
if task.options:
data_model = task.options.get("data_model")
if data_model:
data_model_option = self._DATA_MODELS.get(data_model)
if data_model_option:
additional_options += [
"--sets",
"exp.architecture",
data_model_option,
]
else:
raise benchexec.tools.template.UnsupportedFeatureException(
f"Unsupported data_model '{data_model}'"
)
return [
executable,
*options,
*additional_options,
*task.input_files,
]
def determine_result(self, run):
status = None
for line in run.output:
if "Fatal error" in line:
if "Assertion failed" in line:
return "ASSERTION"
else:
m = re.search(
r"Fatal error: exception (Stack overflow|Out of memory|[A-Za-z._]+)",
line,
)
if m:
return f"EXCEPTION ({m.group(1)})"
else:
return "EXCEPTION"
else:
m = re.match(r"SV-COMP result: (.*)", line)
if m:
status = m.group(1)
if status:
return status
if run.exit_code.value != 0:
return result.RESULT_ERROR
else:
return result.RESULT_UNKNOWN
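# Illustrative mapping performed by determine_result() above (hypothetical
# output lines, shown here only as documentation):
#
#   "SV-COMP result: TRUE"                    -> "TRUE"
#   "Fatal error: exception Stack overflow"   -> "EXCEPTION (Stack overflow)"
#   no recognised line, non-zero exit code    -> result.RESULT_ERROR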
|
update/src/testing.py | isaiasfsilva/ROLO | 962 | 11132247 | from utils_dataset import *
from utils_draw_coord import debug_decimal_coord
from utils_io_folder import *
from utils_io_coord import *

import os
import cv2
import numpy as np
def get_batch_by_repeat(ndarray, batchsize):
batch_ndarray = []
for id in range(batchsize):
batch_ndarray.append(ndarray)
return batch_ndarray
def test(self, sess, loss, batch_pred_coords):
print("\n\n\n--------------------------------------------TESTING OTB-50---------------------------------------------------------\n")
num_videos = 50
loss_dataset_total = 0
OTB_folder_path = "/home/ngh/dev/ROLO-dev/benchmark/DATA/"
for video_id in range(num_videos):
if video_id in [1, 5, 16, 20, 21, 22, 23, 28, 30, 32, 36, 42, 43, 46]: continue
[img_wid, img_ht, sequence_name, st_frame, self.training_iters] = choose_video_sequence_from_OTB50(video_id)
print('testing sequence: ', sequence_name)
x_path = os.path.join(OTB_folder_path, sequence_name, 'yolo_out/')
y_path = os.path.join(OTB_folder_path, sequence_name, 'groundtruth_rect.txt')
self.output_path = os.path.join(OTB_folder_path, sequence_name, 'rolo_loc_test/')
create_folder(self.output_path)
img_folder_path = os.path.join(OTB_folder_path, sequence_name, 'img/')
img_paths = get_immediate_childfile_paths(img_folder_path)
loss_seq_total = frame_id = 0
offset_id = self.nsteps
init_state_zeros = np.zeros((self.batchsize, 2*self.len_vec))
while frame_id < self.training_iters- self.nsteps:
''' The index start from zero, while the frame usually starts from one '''
st_id = st_frame - 1
if frame_id < st_id:
frame_id += 1
continue
''' Load input data & ground truth '''
xs = load_vecs_of_stepsize_in_numpy_folder(x_path,
frame_id - st_id,
self.nsteps)
ys = load_gt_decimal_coords_from_file(y_path,
frame_id - st_id + offset_id,
img_wid,
img_ht)
batch_xs = get_batch_by_repeat(xs, self.batchsize)
batch_ys = get_batch_by_repeat(ys, self.batchsize)
batch_xs = np.reshape(batch_xs, [self.batchsize, self.nsteps, self.len_vec])
batch_ys = np.reshape(batch_ys, [self.batchsize, 4])
''' Save pred_location to file '''
#utils.save_rolo_output(self.output_path, pred_loc, id, self.nsteps, self.batchsize)
init_state = init_state_zeros
#init_state = sess.run(self.final_state,
# feed_dict={self.x: batch_xs,
# self.y: batch_ys,
# self.istate: init_state_zeros})
batch_loss = sess.run(loss,
feed_dict={self.x: batch_xs,
self.y: batch_ys,
self.istate: init_state})
loss_seq_total += batch_loss
if self.display_validate is True:
coord_decimal_gt = sess.run(self.y,
feed_dict = {self.x: batch_xs,
self.y: batch_ys,
self.istate: init_state})
coord_decimal_pred = sess.run(batch_pred_coords,
feed_dict = {self.x: batch_xs,
self.y: batch_ys,
self.istate: init_state}
)[0]
img = cv2.imread(img_paths[frame_id])
debug_decimal_coord(img, coord_decimal_pred)
frame_id += 1
loss_seq_avg = loss_seq_total / frame_id
        print("Avg loss for " + sequence_name + ": " + str(loss_seq_avg))
loss_dataset_total += loss_seq_avg
    print('Total loss of Dataset: %f \n' % loss_dataset_total)
print("-----------------------------------------TESTING OTB-50 END---------------------------------------------------------\n\n\n")
return loss_dataset_total
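# Illustrative sketch of get_batch_by_repeat (sizes below are placeholders):
#
#   clip = np.zeros((3, 4102))                 # one stepsize-long feature clip
#   batch = get_batch_by_repeat(clip, 8)       # list of 8 identical clips
#   batch = np.reshape(batch, [8, 3, 4102])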
|
test/test_global_vars.py | robertmaynard/hpc-container-maker | 340 | 11132291 |
from __future__ import unicode_literals
from __future__ import print_function
import logging # pylint: disable=unused-import
import unittest
import os
from helpers import docker, ubuntu
from hpccm.common import container_type
from hpccm.recipe import recipe
class Test_global_vars(unittest.TestCase):
def test_global_vars(self):
"""Global variables"""
path = os.path.dirname(__file__)
rf = os.path.join(path, 'global_vars_recipe.py')
try:
recipe(rf, ctype=container_type.SINGULARITY, raise_exceptions=True)
except Exception as e:
self.fail(e)
|
data/dataloader.py | xxchenxx/classifier-balancing | 734 | 11132311 | <reponame>xxchenxx/classifier-balancing
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the OLTR project which
notice below and in LICENSE in the root directory of
this source tree.
Copyright (c) 2019, <NAME>
All rights reserved.
"""
import numpy as np
import torchvision
from torch.utils.data import Dataset, DataLoader, ConcatDataset
from torchvision import transforms
import os
from PIL import Image
# Image statistics
RGB_statistics = {
'iNaturalist18': {
'mean': [0.466, 0.471, 0.380],
'std': [0.195, 0.194, 0.192]
},
'default': {
'mean': [0.485, 0.456, 0.406],
'std':[0.229, 0.224, 0.225]
}
}
# Data transformation with augmentation
def get_data_transform(split, rgb_mean, rbg_std, key='default'):
data_transforms = {
'train': transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(rgb_mean, rbg_std)
]) if key == 'iNaturalist18' else transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0),
transforms.ToTensor(),
transforms.Normalize(rgb_mean, rbg_std)
]),
'val': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(rgb_mean, rbg_std)
]),
'test': transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(rgb_mean, rbg_std)
])
}
return data_transforms[split]
# Dataset
class LT_Dataset(Dataset):
def __init__(self, root, txt, transform=None):
self.img_path = []
self.labels = []
self.transform = transform
with open(txt) as f:
for line in f:
self.img_path.append(os.path.join(root, line.split()[0]))
self.labels.append(int(line.split()[1]))
def __len__(self):
return len(self.labels)
def __getitem__(self, index):
path = self.img_path[index]
label = self.labels[index]
with open(path, 'rb') as f:
sample = Image.open(f).convert('RGB')
if self.transform is not None:
sample = self.transform(sample)
return sample, label, index
# Load datasets
def load_data(data_root, dataset, phase, batch_size, sampler_dic=None, num_workers=4, test_open=False, shuffle=True):
if phase == 'train_plain':
txt_split = 'train'
elif phase == 'train_val':
txt_split = 'val'
phase = 'train'
else:
txt_split = phase
txt = './data/%s/%s_%s.txt'%(dataset, dataset, txt_split)
# txt = './data/%s/%s_%s.txt'%(dataset, dataset, (phase if phase != 'train_plain' else 'train'))
print('Loading data from %s' % (txt))
if dataset == 'iNaturalist18':
print('===> Loading iNaturalist18 statistics')
key = 'iNaturalist18'
else:
key = 'default'
rgb_mean, rgb_std = RGB_statistics[key]['mean'], RGB_statistics[key]['std']
if phase not in ['train', 'val']:
transform = get_data_transform('test', rgb_mean, rgb_std, key)
else:
transform = get_data_transform(phase, rgb_mean, rgb_std, key)
print('Use data transformation:', transform)
set_ = LT_Dataset(data_root, txt, transform)
print(len(set_))
if phase == 'test' and test_open:
open_txt = './data/%s/%s_open.txt'%(dataset, dataset)
print('Testing with opensets from %s'%(open_txt))
open_set_ = LT_Dataset('./data/%s/%s_open'%(dataset, dataset), open_txt, transform)
set_ = ConcatDataset([set_, open_set_])
if sampler_dic and phase == 'train':
print('Using sampler: ', sampler_dic['sampler'])
# print('Sample %s samples per-class.' % sampler_dic['num_samples_cls'])
print('Sampler parameters: ', sampler_dic['params'])
return DataLoader(dataset=set_, batch_size=batch_size, shuffle=False,
sampler=sampler_dic['sampler'](set_, **sampler_dic['params']),
num_workers=num_workers)
else:
print('No sampler.')
print('Shuffle is %s.' % (shuffle))
return DataLoader(dataset=set_, batch_size=batch_size,
shuffle=shuffle, num_workers=num_workers)
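# Illustrative call (dataset name and root are placeholders); each batch yields
# (images, labels, indexes) because LT_Dataset.__getitem__ returns the index too.
#
#   loader = load_data(data_root='./data/ImageNet_LT', dataset='ImageNet_LT',
#                      phase='train', batch_size=128, sampler_dic=None,
#                      num_workers=4, shuffle=True)
#   for images, labels, indexes in loader:
#       ...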
|
var/spack/repos/builtin/packages/py-libensemble/package.py | LiamBindle/spack | 2,360 | 11132319 | <gh_stars>1000+
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
class PyLibensemble(PythonPackage):
"""Library for managing ensemble-like collections of computations."""
homepage = "https://libensemble.readthedocs.io"
pypi = "libensemble/libensemble-0.8.0.tar.gz"
git = "https://github.com/Libensemble/libensemble.git"
maintainers = ['shuds13']
tags = ['e4s']
version('develop', branch='develop')
version('0.8.0', sha256='1102e56c6381c9692de6888add23780ec69f18ad33f12119dc0391776a9a7300')
version('0.7.2', sha256='69b64304d1ecce4d57687ea6062f89bd813ae93b2a290bb1f595c5626ab6f197')
version('0.7.1', sha256='5cb294269624c1284ea25be9ed3bc668a2333e21e97a97b57ad339eb85435e46')
version('0.7.0', sha256='4c3c16ef3d4750b7a54198fae5d7ae402c5f5411ae85189da41afd20e20027dc')
version('0.6.0', sha256='3f6a926d3868da53835ed93fc2e2a047b368dacb648c7608ee3a66debcee4d38')
version('0.5.2', sha256='3e36c29a4a2adc0984ecfcc998cb5bb8a2cdfbe7a1ae92f7b35b06e41d21b889')
version('0.5.1', sha256='522e0cc086a3ed75a101b704c0fe01eae07f2684bd8d6da7bdfe9371d3187362')
version('0.5.0', sha256='c4623171dee049bfaa38a9c433609299a56b1afb774db8b71321247bc7556b8f')
version('0.4.1', sha256='282c32ffb79d84cc80b5cc7043c202d5f0b8ebff10f63924752f092e3938db5e')
version('0.4.0', sha256='9384aa3a58cbc20bbd1c6fddfadb5e6a943d593a3a81c8665f030dbc6d76e76e')
version('0.3.0', sha256='c8efdf45d0da0ef6299ee778cea1c285c95972af70d3a729ee6dc855e66f9294')
version('0.2.0', sha256='ecac7275d4d0f4a5e497e5c9ef2cd998da82b2c020a0fb87546eeea262f495ff')
version('0.1.0', sha256='0b27c59ae80f7af8b1bee92fcf2eb6c9a8fd3494bf2eb6b3ea17a7c03d3726bb')
variant('mpi', default=False, description='Install with MPI')
variant('scipy', default=False, description='Install with scipy')
variant('petsc4py', default=False, description='Install with petsc4py')
variant('nlopt', default=False, description='Install with nlopt')
variant('mpmath', default=False, description='Install with mpmath')
variant('deap', default=False, description='Install with DEAP')
variant('tasmanian', default=False, description='Install with tasmanian')
variant('pyyaml', default=False, description='Install with pyyaml')
# depends_on('[email protected]:2.8,3.3:', when='@:0.4.1')
# depends_on('[email protected]:', when='@0.5.0:')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-psutil', type=('build', 'run'), when='@0.7.1:')
depends_on('mpi', when='@:0.4.1')
depends_on('mpi', when='+mpi')
depends_on('[email protected]:', type=('build', 'run'), when='@:0.4.1')
depends_on('[email protected]:', type=('build', 'run'), when='+mpi')
depends_on('py-scipy', type=('build', 'run'), when='+scipy')
depends_on('py-petsc4py', type=('build', 'run'), when='+petsc4py')
depends_on('py-petsc4py@main', type=('build', 'run'), when='@develop+petsc4py')
depends_on('nlopt', type=('build', 'run'), when='+nlopt')
depends_on('py-mpmath', type=('build', 'run'), when='+mpmath')
depends_on('py-deap', type=('build', 'run'), when='+deap')
depends_on('tasmanian+python', type=('build', 'run'), when='+tasmanian')
depends_on('py-pyyaml', type=('build', 'run'), when='+pyyaml')
conflicts('~mpi', when='@:0.4.1')
@run_after('install')
def cache_test_sources(self):
"""Copy the example source files after the package is installed to an
install test subdirectory for use during `spack test run`."""
self.cache_extra_test_sources(join_path('examples', 'calling_scripts',
'regression_tests'))
def run_tutorial_tests(self, exe):
"""Run example stand alone test"""
test_dir = join_path(self.test_suite.current_test_cache_dir,
'examples', 'calling_scripts', 'regression_tests')
if not os.path.isfile(join_path(test_dir, exe)):
print('Skipping {0} test'.format(exe))
return
self.run_test(self.spec['python'].command.path,
options=[exe, '--comms', 'local', '--nworkers', '2'],
purpose='test: run {0} example'.format(exe),
work_dir=test_dir)
def test(self):
self.run_tutorial_tests('test_uniform_sampling.py')
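# Illustrative install/test commands for this package (version is a placeholder):
#
#   spack install py-libensemble@0.8.0 +mpi +scipy
#   spack test run py-libensemble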
|
metrics.py | Fred62879/ACORN | 186 | 11132324 | import numpy as np
import torch
import trimesh
from scipy.spatial import cKDTree as KDTree
from inside_mesh.triangle_hash import TriangleHash as _TriangleHash
'''
Some code included from 'inside_mesh' library of Occupancy Networks
https://github.com/autonomousvision/occupancy_networks
'''
def define_grid_3d(N, voxel_origin=[-1, -1, -1], voxel_size=None):
''' define NxNxN coordinate grid across [-1, 1]
voxel_origin is the (bottom, left, down) corner, not the middle '''
if not voxel_size:
voxel_size = 2.0 / (N - 1)
# initialize empty tensors
overall_index = torch.arange(0, N ** 3, 1, out=torch.LongTensor())
grid = torch.zeros(N ** 3, 3)
# transform first 3 columns to be x, y, z voxel index
# every possible comb'n of [0..N,0..N,0..N]
grid[:, 2] = overall_index % N # [0,1,2,...,N-1,N,0,1,2,...,N]
grid[:, 1] = (overall_index.long() // N) % N # [N [N 0's, ..., N N's]]
grid[:, 0] = ((overall_index.long() // N) // N) % N # [N*N 0's,...,N*N N's]
# transform first 3 columns: voxel indices --> voxel coordinates
grid[:, 0] = (grid[:, 0] * voxel_size) + voxel_origin[2]
grid[:, 1] = (grid[:, 1] * voxel_size) + voxel_origin[1]
grid[:, 2] = (grid[:, 2] * voxel_size) + voxel_origin[0]
return grid
def compute_iou(path_gt, path_pr, N=128, sphere=False, sphere_radius=0.25):
''' compute iou score
parameters
path_gt: path to ground-truth mesh (.ply or .obj)
path_pr: path to predicted mesh (.ply or .obj)
N: NxNxN grid resolution at which to compute iou '''
# define NxNxN coordinate grid across [-1,1]
grid = np.array(define_grid_3d(N))
# load mesh
occ_pr = MeshDataset(path_pr)
# compute occupancy at specified grid points
if sphere:
occ_gt = torch.from_numpy(np.linalg.norm(grid, axis=-1) <= sphere_radius)
else:
occ_gt = MeshDataset(path_gt)
occ_gt = torch.tensor(check_mesh_contains(occ_gt.mesh, grid))
occ_pr = torch.tensor(check_mesh_contains(occ_pr.mesh, grid))
# compute iou
area_union = torch.sum((occ_gt | occ_pr).float())
area_intersect = torch.sum((occ_gt & occ_pr).float())
iou = area_intersect / area_union
return iou.item()
def compute_trimesh_chamfer(mesh1, mesh2, num_mesh_samples=300000):
"""
    This function computes a symmetric chamfer distance, i.e. the sum of both
    directed chamfer distances.

    mesh1: trimesh.base.Trimesh of the generated/output mesh.
    mesh2: trimesh.base.Trimesh of the ground-truth mesh.
    num_mesh_samples points are sampled from the surface of each mesh before
    computing the squared nearest-neighbour distances in both directions.
"""
gen_points_sampled = trimesh.sample.sample_surface(mesh1, num_mesh_samples)[0]
gt_points_np = trimesh.sample.sample_surface(mesh2, num_mesh_samples)[0]
# one direction
gen_points_kd_tree = KDTree(gen_points_sampled)
one_distances, one_vertex_ids = gen_points_kd_tree.query(gt_points_np)
gt_to_gen_chamfer = np.mean(np.square(one_distances))
# other direction
gt_points_kd_tree = KDTree(gt_points_np)
two_distances, two_vertex_ids = gt_points_kd_tree.query(gen_points_sampled)
gen_to_gt_chamfer = np.mean(np.square(two_distances))
chamfer_dist = gt_to_gen_chamfer + gen_to_gt_chamfer
return chamfer_dist
class MeshDataset():
def __init__(self, path_mesh, sample=False, num_pts=0):
if not path_mesh:
return
self.mesh = trimesh.load(path_mesh, process=False,
force='mesh', skip_materials=True)
def check_mesh_contains(mesh, points, hash_resolution=512):
intersector = MeshIntersector(mesh, hash_resolution)
contains = intersector.query(points)
return contains
class MeshIntersector:
def __init__(self, mesh, resolution=512):
triangles = mesh.vertices[mesh.faces].astype(np.float64)
n_tri = triangles.shape[0]
self.resolution = resolution
self.bbox_min = triangles.reshape(3 * n_tri, 3).min(axis=0)
self.bbox_max = triangles.reshape(3 * n_tri, 3).max(axis=0)
        # Translate and scale it to [0.5, self.resolution - 0.5]^3
self.scale = (resolution - 1) / (self.bbox_max - self.bbox_min)
self.translate = 0.5 - self.scale * self.bbox_min
self._triangles = triangles = self.rescale(triangles)
triangles2d = triangles[:, :, :2]
self._tri_intersector2d = TriangleIntersector2d(
triangles2d, resolution)
def query(self, points):
# Rescale points
points = self.rescale(points)
# placeholder result with no hits we'll fill in later
        contains = np.zeros(len(points), dtype=bool)
# cull points outside of the axis aligned bounding box
# this avoids running ray tests unless points are close
inside_aabb = np.all(
(0 <= points) & (points <= self.resolution), axis=1)
if not inside_aabb.any():
return contains
# Only consider points inside bounding box
mask = inside_aabb
points = points[mask]
# Compute intersection depth and check order
points_indices, tri_indices = self._tri_intersector2d.query(points[:, :2])
triangles_intersect = self._triangles[tri_indices]
points_intersect = points[points_indices]
depth_intersect, abs_n_2 = self.compute_intersection_depth(
points_intersect, triangles_intersect)
# Count number of intersections in both directions
smaller_depth = depth_intersect >= points_intersect[:, 2] * abs_n_2
bigger_depth = depth_intersect < points_intersect[:, 2] * abs_n_2
points_indices_0 = points_indices[smaller_depth]
points_indices_1 = points_indices[bigger_depth]
nintersect0 = np.bincount(points_indices_0, minlength=points.shape[0])
nintersect1 = np.bincount(points_indices_1, minlength=points.shape[0])
# Check if point contained in mesh
contains1 = (np.mod(nintersect0, 2) == 1)
contains2 = (np.mod(nintersect1, 2) == 1)
contains[mask] = (contains1 & contains2)
return contains
def compute_intersection_depth(self, points, triangles):
t1 = triangles[:, 0, :]
t2 = triangles[:, 1, :]
t3 = triangles[:, 2, :]
v1 = t3 - t1
v2 = t2 - t1
normals = np.cross(v1, v2)
alpha = np.sum(normals[:, :2] * (t1[:, :2] - points[:, :2]), axis=1)
n_2 = normals[:, 2]
t1_2 = t1[:, 2]
s_n_2 = np.sign(n_2)
abs_n_2 = np.abs(n_2)
mask = (abs_n_2 != 0)
depth_intersect = np.full(points.shape[0], np.nan)
depth_intersect[mask] = \
t1_2[mask] * abs_n_2[mask] + alpha[mask] * s_n_2[mask]
return depth_intersect, abs_n_2
def rescale(self, array):
array = self.scale * array + self.translate
return array
class TriangleIntersector2d:
def __init__(self, triangles, resolution=128):
self.triangles = triangles
self.tri_hash = _TriangleHash(triangles, resolution)
def query(self, points):
point_indices, tri_indices = self.tri_hash.query(points)
point_indices = np.array(point_indices, dtype=np.int64)
tri_indices = np.array(tri_indices, dtype=np.int64)
points = points[point_indices]
triangles = self.triangles[tri_indices]
mask = self.check_triangles(points, triangles)
point_indices = point_indices[mask]
tri_indices = tri_indices[mask]
return point_indices, tri_indices
def check_triangles(self, points, triangles):
        contains = np.zeros(points.shape[0], dtype=bool)
A = triangles[:, :2] - triangles[:, 2:]
A = A.transpose([0, 2, 1])
y = points - triangles[:, 2]
detA = A[:, 0, 0] * A[:, 1, 1] - A[:, 0, 1] * A[:, 1, 0]
mask = (np.abs(detA) != 0.)
A = A[mask]
y = y[mask]
detA = detA[mask]
s_detA = np.sign(detA)
abs_detA = np.abs(detA)
u = (A[:, 1, 1] * y[:, 0] - A[:, 0, 1] * y[:, 1]) * s_detA
v = (-A[:, 1, 0] * y[:, 0] + A[:, 0, 0] * y[:, 1]) * s_detA
sum_uv = u + v
contains[mask] = (
(0 < u) & (u < abs_detA) & (0 < v) & (v < abs_detA)
& (0 < sum_uv) & (sum_uv < abs_detA)
)
return contains
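# Illustrative usage (paths are placeholders; compute_iou expects watertight
# meshes so that inside/outside queries are well defined):
#
#   iou = compute_iou('gt.ply', 'pred.ply', N=128)
#   cd = compute_trimesh_chamfer(trimesh.load('pred.ply', force='mesh'),
#                                trimesh.load('gt.ply', force='mesh'))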
|
.circleci/codegen_validation/normalize_yaml_fragment.py | Hacky-DH/pytorch | 60,067 | 11132329 | #!/usr/bin/env python3
import os
import sys
import yaml
# Need to import modules that lie on an upward-relative path
sys.path.append(os.path.join(sys.path[0], '..'))
import cimodel.lib.miniyaml as miniyaml
def regurgitate(depth, use_pyyaml_formatter=False):
data = yaml.safe_load(sys.stdin)
if use_pyyaml_formatter:
output = yaml.dump(data, sort_keys=True)
sys.stdout.write(output)
else:
miniyaml.render(sys.stdout, data, depth)
if __name__ == "__main__":
regurgitate(3)
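# Illustrative invocation: the script reads YAML from stdin and writes the
# normalized form to stdout, e.g.
#
#   cat fragment.yml | ./normalize_yaml_fragment.py > normalized.yml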
|
.venv/lib/python3.10/site-packages/lunr/utils.py | plocandido/docinfrati | 128 | 11132337 | <gh_stars>100-1000
def as_string(obj):
return "" if not obj else str(obj)
class CompleteSet(set):
def union(self, other):
return self
def intersection(self, other):
return set(other)
def __contains__(self, y):
return True
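# Minimal illustrative check of the intended semantics.
if __name__ == "__main__":
    everything = CompleteSet()
    assert "anything" in everything
    assert everything.union({"a"}) is everything
    assert everything.intersection({"a", "b"}) == {"a", "b"}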
|
scripts/external_libs/python-daemon-2.0.5/daemon/daemon.py | timgates42/trex-core | 956 | 11132344 | # -*- coding: utf-8 -*-
# daemon/daemon.py
# Part of ‘python-daemon’, an implementation of PEP 3143.
#
# Copyright © 2008–2015 <NAME> <<EMAIL>>
# Copyright © 2007–2008 <NAME>, <NAME>
# Copyright © 2004–2005 <NAME>
# Copyright © 2003 <NAME>
# Copyright © 2002 <NAME>
# Copyright © 2001 <NAME>
#
# This is free software: you may copy, modify, and/or distribute this work
# under the terms of the Apache License, version 2.0 as published by the
# Apache Software Foundation.
# No warranty expressed or implied. See the file ‘LICENSE.ASF-2’ for details.
""" Daemon process behaviour.
"""
from __future__ import (absolute_import, unicode_literals)
import os
import sys
import resource
import errno
import signal
import socket
import atexit
try:
# Python 2 has both ‘str’ (bytes) and ‘unicode’ (text).
basestring = basestring
unicode = unicode
except NameError:
# Python 3 names the Unicode data type ‘str’.
basestring = str
unicode = str
class DaemonError(Exception):
""" Base exception class for errors from this module. """
def __init__(self, *args, **kwargs):
self._chain_from_context()
super(DaemonError, self).__init__(*args, **kwargs)
def _chain_from_context(self):
_chain_exception_from_existing_exception_context(self, as_cause=True)
class DaemonOSEnvironmentError(DaemonError, OSError):
""" Exception raised when daemon OS environment setup receives error. """
class DaemonProcessDetachError(DaemonError, OSError):
""" Exception raised when process detach fails. """
class DaemonContext:
""" Context for turning the current program into a daemon process.
A `DaemonContext` instance represents the behaviour settings and
process context for the program when it becomes a daemon. The
behaviour and environment is customised by setting options on the
instance, before calling the `open` method.
Each option can be passed as a keyword argument to the `DaemonContext`
constructor, or subsequently altered by assigning to an attribute on
the instance at any time prior to calling `open`. That is, for
options named `wibble` and `wubble`, the following invocation::
foo = daemon.DaemonContext(wibble=bar, wubble=baz)
foo.open()
is equivalent to::
foo = daemon.DaemonContext()
foo.wibble = bar
foo.wubble = baz
foo.open()
The following options are defined.
`files_preserve`
:Default: ``None``
List of files that should *not* be closed when starting the
daemon. If ``None``, all open file descriptors will be closed.
Elements of the list are file descriptors (as returned by a file
object's `fileno()` method) or Python `file` objects. Each
specifies a file that is not to be closed during daemon start.
`chroot_directory`
:Default: ``None``
Full path to a directory to set as the effective root directory of
the process. If ``None``, specifies that the root directory is not
to be changed.
`working_directory`
:Default: ``'/'``
Full path of the working directory to which the process should
change on daemon start.
Since a filesystem cannot be unmounted if a process has its
current working directory on that filesystem, this should either
be left at default or set to a directory that is a sensible “home
directory” for the daemon while it is running.
`umask`
:Default: ``0``
File access creation mask (“umask”) to set for the process on
daemon start.
A daemon should not rely on the parent process's umask value,
which is beyond its control and may prevent creating a file with
the required access mode. So when the daemon context opens, the
umask is set to an explicit known value.
If the conventional value of 0 is too open, consider setting a
value such as 0o022, 0o027, 0o077, or another specific value.
Otherwise, ensure the daemon creates every file with an
explicit access mode for the purpose.
`pidfile`
:Default: ``None``
Context manager for a PID lock file. When the daemon context opens
and closes, it enters and exits the `pidfile` context manager.
`detach_process`
:Default: ``None``
If ``True``, detach the process context when opening the daemon
context; if ``False``, do not detach.
If unspecified (``None``) during initialisation of the instance,
this will be set to ``True`` by default, and ``False`` only if
detaching the process is determined to be redundant; for example,
in the case when the process was started by `init`, by `initd`, or
by `inetd`.
`signal_map`
:Default: system-dependent
Mapping from operating system signals to callback actions.
The mapping is used when the daemon context opens, and determines
the action for each signal's signal handler:
* A value of ``None`` will ignore the signal (by setting the
signal action to ``signal.SIG_IGN``).
* A string value will be used as the name of an attribute on the
``DaemonContext`` instance. The attribute's value will be used
as the action for the signal handler.
* Any other value will be used as the action for the
signal handler. See the ``signal.signal`` documentation
for details of the signal handler interface.
The default value depends on which signals are defined on the
running system. Each item from the list below whose signal is
actually defined in the ``signal`` module will appear in the
default map:
* ``signal.SIGTTIN``: ``None``
* ``signal.SIGTTOU``: ``None``
* ``signal.SIGTSTP``: ``None``
* ``signal.SIGTERM``: ``'terminate'``
Depending on how the program will interact with its child
processes, it may need to specify a signal map that
includes the ``signal.SIGCHLD`` signal (received when a
child process exits). See the specific operating system's
documentation for more detail on how to determine what
circumstances dictate the need for signal handlers.
`uid`
:Default: ``os.getuid()``
`gid`
:Default: ``os.getgid()``
The user ID (“UID”) value and group ID (“GID”) value to switch
the process to on daemon start.
The default values, the real UID and GID of the process, will
relinquish any effective privilege elevation inherited by the
process.
`prevent_core`
:Default: ``True``
If true, prevents the generation of core files, in order to avoid
leaking sensitive information from daemons run as `root`.
`stdin`
:Default: ``None``
`stdout`
:Default: ``None``
`stderr`
:Default: ``None``
Each of `stdin`, `stdout`, and `stderr` is a file-like object
which will be used as the new file for the standard I/O stream
`sys.stdin`, `sys.stdout`, and `sys.stderr` respectively. The file
should therefore be open, with a minimum of mode 'r' in the case
of `stdin`, and mimimum of mode 'w+' in the case of `stdout` and
`stderr`.
If the object has a `fileno()` method that returns a file
descriptor, the corresponding file will be excluded from being
closed during daemon start (that is, it will be treated as though
it were listed in `files_preserve`).
If ``None``, the corresponding system stream is re-bound to the
file named by `os.devnull`.
"""
__metaclass__ = type
def __init__(
self,
chroot_directory=None,
working_directory="/",
umask=0,
uid=None,
gid=None,
prevent_core=True,
detach_process=None,
files_preserve=None,
pidfile=None,
stdin=None,
stdout=None,
stderr=None,
signal_map=None,
):
""" Set up a new instance. """
self.chroot_directory = chroot_directory
self.working_directory = working_directory
self.umask = umask
self.prevent_core = prevent_core
self.files_preserve = files_preserve
self.pidfile = pidfile
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
if uid is None:
uid = os.getuid()
self.uid = uid
if gid is None:
gid = os.getgid()
self.gid = gid
if detach_process is None:
detach_process = is_detach_process_context_required()
self.detach_process = detach_process
if signal_map is None:
signal_map = make_default_signal_map()
self.signal_map = signal_map
self._is_open = False
@property
def is_open(self):
""" ``True`` if the instance is currently open. """
return self._is_open
def open(self):
""" Become a daemon process.
:return: ``None``.
Open the daemon context, turning the current program into a daemon
process. This performs the following steps:
* If this instance's `is_open` property is true, return
immediately. This makes it safe to call `open` multiple times on
an instance.
* If the `prevent_core` attribute is true, set the resource limits
for the process to prevent any core dump from the process.
* If the `chroot_directory` attribute is not ``None``, set the
effective root directory of the process to that directory (via
`os.chroot`).
This allows running the daemon process inside a “chroot gaol”
as a means of limiting the system's exposure to rogue behaviour
by the process. Note that the specified directory needs to
already be set up for this purpose.
* Set the process UID and GID to the `uid` and `gid` attribute
values.
* Close all open file descriptors. This excludes those listed in
the `files_preserve` attribute, and those that correspond to the
`stdin`, `stdout`, or `stderr` attributes.
* Change current working directory to the path specified by the
`working_directory` attribute.
* Reset the file access creation mask to the value specified by
the `umask` attribute.
* If the `detach_process` option is true, detach the current
process into its own process group, and disassociate from any
controlling terminal.
* Set signal handlers as specified by the `signal_map` attribute.
* If any of the attributes `stdin`, `stdout`, `stderr` are not
``None``, bind the system streams `sys.stdin`, `sys.stdout`,
and/or `sys.stderr` to the files represented by the
corresponding attributes. Where the attribute has a file
descriptor, the descriptor is duplicated (instead of re-binding
the name).
* If the `pidfile` attribute is not ``None``, enter its context
manager.
* Mark this instance as open (for the purpose of future `open` and
`close` calls).
* Register the `close` method to be called during Python's exit
processing.
When the function returns, the running program is a daemon
process.
"""
if self.is_open:
return
if self.chroot_directory is not None:
change_root_directory(self.chroot_directory)
if self.prevent_core:
prevent_core_dump()
change_file_creation_mask(self.umask)
change_working_directory(self.working_directory)
change_process_owner(self.uid, self.gid)
if self.detach_process:
detach_process_context()
signal_handler_map = self._make_signal_handler_map()
set_signal_handlers(signal_handler_map)
exclude_fds = self._get_exclude_file_descriptors()
close_all_open_files(exclude=exclude_fds)
redirect_stream(sys.stdin, self.stdin)
redirect_stream(sys.stdout, self.stdout)
redirect_stream(sys.stderr, self.stderr)
if self.pidfile is not None:
self.pidfile.__enter__()
self._is_open = True
register_atexit_function(self.close)
def __enter__(self):
""" Context manager entry point. """
self.open()
return self
def close(self):
""" Exit the daemon process context.
:return: ``None``.
Close the daemon context. This performs the following steps:
* If this instance's `is_open` property is false, return
immediately. This makes it safe to call `close` multiple times
on an instance.
* If the `pidfile` attribute is not ``None``, exit its context
manager.
* Mark this instance as closed (for the purpose of future `open`
and `close` calls).
"""
if not self.is_open:
return
if self.pidfile is not None:
# Follow the interface for telling a context manager to exit,
# <URL:http://docs.python.org/library/stdtypes.html#typecontextmanager>.
self.pidfile.__exit__(None, None, None)
self._is_open = False
def __exit__(self, exc_type, exc_value, traceback):
""" Context manager exit point. """
self.close()
def terminate(self, signal_number, stack_frame):
""" Signal handler for end-process signals.
:param signal_number: The OS signal number received.
:param stack_frame: The frame object at the point the
signal was received.
:return: ``None``.
Signal handler for the ``signal.SIGTERM`` signal. Performs the
following step:
* Raise a ``SystemExit`` exception explaining the signal.
"""
exception = SystemExit(
"Terminating on signal {signal_number!r}".format(
signal_number=signal_number))
raise exception
def _get_exclude_file_descriptors(self):
""" Get the set of file descriptors to exclude closing.
:return: A set containing the file descriptors for the
files to be preserved.
The file descriptors to be preserved are those from the
items in `files_preserve`, and also each of `stdin`,
`stdout`, and `stderr`. For each item:
* If the item is ``None``, it is omitted from the return
set.
* If the item's ``fileno()`` method returns a value, that
value is in the return set.
* Otherwise, the item is in the return set verbatim.
"""
files_preserve = self.files_preserve
if files_preserve is None:
files_preserve = []
files_preserve.extend(
item for item in [self.stdin, self.stdout, self.stderr]
if hasattr(item, 'fileno'))
exclude_descriptors = set()
for item in files_preserve:
if item is None:
continue
file_descriptor = _get_file_descriptor(item)
if file_descriptor is not None:
exclude_descriptors.add(file_descriptor)
else:
exclude_descriptors.add(item)
return exclude_descriptors
def _make_signal_handler(self, target):
""" Make the signal handler for a specified target object.
:param target: A specification of the target for the
handler; see below.
:return: The value for use by `signal.signal()`.
If `target` is ``None``, return ``signal.SIG_IGN``. If `target`
is a text string, return the attribute of this instance named
by that string. Otherwise, return `target` itself.
"""
if target is None:
result = signal.SIG_IGN
elif isinstance(target, unicode):
name = target
result = getattr(self, name)
else:
result = target
return result
def _make_signal_handler_map(self):
""" Make the map from signals to handlers for this instance.
:return: The constructed signal map for this instance.
Construct a map from signal numbers to handlers for this
context instance, suitable for passing to
`set_signal_handlers`.
"""
signal_handler_map = dict(
(signal_number, self._make_signal_handler(target))
for (signal_number, target) in self.signal_map.items())
return signal_handler_map
def _get_file_descriptor(obj):
""" Get the file descriptor, if the object has one.
:param obj: The object expected to be a file-like object.
:return: The file descriptor iff the file supports it; otherwise
``None``.
The object may be a non-file object. It may also be a
file-like object with no support for a file descriptor. In
either case, return ``None``.
"""
file_descriptor = None
if hasattr(obj, 'fileno'):
try:
file_descriptor = obj.fileno()
except ValueError:
# The item doesn't support a file descriptor.
pass
return file_descriptor
def change_working_directory(directory):
""" Change the working directory of this process.
:param directory: The target directory path.
:return: ``None``.
"""
try:
os.chdir(directory)
except Exception as exc:
error = DaemonOSEnvironmentError(
"Unable to change working directory ({exc})".format(exc=exc))
raise error
def change_root_directory(directory):
""" Change the root directory of this process.
:param directory: The target directory path.
:return: ``None``.
Set the current working directory, then the process root directory,
to the specified `directory`. Requires appropriate OS privileges
for this process.
"""
try:
os.chdir(directory)
os.chroot(directory)
except Exception as exc:
error = DaemonOSEnvironmentError(
"Unable to change root directory ({exc})".format(exc=exc))
raise error
def change_file_creation_mask(mask):
""" Change the file creation mask for this process.
:param mask: The numeric file creation mask to set.
:return: ``None``.
"""
try:
os.umask(mask)
except Exception as exc:
error = DaemonOSEnvironmentError(
"Unable to change file creation mask ({exc})".format(exc=exc))
raise error
def change_process_owner(uid, gid):
""" Change the owning UID and GID of this process.
:param uid: The target UID for the daemon process.
:param gid: The target GID for the daemon process.
:return: ``None``.
Set the GID then the UID of the process (in that order, to avoid
permission errors) to the specified `gid` and `uid` values.
Requires appropriate OS privileges for this process.
"""
try:
os.setgid(gid)
os.setuid(uid)
except Exception as exc:
error = DaemonOSEnvironmentError(
"Unable to change process owner ({exc})".format(exc=exc))
raise error
def prevent_core_dump():
""" Prevent this process from generating a core dump.
:return: ``None``.
Set the soft and hard limits for core dump size to zero. On Unix,
this entirely prevents the process from creating core dump.
"""
core_resource = resource.RLIMIT_CORE
try:
# Ensure the resource limit exists on this platform, by requesting
# its current value.
core_limit_prev = resource.getrlimit(core_resource)
except ValueError as exc:
error = DaemonOSEnvironmentError(
"System does not support RLIMIT_CORE resource limit"
" ({exc})".format(exc=exc))
raise error
# Set hard and soft limits to zero, i.e. no core dump at all.
core_limit = (0, 0)
resource.setrlimit(core_resource, core_limit)
def detach_process_context():
""" Detach the process context from parent and session.
:return: ``None``.
Detach from the parent process and session group, allowing the
parent to exit while this process continues running.
Reference: “Advanced Programming in the Unix Environment”,
section 13.3, by <NAME>, published 1993 by
Addison-Wesley.
"""
def fork_then_exit_parent(error_message):
""" Fork a child process, then exit the parent process.
:param error_message: Message for the exception in case of a
detach failure.
:return: ``None``.
:raise DaemonProcessDetachError: If the fork fails.
"""
try:
pid = os.fork()
if pid > 0:
os._exit(0)
except OSError as exc:
error = DaemonProcessDetachError(
"{message}: [{exc.errno:d}] {exc.strerror}".format(
message=error_message, exc=exc))
raise error
fork_then_exit_parent(error_message="Failed first fork")
os.setsid()
fork_then_exit_parent(error_message="Failed second fork")
def is_process_started_by_init():
""" Determine whether the current process is started by `init`.
:return: ``True`` iff the parent process is `init`; otherwise
``False``.
The `init` process is the one with process ID of 1.
"""
result = False
init_pid = 1
if os.getppid() == init_pid:
result = True
return result
def is_socket(fd):
""" Determine whether the file descriptor is a socket.
:param fd: The file descriptor to interrogate.
:return: ``True`` iff the file descriptor is a socket; otherwise
``False``.
Query the socket type of `fd`. If there is no error, the file is a
socket.
"""
result = False
file_socket = socket.fromfd(fd, socket.AF_INET, socket.SOCK_RAW)
try:
socket_type = file_socket.getsockopt(
socket.SOL_SOCKET, socket.SO_TYPE)
except socket.error as exc:
exc_errno = exc.args[0]
if exc_errno == errno.ENOTSOCK:
# Socket operation on non-socket.
pass
else:
# Some other socket error.
result = True
else:
# No error getting socket type.
result = True
return result
def is_process_started_by_superserver():
""" Determine whether the current process is started by the superserver.
:return: ``True`` if this process was started by the internet
superserver; otherwise ``False``.
The internet superserver creates a network socket, and
attaches it to the standard streams of the child process. If
that is the case for this process, return ``True``, otherwise
``False``.
"""
result = False
stdin_fd = sys.__stdin__.fileno()
if is_socket(stdin_fd):
result = True
return result
def is_detach_process_context_required():
""" Determine whether detaching the process context is required.
:return: ``True`` iff the process is already detached; otherwise
``False``.
The process environment is interrogated for the following:
* Process was started by `init`; or
* Process was started by `inetd`.
If any of the above are true, the process is deemed to be already
detached.
"""
result = True
if is_process_started_by_init() or is_process_started_by_superserver():
result = False
return result
def close_file_descriptor_if_open(fd):
""" Close a file descriptor if already open.
:param fd: The file descriptor to close.
:return: ``None``.
Close the file descriptor `fd`, suppressing an error in the
case the file was not open.
"""
try:
os.close(fd)
except EnvironmentError as exc:
if exc.errno == errno.EBADF:
# File descriptor was not open.
pass
else:
error = DaemonOSEnvironmentError(
"Failed to close file descriptor {fd:d} ({exc})".format(
fd=fd, exc=exc))
raise error
MAXFD = 2048
def get_maximum_file_descriptors():
""" Get the maximum number of open file descriptors for this process.
:return: The number (integer) to use as the maximum number of open
files for this process.
The maximum is the process hard resource limit of maximum number of
open file descriptors. If the limit is “infinity”, a default value
of ``MAXFD`` is returned.
"""
limits = resource.getrlimit(resource.RLIMIT_NOFILE)
result = limits[1]
if result == resource.RLIM_INFINITY:
result = MAXFD
return result
def close_all_open_files(exclude=set()):
""" Close all open file descriptors.
:param exclude: Collection of file descriptors to skip when closing
files.
:return: ``None``.
Closes every file descriptor (if open) of this process. If
specified, `exclude` is a set of file descriptors to *not*
close.
"""
maxfd = get_maximum_file_descriptors()
for fd in reversed(range(maxfd)):
if fd not in exclude:
close_file_descriptor_if_open(fd)
def redirect_stream(system_stream, target_stream):
""" Redirect a system stream to a specified file.
:param standard_stream: A file object representing a standard I/O
stream.
:param target_stream: The target file object for the redirected
stream, or ``None`` to specify the null device.
:return: ``None``.
`system_stream` is a standard system stream such as
``sys.stdout``. `target_stream` is an open file object that
should replace the corresponding system stream object.
If `target_stream` is ``None``, defaults to opening the
operating system's null device and using its file descriptor.
"""
if target_stream is None:
target_fd = os.open(os.devnull, os.O_RDWR)
else:
target_fd = target_stream.fileno()
os.dup2(target_fd, system_stream.fileno())
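# Hedged usage sketch (not part of the original module): one way to combine the
# helpers above when preparing a detached process -- close every inherited file
# descriptor except the standard streams, then point those streams at the null
# device. Assumes `sys` is imported at module level, as elsewhere in this file.
def _example_reset_standard_streams():
    """ Close inherited descriptors and silence the standard streams. """
    close_all_open_files(exclude={0, 1, 2})
    for stream in (sys.stdin, sys.stdout, sys.stderr):
        redirect_stream(stream, None)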
def make_default_signal_map():
""" Make the default signal map for this system.
:return: A mapping from signal number to handler object.
The signals available differ by system. The map will not contain
any signals not defined on the running system.
"""
name_map = {
'SIGTSTP': None,
'SIGTTIN': None,
'SIGTTOU': None,
'SIGTERM': 'terminate',
}
signal_map = dict(
(getattr(signal, name), target)
for (name, target) in name_map.items()
if hasattr(signal, name))
return signal_map
def set_signal_handlers(signal_handler_map):
""" Set the signal handlers as specified.
:param signal_handler_map: A map from signal number to handler
object.
:return: ``None``.
See the `signal` module for details on signal numbers and signal
handlers.
"""
for (signal_number, handler) in signal_handler_map.items():
signal.signal(signal_number, handler)
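# Hedged usage sketch (not part of the original module): install the default
# signal map built above. Resolving the symbolic 'terminate' target to a real
# callable and ``None`` to ``SIG_IGN`` is an illustrative assumption about how a
# daemon context is expected to interpret the map, not something this module
# itself defines.
def _example_install_default_signal_handlers():
    def handle_terminate(signal_number, stack_frame):
        raise SystemExit(
            "Terminating on signal {number}".format(number=signal_number))
    signal_map = make_default_signal_map()
    handler_map = {}
    for (signal_number, target) in signal_map.items():
        if target is None:
            handler_map[signal_number] = signal.SIG_IGN
        elif target == 'terminate':
            handler_map[signal_number] = handle_terminate
        else:
            handler_map[signal_number] = target
    set_signal_handlers(handler_map)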
def register_atexit_function(func):
""" Register a function for processing at program exit.
:param func: A callable function expecting no arguments.
:return: ``None``.
The function `func` is registered for a call with no arguments
at program exit.
"""
atexit.register(func)
def _chain_exception_from_existing_exception_context(exc, as_cause=False):
""" Decorate the specified exception with the existing exception context.
:param exc: The exception instance to decorate.
:param as_cause: If true, the existing context is declared to be
the cause of the exception.
:return: ``None``.
:PEP:`344` describes syntax and attributes (`__traceback__`,
`__context__`, `__cause__`) for use in exception chaining.
Python 2 does not have that syntax, so this function decorates
the exception with values from the current exception context.
"""
(existing_exc_type, existing_exc, existing_traceback) = sys.exc_info()
if as_cause:
exc.__cause__ = existing_exc
else:
exc.__context__ = existing_exc
exc.__traceback__ = existing_traceback
# Local variables:
# coding: utf-8
# mode: python
# End:
# vim: fileencoding=utf-8 filetype=python :
|
torchrec/fx/tracer.py | xing-liu/torchrec | 814 | 11132351 | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable, Dict, Optional, Union
import torch
from torch.fx.node import Argument
from torchrec.distributed.types import NoWait
class Tracer(torch.fx.Tracer):
"""
Custom FX tracer for torchrec
See `Torch.FX documentation <https://pytorch.org/docs/stable/fx.html>`_
We create a custom FX tracer to trace torchrec based models. The custom tracer
    handles Python generic types (e.g. NoWait[T], Awaitable[T]) and lowers them to
    TorchScript if needed.
"""
def __init__(self) -> None:
super().__init__()
# pyre-ignore[2]
def create_arg(self, a: Any) -> Argument:
"""
A method to specify the behavior of tracing when preparing values to
be used as arguments to nodes in the ``Graph``.
Adds support for the NoWait type in addition to the default tracer
Args:
a (Any): The value to be emitted as an ``Argument`` in the ``Graph``.
Returns:
Argument: The value ``a`` converted into the appropriate ``Argument``
"""
if isinstance(a, NoWait):
return self.create_node(
"call_function",
target=NoWait,
args=self.create_arg((a._obj,)),
kwargs={},
type_expr=NoWait,
)
return super().create_arg(a)
def symbolic_trace(
# pyre-ignore[24]
root: Union[torch.nn.Module, Callable],
concrete_args: Optional[Dict[str, Any]] = None,
) -> torch.fx.GraphModule:
"""
Symbolic tracing API
Given an ``nn.Module`` or function instance ``root``, this function will return a ``GraphModule``
constructed by recording operations seen while tracing through ``root``.
``concrete_args`` allows you to partially specialize your function, whether it's to remove control flow or data structures.
Args:
root (Union[torch.nn.Module, Callable]): Module or function to be traced and converted
into a Graph representation.
concrete_args (Optional[Dict[str, any]]): Inputs to be partially specialized
Returns:
GraphModule: a Module created from the recorded operations from ``root``.
"""
tracer = Tracer()
graph = tracer.trace(root, concrete_args)
return torch.fx.GraphModule(root, graph)
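# Hedged usage sketch (not part of the original file): tracing a plain nn.Module
# behaves exactly like stock torch.fx; the custom Tracer above only changes
# behaviour when NoWait values appear in the traced call graph.
def _example_trace_simple_module() -> torch.fx.GraphModule:
    class TwoLayer(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.linear = torch.nn.Linear(4, 2)
        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return torch.relu(self.linear(x))
    return symbolic_trace(TwoLayer())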
|
pysnmp/smi/mibs/instances/__SNMP-FRAMEWORK-MIB.py | RKinsey/pysnmp | 492 | 11132386 | #
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2019, <NAME> <<EMAIL>>
# License: http://snmplabs.com/pysnmp/license.html
#
# This file instantiates some of the MIB managed objects for SNMP engine use
#
import time
if 'mibBuilder' not in globals():
import sys
sys.stderr.write(__doc__)
sys.exit(1)
MibScalarInstance, = mibBuilder.importSymbols(
'SNMPv2-SMI',
'MibScalarInstance'
)
(snmpEngineID,
snmpEngineBoots,
snmpEngineTime,
snmpEngineMaxMessageSize) = mibBuilder.importSymbols(
'SNMP-FRAMEWORK-MIB',
'snmpEngineID',
'snmpEngineBoots',
'snmpEngineTime',
'snmpEngineMaxMessageSize'
)
_snmpEngineID = MibScalarInstance(
snmpEngineID.name, (0,),
snmpEngineID.syntax
)
_snmpEngineBoots = MibScalarInstance(
snmpEngineBoots.name, (0,),
snmpEngineBoots.syntax.clone(1)
)
_snmpEngineTime = MibScalarInstance(
snmpEngineTime.name, (0,),
snmpEngineTime.syntax.clone(int(time.time()))
)
_snmpEngineMaxMessageSize = MibScalarInstance(
snmpEngineMaxMessageSize.name, (0,),
snmpEngineMaxMessageSize.syntax.clone(4096)
)
mibBuilder.exportSymbols(
'__SNMP-FRAMEWORK-MIB',
snmpEngineID=_snmpEngineID,
snmpEngineBoots=_snmpEngineBoots,
snmpEngineTime=_snmpEngineTime,
snmpEngineMaxMessageSize=_snmpEngineMaxMessageSize
)
|
custom_components/hacs/tasks/activate_categories.py | rubicon/home-assistant-config-1 | 1,383 | 11132398 | <filename>custom_components/hacs/tasks/activate_categories.py
"""Starting setup task: extra stores."""
from __future__ import annotations
from homeassistant.core import HomeAssistant
from ..base import HacsBase
from ..enums import HacsCategory, HacsStage
from .base import HacsTask
async def async_setup_task(hacs: HacsBase, hass: HomeAssistant) -> Task:
"""Set up this task."""
return Task(hacs=hacs, hass=hass)
class Task(HacsTask):
"""Set up extra stores in HACS if enabled in Home Assistant."""
stages = [HacsStage.SETUP]
def execute(self) -> None:
self.hacs.common.categories = set()
for category in (HacsCategory.INTEGRATION, HacsCategory.PLUGIN):
self.hacs.enable_hacs_category(HacsCategory(category))
if HacsCategory.PYTHON_SCRIPT in self.hacs.hass.config.components:
self.hacs.enable_hacs_category(HacsCategory.PYTHON_SCRIPT)
if self.hacs.hass.services.has_service("frontend", "reload_themes"):
self.hacs.enable_hacs_category(HacsCategory.THEME)
if self.hacs.configuration.appdaemon:
self.hacs.enable_hacs_category(HacsCategory.APPDAEMON)
if self.hacs.configuration.netdaemon:
self.hacs.enable_hacs_category(HacsCategory.NETDAEMON)
|
pencil.py | duduainankai/pencil-python | 127 | 11132414 | <filename>pencil.py<gh_stars>100-1000
#!/usr/bin/env python
# encoding: utf-8
"""
=================================================
The python version implementation
"Combining Sketch and Tone for Pencil Drawing Production"
<NAME>, <NAME>, <NAME>
International Symposium on Non-Photorealistic Animation and Rendering
(NPAR 2012), June, 2012
=================================================
pencil drawing implementation
usage:
cd {file directory}
python pencil.py {path of img file you want to try}
"""
from stitch_function import horizontal_stitch as hstitch, vertical_stitch as vstitch
from util import im2double, rot90, rot90c
from natural_histogram_matching import natural_histogram_matching
from PIL import Image
import numpy as np
from scipy import signal
from scipy.ndimage import interpolation
from scipy.sparse import csr_matrix as csr_matrix, spdiags as spdiags
from scipy.sparse.linalg import spsolve as spsolve
import math
import os
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
basedir = os.path.dirname(__file__)
output = os.path.join(basedir, 'output')
line_len_divisor = 40  # ratio between the image size and the convolution kernel size
# gammaS = 1  # larger values produce thicker contour lines
# gammaI = 1  # larger values make the final output darker
Lambda = 0.2
texture_resize_ratio = 0.2
texture_file_name = 'texture.jpg'
def get_s(J, gammaS=1):
    '''
    Stroke structure generation.
    Stroke drawing aims at expressing the general structures of the scene.
    1. classification:
        First compute the image gradient in the x and y directions, then take the
        square root of the sum of squares at each position.
        Because of noise, the contour produced directly from the gradient is poor,
        so the paper instead predicts a drawing direction for each pixel:
        8 directions, 45 degrees apart, give 8 direction vectors that also serve as
        convolution kernels. Each pixel is assigned the direction with the largest
        response among the 8 convolutions, producing a map set C in which 1 marks
        the chosen direction and 0 everything else.
    2. line shaping:
        The step that generates the contour lines.
        Convolving the map set C with the direction vectors aggregates pixels that
        share a direction and links edge pixels of the gradient map into segments.
    :param J: matrix of the image converted to grayscale
    :param gammaS: control parameter; larger values give thicker lines
    :return: the stroke structure of the image, i.e. the contour map S
    '''
h, w = J.shape
line_len_double = float(min(h, w)) / line_len_divisor
line_len = int(line_len_double)
line_len += line_len % 2
half_line_len = line_len / 2
    # compute the image gradient 'Imag'
dJ = im2double(J)
Ix = np.column_stack((abs(dJ[:, 0:-1] - dJ[:, 1:]), np.zeros((h, 1))))
Iy = np.row_stack((abs(dJ[0:-1, :] - dJ[1:, :]), np.zeros((1, w))))
# eq.1
Imag = np.sqrt(Ix*Ix + Iy*Iy)
    # Uncomment the line below to see the contour produced directly from the gradient, which is easily affected by noise
# Image.fromarray((1 - Imag) * 255).show()
# create the 8 directional line segments L
    # L[:, :, index] is the line segment representing direction index+1
    # and is used as a convolution kernel
L = np.zeros((line_len, line_len, 8))
for n in range(8):
if n == 0 or n == 1 or n == 2 or n == 7:
for x in range(0, line_len):
y = round(((x+1) - half_line_len) * math.tan(math.pi/8*n))
y = half_line_len - y
if 0 < y <= line_len:
L[int(y-1), x, n] = 1
if n < 7:
L[:, :, n+4] = rot90c(L[:, :, n])
L[:, :, 3] = rot90(L[:, :, 7])
G = np.zeros((J.shape[0], J.shape[1], 8))
for n in range(8):
G[:, :, n] = signal.convolve2d(Imag, L[:, :, n], "same") # eq.2
    Gindex = G.argmax(axis=2)  # index of the maximum element along the direction axis
# C is map set
C = np.zeros((J.shape[0], J.shape[1], 8))
for n in range(8):
        # among the 8 directions, pick the one with the maximum response
        # eq.3: the paper's formula and its explanation disagree slightly; the maximum-response direction should be selected
C[:, :, n] = Imag * (1 * (Gindex == n))
# line shaping
# generate lines at each pixel
Spn = np.zeros((J.shape[0], J.shape[1], 8))
for n in range(8):
Spn[:, :, n] = signal.convolve2d(C[:, :, n], L[:, :, n], "same")
    # sum over the 8 directions and normalize
Sp = Spn.sum(axis=2)
Sp = (Sp - Sp[:].min()) / (Sp[:].max() - Sp[:].min())
S = (1 - Sp) ** gammaS
img = Image.fromarray(S * 255)
# img.show()
return S
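# Hedged usage sketch (not part of the original script): compute only the stroke
# layer S for an image and write it out, skipping the tone-rendering step.
def _example_stroke_only(path="img/sjtu.jpg"):
    im = Image.open(path).convert("L")
    S = get_s(np.array(im), gammaS=1)
    Image.fromarray((S * 255).astype(np.uint8)).convert("RGB").save("stroke_only.jpg")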
def get_t(J, type, gammaI=1):
    '''
    Tone rendering:
    Tone drawing focuses more on shapes, shadow, and shading than on the use of lines.
    The histogram of a pencil drawing follows a characteristic pattern, since it is
    just the combination of pencil and white paper. It can be split into three
    regions: 1. bright, 2. dark, 3. the mid-tones in between, giving three models
    used for the simulation. Tone and colour in a pencil drawing are produced by
    repeatedly drawing over the same area.
    1. histogram matching:
        Compute a histogram from the three distributions, then match the histogram
        of the ordinary input image against it.
    2. texture rendering:
        Compute beta, the number of repeated pencil strokes needed for the
        simulation.
    :param J: matrix of the image converted to grayscale
    :param type: image type
    :param gammaI: control parameter; larger values darken the final output
    :return: the tone-rendered image matrix T
    '''
Jadjusted = natural_histogram_matching(J, type=type) ** gammaI
# Jadjusted = natural_histogram_matching(J, type=type)
texture = Image.open(texture_file_name)
texture = np.array(texture.convert("L"))
# texture = np.array(texture)
texture = texture[99: texture.shape[0]-100, 99: texture.shape[1]-100]
ratio = texture_resize_ratio * min(J.shape[0], J.shape[1]) / float(1024)
texture_resize = interpolation.zoom(texture, (ratio, ratio))
texture = im2double(texture_resize)
htexture = hstitch(texture, J.shape[1])
Jtexture = vstitch(htexture, J.shape[0])
size = J.shape[0] * J.shape[1]
nzmax = 2 * (size-1)
i = np.zeros((nzmax, 1))
j = np.zeros((nzmax, 1))
s = np.zeros((nzmax, 1))
for m in range(1, nzmax+1):
i[m-1] = int(math.ceil((m+0.1) / 2)) - 1
j[m-1] = int(math.ceil((m-0.1) / 2)) - 1
s[m-1] = -2 * (m % 2) + 1
dx = csr_matrix((s.T[0], (i.T[0], j.T[0])), shape=(size, size))
nzmax = 2 * (size - J.shape[1])
i = np.zeros((nzmax, 1))
j = np.zeros((nzmax, 1))
s = np.zeros((nzmax, 1))
for m in range(1, nzmax+1):
i[m-1, :] = int(math.ceil((m-1+0.1)/2) + J.shape[1] * (m % 2)) - 1
j[m-1, :] = math.ceil((m-0.1)/2) - 1
s[m-1, :] = -2 * (m % 2) + 1
dy = csr_matrix((s.T[0], (i.T[0], j.T[0])), shape=(size, size))
    # +0.01 avoids taking the log of zero; its effect on normal values is negligible
Jtexture1d = np.log(np.reshape(Jtexture.T, (1, Jtexture.size), order="f") + 0.01)
Jtsparse = spdiags(Jtexture1d, 0, size, size)
Jadjusted1d = np.log(np.reshape(Jadjusted.T, (1, Jadjusted.size), order="f").T + 0.01)
nat = Jtsparse.T.dot(Jadjusted1d) # lnJ(x)
a = np.dot(Jtsparse.T, Jtsparse)
b = dx.T.dot(dx)
c = dy.T.dot(dy)
mat = a + Lambda * (b + c) # lnH(x)
# x = spsolve(a,b) <--> a*x = b
# lnH(x) * beta(x) = lnJ(x) --> beta(x) = spsolve(lnH(x), lnJ(x))
    # use scipy.sparse's spsolve instead of linalg.solve()
beta1d = spsolve(mat, nat) # eq.8
beta = np.reshape(beta1d, (J.shape[0], J.shape[1]), order="c")
    # simulate how sketching darkens shadows by repeated strokes: apply the pattern Jtexture beta times
T = Jtexture ** beta # eq.9
T = (T - T.min()) / (T.max() - T.min())
img = Image.fromarray(T * 255)
# img.show()
return T
def pencil_draw(path="img/sjtu.jpg", gammaS=1, gammaI=1):
name = path.rsplit("/")[-1].split(".")[0]
suffix = path.rsplit("/")[-1].split(".")[1]
imr = Image.open(path)
type = "colour" if imr.mode == "RGB" else "black"
im = imr.convert("L")
J = np.array(im)
S = get_s(J, gammaS=gammaS)
T = get_t(J, type, gammaI=gammaI)
IPencil = S * T
img = Image.fromarray(IPencil * 255)
# img.show()
save_output(Image.fromarray(S * 255), name + "_s", suffix)
save_output(Image.fromarray(T * 255), name + "_t", suffix)
save_output(img, name + "_pencil", suffix)
return name + suffix
def make_output_dir():
if not os.path.exists(output):
os.mkdir(output)
def save_output(img, name, suffix):
if img.mode != 'RGB':
img = img.convert('RGB')
make_output_dir()
name = os.path.join(output, name)
filename = "{0}.{1}".format(name, suffix)
img.save(filename)
if __name__ == "__main__":
args = sys.argv
length = len(args)
if length > 1:
path = args[1]
pencil_draw(path=path)
else:
pencil_draw()
|
hulearn/datasets.py | ParikhKadam/human-learn | 575 | 11132419 | <reponame>ParikhKadam/human-learn
import os
from pkg_resources import resource_filename
import pandas as pd
def load_titanic(return_X_y: bool = False, as_frame: bool = False):
"""
Loads in a subset of the titanic dataset. You can find the full dataset [here](https://www.kaggle.com/c/titanic/data).
Arguments:
return_X_y: return a tuple of (`X`, `y`) for convenience
as_frame: return all the data as a pandas dataframe
Usage:
```python
from hulearn.datasets import load_titanic
df = load_titanic(as_frame=True)
X, y = load_titanic(return_X_y=True)
```
"""
filepath = resource_filename("hulearn", os.path.join("data", "titanic.zip"))
df = pd.read_csv(filepath)
if as_frame:
return df
X, y = (
df[["pclass", "name", "sex", "age", "fare", "sibsp", "parch"]].values,
df["survived"].values,
)
if return_X_y:
return X, y
return {"data": X, "target": y}
def load_fish(return_X_y: bool = False, as_frame: bool = False):
"""
Loads in a subset of the Fish market dataset. You can find the full dataset [here](https://www.kaggle.com/aungpyaeap/fish-market).
Arguments:
return_X_y: return a tuple of (`X`, `y`) for convenience
as_frame: return all the data as a pandas dataframe
Usage:
```python
from hulearn.datasets import load_fish
df = load_fish(as_frame=True)
X, y = load_fish(return_X_y=True)
```
"""
filepath = resource_filename("hulearn", os.path.join("data", "fish.zip"))
df = pd.read_csv(filepath)
if as_frame:
return df
X, y = (
df[["Species", "Length1", "Length2", "Length3", "Height", "Width"]].values,
df["Weight"].values,
)
if return_X_y:
return X, y
return {"data": X, "target": y}
|
tridet/utils/geometry.py | flipson/dd3d | 227 | 11132421 | <reponame>flipson/dd3d
# Copyright 2021 Toyota Research Institute. All rights reserved.
import logging
import cv2
import numpy as np
import torch
from pytorch3d.transforms.rotation_conversions import matrix_to_quaternion, quaternion_to_matrix
LOG = logging.getLogger(__name__)
PI = 3.14159265358979323846
EPS = 1e-7
def allocentric_to_egocentric(quat, proj_ctr, inv_intrinsics):
"""
Parameters
----------
quat: Tensor
(N, 4). Batch of (allocentric) quaternions.
proj_ctr: Tensor
        (N, 2). Projected centers. xy coordinates.
    inv_intrinsics: Tensor
        (N, 3, 3). Inverted intrinsics.
"""
R_obj_to_local = quaternion_to_matrix(quat)
    # ray == z-axis in local orientation
ray = unproject_points2d(proj_ctr, inv_intrinsics)
z = ray / ray.norm(dim=1, keepdim=True)
    # Gram-Schmidt process: local_y = global_y - global_y \dot local_z
y = z.new_tensor([[0., 1., 0.]]) - z[:, 1:2] * z
y = y / y.norm(dim=1, keepdim=True)
x = torch.cross(y, z, dim=1)
# local -> global
R_local_to_global = torch.stack([x, y, z], dim=-1)
# obj -> global
R_obj_to_global = torch.bmm(R_local_to_global, R_obj_to_local)
egocentric_quat = matrix_to_quaternion(R_obj_to_global)
# Make sure it's unit norm.
quat_norm = egocentric_quat.norm(dim=1, keepdim=True)
if not torch.allclose(quat_norm, torch.as_tensor(1.), atol=1e-3):
LOG.warning(
f"Some of the input quaternions are not unit norm: min={quat_norm.min()}, max={quat_norm.max()}; therefore normalizing."
)
egocentric_quat = egocentric_quat / quat_norm.clamp(min=EPS)
return egocentric_quat
def homogenize_points(xy):
"""
Parameters
----------
xy: Tensor
xy coordinates. shape=(N, ..., 2)
E.g., (N, 2) or (N, K, 2) or (N, H, W, 2)
Returns
-------
Tensor:
1. is appended to the last dimension. shape=(N, ..., 3)
E.g, (N, 3) or (N, K, 3) or (N, H, W, 3).
"""
# NOTE: this seems to work for arbitrary number of dimensions of input
pad = torch.nn.ConstantPad1d(padding=(0, 1), value=1.)
return pad(xy)
def project_points3d(Xw, K):
_, C = Xw.shape
assert C == 3
uv, _ = cv2.projectPoints(
Xw, np.zeros((3, 1), dtype=np.float32), np.zeros(3, dtype=np.float32), K, np.zeros(5, dtype=np.float32)
)
return uv.reshape(-1, 2)
def unproject_points2d(points2d, inv_K, scale=1.0):
"""
Parameters
----------
points2d: Tensor
xy coordinates. shape=(N, ..., 2)
E.g., (N, 2) or (N, K, 2) or (N, H, W, 2)
inv_K: Tensor
Inverted intrinsics; shape=(N, 3, 3)
scale: float, default: 1.0
Scaling factor.
Returns
-------
Tensor:
Unprojected 3D point. shape=(N, ..., 3)
E.g., (N, 3) or (N, K, 3) or (N, H, W, 3)
"""
points2d = homogenize_points(points2d)
siz = points2d.size()
points2d = points2d.view(-1, 3).unsqueeze(-1) # (N, 3, 1)
unprojected = torch.matmul(inv_K, points2d) # (N, 3, 3) x (N, 3, 1) -> (N, 3, 1)
unprojected = unprojected.view(siz)
return unprojected * scale
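# Hedged usage sketch (not part of the original module): with identity
# intrinsics the unprojected rays are simply the homogenised pixel coordinates,
# which makes the convention of `unproject_points2d` easy to check.
def _example_unproject_with_identity_intrinsics() -> torch.Tensor:
    points2d = torch.tensor([[10.0, 20.0], [0.0, 0.0]])  # (N, 2) pixel coordinates
    inv_intrinsics = torch.eye(3).expand(2, 3, 3)        # (N, 3, 3) inverted intrinsics
    return unproject_points2d(points2d, inv_intrinsics)  # (N, 3); rows are (u, v, 1)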
|
rally/task/processing/utils.py | lolwww/rally | 263 | 11132424 | <gh_stars>100-1000
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import math
class GraphZipper(object):
def __init__(self, base_size, zipped_size=1000):
"""Init graph zipper.
:param base_size: Amount of points in raw graph
        :param zipped_size: Amount of points that should be in zipped graph
"""
self.base_size = base_size
self.zipped_size = zipped_size
if self.base_size >= self.zipped_size:
self.compression_ratio = self.base_size / float(self.zipped_size)
else:
self.compression_ratio = 1
self.point_order = 0
self.cached_ratios_sum = 0
self.ratio_value_points = []
self.zipped_graph = []
def _get_zipped_point(self):
if self.point_order - self.compression_ratio <= 1:
order = 1
elif self.point_order == self.base_size:
order = self.base_size
else:
order = self.point_order - int(self.compression_ratio / 2.0)
value = (
sum(p[0] * p[1] for p in self.ratio_value_points)
/ self.compression_ratio
)
return [order, value]
def add_point(self, value):
self.point_order += 1
if self.point_order > self.base_size:
raise RuntimeError("GraphZipper is already full. "
"You can't add more points.")
if not isinstance(value, (int, float)):
value = 0
if self.compression_ratio <= 1: # We don't need to compress
self.zipped_graph.append([self.point_order, value])
elif self.cached_ratios_sum + 1 < self.compression_ratio:
self.cached_ratios_sum += 1
self.ratio_value_points.append([1, value])
else:
rest = self.compression_ratio - self.cached_ratios_sum
self.ratio_value_points.append([rest, value])
self.zipped_graph.append(self._get_zipped_point())
self.ratio_value_points = [[1 - rest, value]]
self.cached_ratios_sum = self.ratio_value_points[0][0]
def get_zipped_graph(self):
return self.zipped_graph
def percentile(points, percent, ignore_sorting=False):
if not points:
return None
if not ignore_sorting:
points.sort()
k = (len(points) - 1) * percent
f = math.floor(k)
c = math.ceil(k)
if f == c:
return points[int(k)]
d0 = points[int(f)] * (c - k)
d1 = points[int(c)] * (k - f)
return d0 + d1
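# Hedged usage sketch (not part of the original module): compress a ten-point
# series into roughly five zipped points and take its median.
def _example_zip_and_percentile():
    zipper = GraphZipper(base_size=10, zipped_size=5)
    values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    for value in values:
        zipper.add_point(value)
    median = percentile(values, 0.5)  # 5.5 for this series
    return zipper.get_zipped_graph(), median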
|
external/model-preparation-algorithm/mpa_tasks/extensions/datasets/mpa_cls_dataset.py | opencv/openvino_training_extensions | 775 | 11132453 | # Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import torch
import numpy as np
from mmcv.utils.registry import build_from_cfg
from mmcls.datasets.builder import DATASETS, PIPELINES
from mmcls.datasets.pipelines import Compose
from mmcls.datasets.base_dataset import BaseDataset
from mpa.utils.logger import get_logger
logger = get_logger()
@DATASETS.register_module()
class MPAClsDataset(BaseDataset):
def __init__(self, old_new_indices=None, ote_dataset=None, labels=None, **kwargs):
self.ote_dataset = ote_dataset
self.labels = labels
self.CLASSES = list(label.name for label in labels)
self.gt_labels = []
pipeline = kwargs['pipeline']
self.img_indices = dict(old=[], new=[])
self.num_classes = len(self.CLASSES)
if old_new_indices is not None:
self.img_indices['old'] = old_new_indices['old']
self.img_indices['new'] = old_new_indices['new']
if isinstance(pipeline, dict):
self.pipeline = {}
for k, v in pipeline.items():
_pipeline = [dict(type='LoadImageFromOTEDataset'), *v]
_pipeline = [build_from_cfg(p, PIPELINES) for p in _pipeline]
self.pipeline[k] = Compose(_pipeline)
self.num_pipes = len(pipeline)
elif isinstance(pipeline, list):
self.num_pipes = 1
_pipeline = [dict(type='LoadImageFromOTEDataset'), *pipeline]
self.pipeline = Compose([build_from_cfg(p, PIPELINES) for p in _pipeline])
self.load_annotations()
def load_annotations(self):
for dataset_item in self.ote_dataset:
if dataset_item.get_annotations() == []:
label = None
else:
label = int(dataset_item.get_annotations()[0].get_labels()[0].id_)
self.gt_labels.append(label)
self.gt_labels = np.array(self.gt_labels)
def __getitem__(self, index):
dataset_item = self.ote_dataset[index]
if self.pipeline is None:
return dataset_item
results = {}
results['index'] = index
results['dataset_item'] = dataset_item
results['height'], results['width'], _ = dataset_item.numpy.shape
results['gt_label'] = None if self.gt_labels[index] is None else torch.tensor(self.gt_labels[index])
results = self.pipeline(results)
return results
def get_gt_labels(self):
"""Get all ground-truth labels (categories).
Returns:
list[int]: categories for all images.
"""
return self.gt_labels
def __len__(self):
return len(self.ote_dataset)
def evaluate(self,
results,
metric='accuracy',
metric_options=None,
logger=None):
"""Evaluate the dataset with new metric 'class_accuracy'
Args:
results (list): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated.
Default value is `accuracy`.
'accuracy', 'precision', 'recall', 'f1_score', 'support', 'class_accuracy'
metric_options (dict, optional): Options for calculating metrics.
Allowed keys are 'topk', 'thrs' and 'average_mode'.
Defaults to None.
logger (logging.Logger | str, optional): Logger used for printing
related information during evaluation. Defaults to None.
Returns:
dict: evaluation results
"""
if metric_options is None:
metric_options = {'topk': (1, 5) if self.num_classes >= 5 else (1, )}
if isinstance(metric, str):
metrics = [metric]
else:
metrics = metric
        self.class_acc = 'class_accuracy' in metrics
        if self.class_acc:
            metrics.remove('class_accuracy')
eval_results = super().evaluate(results, metrics, metric_options, logger)
# Add Evaluation Accuracy score per Class
if self.class_acc:
results = np.vstack(results)
gt_labels = self.get_gt_labels()
accuracies = self.class_accuracy(results, gt_labels)
eval_results.update({f'{c} accuracy': a for c, a in zip(self.CLASSES, accuracies)})
eval_results.update({'mean accuracy': np.mean(accuracies)})
return eval_results
def class_accuracy(self, results, gt_labels):
        accuracies = []
        pred_label = results.argsort(axis=1)[:, -1:][:, ::-1]
        for i in range(self.num_classes):
            cls_pred = pred_label == i
            cls_pred = cls_pred[gt_labels == i]
            cls_acc = np.sum(cls_pred) / len(cls_pred)
            accuracies.append(cls_acc)
        return accuracies
|
modules/gif/config.py | ppiecuch/gd_goost | 323 | 11132458 | #!/usr/bin/env python3
def can_build(env, platform):
# This module is part of the Goost project, but can be built independently.
# Refer to https://github.com/goostengine/goost for more information.
if "goost_image_enabled" in env:
return env["goost_image_enabled"]
return True
def configure(env):
pass
def get_doc_path():
return "doc_classes"
def get_doc_classes():
return ["ImageFrames"]
|
tests/integration/platform/__init__.py | AnantTiwari-Naman/pyglet | 1,160 | 11132470 | <filename>tests/integration/platform/__init__.py
"""
Platform integration tests. These tests are specific to certain platforms.
"""
|
packages/pyright-internal/src/tests/samples/assignmentExpr1.py | Jasha10/pyright | 3,934 | 11132480 | # This sample tests the Python 3.8 assignment expressions.
# pyright: reportUnusedExpression=false
def func1():
b = 'a'
d = 'b'
a = (b := 3)
# This should generate an error because the
# item to the left of an assignment expression
# must be a name.
a + 3 := 3
# This should generate an error because parens
# are required in this case.
c = d := 3
# This should generate an error because parens are required in this case.
val if val := 1 + 2 else None
val2 if (val2 := 1 + 2) else None
|
tests/core/factory_test.py | ndennler/pyribs | 108 | 11132490 | """Tests for ribs.factory."""
import numpy as np
import pytest
import toml
import ribs.factory
from ribs.archives import GridArchive
from ribs.emitters import GaussianEmitter
from ribs.optimizers import Optimizer
@pytest.mark.parametrize(
"registration_func",
[
ribs.factory.register_archive,
ribs.factory.register_emitter,
ribs.factory.register_optimizer,
],
ids=[
"archive",
"emitter",
"optimizer",
],
)
def test_registering_again_fails(registration_func):
class NewClass:
"""Arbitrary class for registration."""
with pytest.raises(ribs.factory.RegistrationError):
registration_func("NewClass", NewClass)
# The second registration should fail.
registration_func("NewClass", NewClass)
@pytest.mark.parametrize("use_toml", [False, True], ids=["dict", "toml"])
def test_from_config_with_valid_input(use_toml, tmp_path):
seed = 42
batch_size = 4
archive = GridArchive([64, 64], [(-1, 1), (-1, 1)], seed=seed)
emitters = [
GaussianEmitter(archive, [0.0, 0.0],
0.1,
batch_size=batch_size,
seed=seed)
]
optimizer = Optimizer(archive, emitters)
config_dict = {
"archive": {
"type": "GridArchive",
"dims": [64, 64],
"ranges": [(-1, 1), (-1, 1)],
"seed": seed,
},
"emitters": [{
"type": "GaussianEmitter",
"x0": [0.0, 0.0],
"sigma0": 0.1,
"batch_size": batch_size,
"seed": seed,
}],
"optimizer": {
"type": "Optimizer",
},
}
if use_toml:
config_path = tmp_path / "config.toml"
with config_path.open("w") as file:
toml.dump(config_dict, file)
created_optimizer = ribs.factory.from_config(config_path)
else:
created_optimizer = ribs.factory.from_config(config_dict)
# Check types.
assert isinstance(created_optimizer, Optimizer)
assert isinstance(created_optimizer.archive, GridArchive)
assert len(created_optimizer.emitters) == 1
assert isinstance(created_optimizer.emitters[0], GaussianEmitter)
# Check results from ask() and tell() -- since seeds are the same, all
# results should be the same.
optimizer_sols = optimizer.ask()
created_optimizer_sols = created_optimizer.ask()
assert len(optimizer_sols) == batch_size
assert (optimizer_sols == created_optimizer_sols).all()
objective_values = [0.0] * batch_size
behavior_values = np.array([[1, 1], [-1, 1], [-1, -1], [1, -1]])
optimizer.tell(objective_values, behavior_values)
created_optimizer.tell(objective_values, behavior_values)
assert (optimizer.archive.as_pandas() ==
created_optimizer.archive.as_pandas()).all(None)
@pytest.mark.parametrize("entity_type", ["archive", "emitter", "optimizer"])
def test_from_config_fails_on_unknown_entity(entity_type):
config_dict = {
"archive": {
"type": "GridArchive",
"dims": [64, 64],
"ranges": [(-1, 1), (-1, 1)],
"seed": 42,
},
"emitters": [{
"type": "GaussianEmitter",
"x0": [0.0, 0.0],
"sigma0": 0.1,
"batch_size": 32,
"seed": 42,
}],
"optimizer": {
"type": "Optimizer",
},
}
if entity_type == "archive":
config_dict["archive"]["type"] = "NonexistentArchive"
elif entity_type == "emitter":
config_dict["emitters"][0]["type"] = "NonexistentEmitter"
elif entity_type == "optimizer":
config_dict["optimizer"]["type"] = "NonexistentOptimizer"
with pytest.raises(KeyError):
ribs.factory.from_config(config_dict)
|
colossalai/gemini/memory_tracer/memstats_collector.py | RichardoLuo/ColossalAI | 1,630 | 11132491 | from colossalai.gemini.memory_tracer import SyncCudaMemoryMonitor
from colossalai.utils.memory import colo_device_memory_used
from colossalai.gemini.stateful_tensor import StatefulTensor
import torch
import time
from typing import List
class MemStatsCollector:
"""
A Memory statistic collector.
It works in two phases.
Phase 1. Collection Phase: collect memory usage statistics of CPU and GPU.
The first iteration of DNN training.
Phase 2. Runtime Phase: use the read-only collected stats
The rest iterations of DNN training.
It has a Sampling counter which is reset after DNN training iteration.
"""
def __init__(self) -> None:
self._mem_monitor = SyncCudaMemoryMonitor()
self._model_data_cuda_list = []
self._overall_cuda_list = []
self._model_data_cpu_list = []
self._overall_cpu_list = []
self._non_model_data_cuda_list = []
self._non_model_data_cpu_list = []
self._sampling_time = []
self._start_flag = False
self._step_idx = 0
self._step_total = 0
def overall_mem_stats(self, device_type: str) -> List[int]:
if device_type == 'cuda':
return self._overall_cuda_list
elif device_type == 'cpu':
return self._overall_cpu_list
else:
raise TypeError
def model_data_list(self, device_type: str) -> List[int]:
if device_type == 'cuda':
return self._model_data_cuda_list
elif device_type == 'cpu':
return self._model_data_cpu_list
else:
raise TypeError
def non_model_data_list(self, device_type: str) -> List[int]:
if device_type == 'cuda':
return self._non_model_data_cuda_list
elif device_type == 'cpu':
return self._non_model_data_cpu_list
else:
raise TypeError
def next_period_non_model_data_usage(self, device_type: str) -> int:
"""Get max non model data memory usage of current sampling period
Args:
device_type (str): device type, can be 'cpu' or 'cuda'.
Returns:
int: max non model data memory usage of current sampling period
"""
assert not self._start_flag, 'Cannot get mem stats info during collection phase.'
assert self._step_total > 0, 'Cannot get mem stats info before collection phase.'
next_non_model_data = self.non_model_data_list(device_type)[self._step_idx]
self._step_idx = (self._step_idx + 1) % self._step_total
return next_non_model_data
@property
def sampling_time(self):
return [t - self._sampling_time[0] for t in self._sampling_time]
def start_collection(self):
self._start_flag = True
self._mem_monitor.start()
def finish_collection(self):
self.sample_overall_data()
self._step_total = len(self._sampling_time)
self._start_flag = False
self._mem_monitor.finish()
def sample_model_data(self) -> None:
"""Sampling model data statistics.
"""
if self._start_flag:
cuda_mem = StatefulTensor.GST_MGR.total_mem['cuda']
cpu_mem = StatefulTensor.GST_MGR.total_mem['cpu']
self._model_data_cuda_list.append(cuda_mem)
self._model_data_cpu_list.append(cpu_mem)
def sample_overall_data(self) -> None:
"""Sampling non model data statistics.
"""
if self._start_flag:
# overall data recording is after model data recording
if len(self._model_data_cuda_list) == 0:
return
self._overall_cuda_list.append(self._mem_monitor.finish())
self._overall_cpu_list.append(colo_device_memory_used(torch.device('cpu')))
assert len(self._model_data_cuda_list) == len(self._overall_cuda_list)
self._non_model_data_cuda_list.append(self._overall_cuda_list[-1] - self._model_data_cuda_list[-1])
self._non_model_data_cpu_list.append(self._overall_cpu_list[-1] - self._model_data_cpu_list[-1])
self._sampling_time.append(time.time())
self._mem_monitor.start()
def clear(self) -> None:
self._model_data_cuda_list = []
self._overall_cuda_list = []
self._model_data_cpu_list = []
self._overall_cpu_list = []
self._non_model_data_cpu_list = []
self._non_model_data_cuda_list = []
self._start_flag = False
self._step_idx = 0
self._step_total = 0
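# Hedged usage sketch (not part of the original module): the intended call order
# for the two-phase protocol described in the class docstring. The training-step
# callable is illustrative only, and actually running this requires CUDA because
# the collector uses SyncCudaMemoryMonitor.
def _example_collect_first_iteration(collector: MemStatsCollector, run_step) -> None:
    collector.start_collection()
    collector.sample_model_data()    # record model data before the step
    run_step()                       # one forward/backward period
    collector.sample_overall_data()  # record overall usage for the period
    collector.finish_collection()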
|
tools.py | lonePatient/lookahead_pytorch | 171 | 11132508 | import numpy as np
from pathlib import Path
import json
import random
import torch
import os
def save_json(data, file_path):
'''
    Save data to a json file.
    :param data: the object to serialize
    :param file_path: destination file path (str or Path)
    :return:
'''
if not isinstance(file_path, Path):
file_path = Path(file_path)
# if isinstance(data,dict):
# data = json.dumps(data)
with open(str(file_path), 'w') as f:
json.dump(data, f)
def load_json(file_path):
'''
    Load data from a json file.
    :param file_path: source file path (str or Path)
    :return: the deserialized object
'''
if not isinstance(file_path, Path):
file_path = Path(file_path)
with open(str(file_path), 'r') as f:
data = json.load(f)
return data
class AverageMeter(object):
    '''
    Computes and stores the average and current value.
    Example:
        >>> loss = AverageMeter()
        >>> for step, batch in enumerate(train_data):
        >>>     pred = self.model(batch)
        >>>     raw_loss = self.metrics(pred, target)
        >>>     loss.update(raw_loss.item(), n=1)
        >>> cur_loss = loss.avg
    '''
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def seed_everything(seed=1029):
'''
    Fix the random seeds of python, numpy and torch for reproducibility.
    :param seed: the seed value
    :return:
'''
random.seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# some cudnn methods can be random even after fixing the seed
# unless you tell it to be deterministic
torch.backends.cudnn.deterministic = True |
scripts/bigrams.py | clim140/im2recipe-Pytorch | 217 | 11132526 | import json
import re
import utils
import copy
import pickle
from proc import *
from params import get_parser
parser = get_parser()
params = parser.parse_args()
create = params.create_bigrams # true to compute and store bigrams to disk
# false to go through top N bigrams and create annotations
print('Loading dataset.')
DATASET = params.dataset
dataset = utils.Layer.merge([utils.Layer.L1, utils.Layer.L2, utils.Layer.INGRS],DATASET)
if create:
print("Creating bigrams...")
titles = []
for i in range(len(dataset)):
title = dataset[i]['title']
if dataset[i]['partition'] == 'train':
titles.append(title)
fileinst = open('../data/titles' + params.suffix + '.txt','w')
for t in titles:
fileinst.write( t + " ");
fileinst.close()
import nltk
from nltk.corpus import stopwords
f = open('../data/titles' +params.suffix+'.txt')
raw = f.read()
tokens = nltk.word_tokenize(raw)
tokens = [i.lower() for i in tokens]
tokens = [i for i in tokens if i not in stopwords.words('english')]
#Create your bigrams
bgs = nltk.bigrams(tokens)
#compute frequency distribution for all the bigrams in the text
fdist = nltk.FreqDist(bgs)
pickle.dump(fdist,open('../data/bigrams'+params.suffix+'.pkl','wb'))
else:
N = 2000
MAX_CLASSES = 1000
MIN_SAMPLES = params.tsamples
n_class = 1
ind2class = {}
class_dict = {}
fbd_chars = ["," , "&" , "(" , ")" , "'", "'s", "!","?","%","*",".",
"free","slow","low","old","easy","super","best","-","fresh",
"ever","fast","quick","fat","ww","n'","'n","n","make","con",
"e","minute","minutes","portabella","de","of","chef","lo",
"rachael","poor","man","ii","i","year","new","style"]
print('Loading ingr vocab.')
with open(params.vocab) as f_vocab:
ingr_vocab = {w.rstrip(): i+2 for i, w in enumerate(f_vocab)} # +1 for lua
ingr_vocab['</i>'] = 1
# store number of ingredients (compute only once)
ningrs_list = []
for i,entry in enumerate(dataset):
ingr_detections = detect_ingrs(entry, ingr_vocab)
ningrs = len(ingr_detections)
ningrs_list.append(ningrs)
# load bigrams
fdist = pickle.load(open('../data/bigrams'+params.suffix+'.pkl','rb'))
Nmost = fdist.most_common(N)
# check bigrams
queries = []
for oc in Nmost:
counts = {'train': 0, 'val': 0,'test':0}
if oc[0][0] in fbd_chars or oc[0][1] in fbd_chars:
continue
query = oc[0][0] + ' ' + oc[0][1]
queries.append(query)
matching_ids = []
for i,entry in enumerate(dataset):
ninstrs = len(entry['instructions'])
imgs = entry.get('images')
ningrs =ningrs_list[i]
title = entry['title'].lower()
id = entry['id']
            if query in title and ninstrs < params.maxlen and imgs and ningrs < params.maxlen and ningrs != 0:  # if match, add class to id
# we only add if previous class was background
# or if there is no class for the id
if id in class_dict:
if class_dict[id] == 0:
class_dict[id] = n_class
counts[dataset[i]['partition']] +=1
matching_ids.append(id)
else:
class_dict[id] = n_class
counts[dataset[i]['partition']] +=1
matching_ids.append(id)
else: # if there's no match
if not id in class_dict: # add background class unless not empty
class_dict[id] = 0 # background class
if counts['train'] > MIN_SAMPLES and counts['val'] > 0 and counts['test'] > 0:
ind2class[n_class] = query
print(n_class, query, counts)
n_class+=1
else:
for id in matching_ids: # reset classes to background
class_dict[id] = 0
if n_class > MAX_CLASSES:
break
# get food101 categories (if not present)
food101 = []
with open(params.f101_cats,'r') as f_classes:
for l in f_classes:
cls = l.lower().rstrip().replace('_', ' ')
if cls not in queries:
food101.append(cls)
for query in food101:
counts = {'train': 0, 'val': 0,'test':0}
matching_ids = []
for i,entry in enumerate(dataset):
ninstrs = len(entry['instructions'])
imgs = entry.get('images')
ningrs =ningrs_list[i]
title = entry['title'].lower()
id = entry['id']
            if query in title and ninstrs < params.maxlen and imgs and ningrs < params.maxlen and ningrs != 0:  # if match, add class to id
# we only add if previous class was background
# or if there is no class for the id
if id in class_dict:
if class_dict[id] == 0:
class_dict[id] = n_class
counts[dataset[i]['partition']] +=1
matching_ids.append(id)
else:
class_dict[id] = n_class
counts[dataset[i]['partition']] +=1
matching_ids.append(id)
else: # if there's no match
if not id in class_dict: # add background class unless not empty
class_dict[id] = 0 # background class
if counts['train'] > MIN_SAMPLES and counts['val'] > 0 and counts['test'] > 0:
ind2class[n_class] = query
print(n_class, query, counts)
n_class+=1
else:
for id in matching_ids: # reset classes to background
class_dict[id] = 0
ind2class[0] = 'background'
print(len(ind2class))
with open('../data/classes'+params.suffix+'.pkl','wb') as f:
pickle.dump(class_dict,f)
pickle.dump(ind2class,f)
|
mayan/apps/sources/source_backends/mixins.py | bonitobonita24/Mayan-EDMS | 343 | 11132527 | <reponame>bonitobonita24/Mayan-EDMS
import json
import logging
from django import forms
from django.apps import apps
from django.utils.encoding import force_text
from django.utils.translation import ugettext, ugettext_lazy as _
from mayan.apps.documents.models.document_file_models import DocumentFile
from mayan.apps.documents.models.document_type_models import DocumentType
from mayan.apps.documents.tasks import task_document_file_upload
from ..classes import DocumentCreateWizardStep
from ..tasks import task_process_document_upload
from .literals import (
DEFAULT_PERIOD_INTERVAL, SOURCE_INTERACTIVE_UNCOMPRESS_CHOICES,
SOURCE_UNCOMPRESS_CHOICE_ALWAYS, SOURCE_UNCOMPRESS_CHOICE_ASK
)
logger = logging.getLogger(name=__name__)
class SourceBaseMixin:
def callback(self, document_file, **kwargs):
return
def clean(self):
return
def get_callback_kwargs(self):
return {}
def get_document(self):
raise NotImplementedError
def get_document_description(self):
return None
def get_document_file_action(self):
return None
def get_document_file_comment(self):
return None
def get_document_label(self):
return None
def get_document_language(self):
return None
def get_document_type(self):
raise NotImplementedError
def get_task_extra_kwargs(self):
return {}
def get_user(self):
return None
def process_document_file(self, **kwargs):
self.process_kwargs = kwargs
document = self.get_document()
user = self.get_user()
if user:
user_id = user.pk
else:
user_id = None
for self.shared_uploaded_file in self.get_shared_uploaded_files() or ():
# Call the hooks here too as in the model for early detection and
# exception raise when using the views.
DocumentFile.execute_pre_create_hooks(
kwargs={
'document': document,
'file_object': self.shared_uploaded_file,
'user': user
}
)
kwargs = {
'action': self.get_document_file_action(),
'comment': self.get_document_file_comment(),
'document_id': document.pk,
'shared_uploaded_file_id': self.shared_uploaded_file.pk,
'user_id': user_id
}
kwargs.update(self.get_task_extra_kwargs())
task_document_file_upload.apply_async(kwargs=kwargs)
def process_documents(self, **kwargs):
self.process_kwargs = kwargs
document_type = self.get_document_type()
user = self.get_user()
if user:
user_id = user.pk
else:
user_id = None
for self.shared_uploaded_file in self.get_shared_uploaded_files() or ():
kwargs = {
'callback_kwargs': self.get_callback_kwargs(),
'description': self.get_document_description(),
'document_type_id': document_type.pk,
'label': self.get_document_label(),
'language': self.get_document_language(),
'shared_uploaded_file_id': self.shared_uploaded_file.pk,
'source_id': self.model_instance_id,
'user_id': user_id
}
kwargs.update(self.get_task_extra_kwargs())
task_process_document_upload.apply_async(kwargs=kwargs)
class SourceBackendCompressedMixin:
uncompress_choices = SOURCE_INTERACTIVE_UNCOMPRESS_CHOICES
@classmethod
def get_setup_form_schema(cls):
result = super().get_setup_form_schema()
result['fields'].update(
{
'uncompress': {
'label': _('Uncompress'),
'class': 'django.forms.ChoiceField',
'default': SOURCE_UNCOMPRESS_CHOICE_ASK,
'help_text': _(
'Whether to expand or not compressed archives.'
), 'kwargs': {
'choices': cls.uncompress_choices,
}, 'required': True
}
}
)
result['field_order'] = ('uncompress',) + result['field_order']
result['widgets'].update(
{
'uncompress': {
'class': 'django.forms.widgets.Select', 'kwargs': {
'attrs': {'class': 'select2'},
}
}
}
)
return result
@classmethod
def get_upload_form_class(cls):
class CompressedSourceUploadForm(super().get_upload_form_class()):
expand = forms.BooleanField(
label=_('Expand compressed files'), required=False,
help_text=ugettext(
'Upload a compressed file\'s contained files as '
'individual documents.'
)
)
def __init__(self, *args, **kwargs):
self.field_order = ['expand']
super().__init__(*args, **kwargs)
return CompressedSourceUploadForm
def get_expand(self):
if self.kwargs['uncompress'] == SOURCE_UNCOMPRESS_CHOICE_ASK:
return self.process_kwargs['forms']['source_form'].cleaned_data.get('expand')
else:
if self.kwargs['uncompress'] == SOURCE_UNCOMPRESS_CHOICE_ALWAYS:
return True
else:
return False
def get_task_extra_kwargs(self):
return {'expand': self.get_expand()}
class SourceBackendInteractiveMixin:
is_interactive = True
def callback(self, document_file, **kwargs):
DocumentCreateWizardStep.post_upload_process(
document=document_file.document,
query_string=kwargs.get('query_string', '')
)
def get_callback_kwargs(self):
query_string = ''
query_dict = self.process_kwargs['request'].GET.copy()
query_dict.update(self.process_kwargs['request'].POST)
# Convert into a string. Make sure it is a QueryDict object from a
# request and not just a simple dictionary.
if hasattr(query_dict, 'urlencode'):
query_string = query_dict.urlencode()
return {
'query_string': query_string
}
def get_document(self):
return self.process_kwargs['document']
def get_document_description(self):
return self.process_kwargs['forms']['document_form'].cleaned_data.get('description')
def get_document_file_action(self):
return int(self.process_kwargs['forms']['document_form'].cleaned_data.get('action'))
def get_document_file_comment(self):
return self.process_kwargs['forms']['document_form'].cleaned_data.get('comment')
def get_document_label(self):
return self.process_kwargs['forms']['document_form'].get_final_label(
filename=force_text(self.shared_uploaded_file)
)
def get_document_language(self):
return self.process_kwargs['forms']['document_form'].cleaned_data.get('language')
def get_document_type(self):
return self.process_kwargs['document_type']
def get_user(self):
if not self.process_kwargs['request'].user.is_anonymous:
return self.process_kwargs['request'].user
else:
return None
class SourceBackendPeriodicMixin:
@classmethod
def get_setup_form_schema(cls):
result = super().get_setup_form_schema()
result['fields'].update(
{
'document_type_id': {
'class': 'django.forms.ChoiceField',
'default': '',
'help_text': _(
'Assign a document type to documents uploaded from this '
'source.'
),
'kwargs': {
'choices': [(document_type.id, document_type) for document_type in DocumentType.objects.all()],
},
'label': _('Document type'),
'required': True
},
'interval': {
'class': 'django.forms.IntegerField',
'default': DEFAULT_PERIOD_INTERVAL,
'help_text': _(
'Interval in seconds between checks for new '
'documents.'
),
'kwargs': {
'min_value': 0
},
'label': _('Interval'),
'required': True
},
}
)
result['field_order'] = ('document_type_id', 'interval',) + result['field_order']
result['widgets'].update(
{
'document_type_id': {
'class': 'django.forms.widgets.Select', 'kwargs': {
'attrs': {'class': 'select2'},
}
}
}
)
return result
def create(self):
IntervalSchedule = apps.get_model(
app_label='django_celery_beat', model_name='IntervalSchedule'
)
PeriodicTask = apps.get_model(
app_label='django_celery_beat', model_name='PeriodicTask'
)
# Create a new interval or use an existing one
interval_instance, created = IntervalSchedule.objects.get_or_create(
every=self.kwargs['interval'], period='seconds'
)
PeriodicTask.objects.create(
name=self.get_periodic_task_name(),
interval=interval_instance,
task='mayan.apps.sources.tasks.task_source_process_document',
kwargs=json.dumps(obj={'source_id': self.model_instance_id})
)
def delete(self):
self.delete_periodic_task(pk=self.model_instance_id)
def get_document_type(self):
return DocumentType.objects.get(pk=self.kwargs['document_type_id'])
def delete_periodic_task(self, pk=None):
PeriodicTask = apps.get_model(
app_label='django_celery_beat', model_name='PeriodicTask'
)
try:
periodic_task = PeriodicTask.objects.get(
name=self.get_periodic_task_name(pk=pk)
)
interval_instance = periodic_task.interval
if tuple(interval_instance.periodictask_set.values_list('id', flat=True)) == (periodic_task.pk,):
# Only delete the interval if nobody else is using it.
interval_instance.delete()
else:
periodic_task.delete()
except PeriodicTask.DoesNotExist:
logger.warning(
'Tried to delete non existent periodic task "%s"',
self.get_periodic_task_name(pk=pk)
)
def get_periodic_task_name(self, pk=None):
return 'check_interval_source-{}'.format(pk or self.model_instance_id)
def save(self):
self.delete_periodic_task()
self.create()
|
mmcv/fileio/file_client.py | lyttonhao/mmcv | 549 | 11132541 | import inspect
import warnings
from abc import ABCMeta, abstractmethod
class BaseStorageBackend(metaclass=ABCMeta):
"""Abstract class of storage backends.
All backends need to implement two apis: ``get()`` and ``get_text()``.
``get()`` reads the file as a byte stream and ``get_text()`` reads the file
    as text.
"""
@abstractmethod
def get(self, filepath):
pass
@abstractmethod
def get_text(self, filepath):
pass
class CephBackend(BaseStorageBackend):
"""Ceph storage backend.
Args:
path_mapping (dict|None): path mapping dict from local path to Petrel
path. When ``path_mapping={'src': 'dst'}``, ``src`` in ``filepath``
will be replaced by ``dst``. Default: None.
"""
def __init__(self, path_mapping=None):
try:
import ceph
            warnings.warn('Ceph is deprecated in favor of Petrel.')
except ImportError:
raise ImportError('Please install ceph to enable CephBackend.')
self._client = ceph.S3Client()
assert isinstance(path_mapping, dict) or path_mapping is None
self.path_mapping = path_mapping
def get(self, filepath):
filepath = str(filepath)
if self.path_mapping is not None:
for k, v in self.path_mapping.items():
filepath = filepath.replace(k, v)
value = self._client.Get(filepath)
value_buf = memoryview(value)
return value_buf
def get_text(self, filepath):
raise NotImplementedError
class PetrelBackend(BaseStorageBackend):
"""Petrel storage backend (for internal use).
Args:
path_mapping (dict|None): path mapping dict from local path to Petrel
path. When `path_mapping={'src': 'dst'}`, `src` in `filepath` will
be replaced by `dst`. Default: None.
enable_mc (bool): whether to enable memcached support. Default: True.
"""
def __init__(self, path_mapping=None, enable_mc=True):
try:
from petrel_client import client
except ImportError:
raise ImportError('Please install petrel_client to enable '
'PetrelBackend.')
self._client = client.Client(enable_mc=enable_mc)
assert isinstance(path_mapping, dict) or path_mapping is None
self.path_mapping = path_mapping
def get(self, filepath):
filepath = str(filepath)
if self.path_mapping is not None:
for k, v in self.path_mapping.items():
filepath = filepath.replace(k, v)
value = self._client.Get(filepath)
value_buf = memoryview(value)
return value_buf
def get_text(self, filepath):
raise NotImplementedError
class MemcachedBackend(BaseStorageBackend):
"""Memcached storage backend.
Attributes:
server_list_cfg (str): Config file for memcached server list.
client_cfg (str): Config file for memcached client.
sys_path (str | None): Additional path to be appended to `sys.path`.
Default: None.
"""
def __init__(self, server_list_cfg, client_cfg, sys_path=None):
if sys_path is not None:
import sys
sys.path.append(sys_path)
try:
import mc
except ImportError:
raise ImportError(
'Please install memcached to enable MemcachedBackend.')
self.server_list_cfg = server_list_cfg
self.client_cfg = client_cfg
self._client = mc.MemcachedClient.GetInstance(self.server_list_cfg,
self.client_cfg)
# mc.pyvector servers as a point which points to a memory cache
self._mc_buffer = mc.pyvector()
def get(self, filepath):
filepath = str(filepath)
import mc
self._client.Get(filepath, self._mc_buffer)
value_buf = mc.ConvertBuffer(self._mc_buffer)
return value_buf
def get_text(self, filepath):
raise NotImplementedError
class LmdbBackend(BaseStorageBackend):
"""Lmdb storage backend.
Args:
db_path (str): Lmdb database path.
readonly (bool, optional): Lmdb environment parameter. If True,
disallow any write operations. Default: True.
lock (bool, optional): Lmdb environment parameter. If False, when
concurrent access occurs, do not lock the database. Default: False.
readahead (bool, optional): Lmdb environment parameter. If False,
disable the OS filesystem readahead mechanism, which may improve
random read performance when a database is larger than RAM.
Default: False.
Attributes:
db_path (str): Lmdb database path.
"""
def __init__(self,
db_path,
readonly=True,
lock=False,
readahead=False,
**kwargs):
try:
import lmdb
except ImportError:
raise ImportError('Please install lmdb to enable LmdbBackend.')
self.db_path = str(db_path)
self._client = lmdb.open(
self.db_path,
readonly=readonly,
lock=lock,
readahead=readahead,
**kwargs)
def get(self, filepath):
"""Get values according to the filepath.
Args:
filepath (str | obj:`Path`): Here, filepath is the lmdb key.
"""
filepath = str(filepath)
with self._client.begin(write=False) as txn:
value_buf = txn.get(filepath.encode('ascii'))
return value_buf
def get_text(self, filepath):
raise NotImplementedError
class HardDiskBackend(BaseStorageBackend):
"""Raw hard disks storage backend."""
def get(self, filepath):
filepath = str(filepath)
with open(filepath, 'rb') as f:
value_buf = f.read()
return value_buf
def get_text(self, filepath):
filepath = str(filepath)
with open(filepath, 'r') as f:
value_buf = f.read()
return value_buf
class FileClient:
"""A general file client to access files in different backend.
The client loads a file or text in a specified backend from its path
and return it as a binary file. it can also register other backend
accessor with a given name and backend class.
Attributes:
backend (str): The storage backend type. Options are "disk", "ceph",
"memcached" and "lmdb".
client (:obj:`BaseStorageBackend`): The backend object.
"""
_backends = {
'disk': HardDiskBackend,
'ceph': CephBackend,
'memcached': MemcachedBackend,
'lmdb': LmdbBackend,
'petrel': PetrelBackend,
}
def __init__(self, backend='disk', **kwargs):
if backend not in self._backends:
raise ValueError(
f'Backend {backend} is not supported. Currently supported ones'
f' are {list(self._backends.keys())}')
self.backend = backend
self.client = self._backends[backend](**kwargs)
@classmethod
def _register_backend(cls, name, backend, force=False):
if not isinstance(name, str):
raise TypeError('the backend name should be a string, '
f'but got {type(name)}')
if not inspect.isclass(backend):
raise TypeError(
f'backend should be a class but got {type(backend)}')
if not issubclass(backend, BaseStorageBackend):
raise TypeError(
f'backend {backend} is not a subclass of BaseStorageBackend')
if not force and name in cls._backends:
raise KeyError(
f'{name} is already registered as a storage backend, '
'add "force=True" if you want to override it')
cls._backends[name] = backend
@classmethod
def register_backend(cls, name, backend=None, force=False):
"""Register a backend to FileClient.
This method can be used as a normal class method or a decorator.
.. code-block:: python
class NewBackend(BaseStorageBackend):
def get(self, filepath):
return filepath
def get_text(self, filepath):
return filepath
FileClient.register_backend('new', NewBackend)
or
.. code-block:: python
@FileClient.register_backend('new')
class NewBackend(BaseStorageBackend):
def get(self, filepath):
return filepath
def get_text(self, filepath):
return filepath
Args:
name (str): The name of the registered backend.
backend (class, optional): The backend class to be registered,
which must be a subclass of :class:`BaseStorageBackend`.
When this method is used as a decorator, backend is None.
Defaults to None.
force (bool, optional): Whether to override the backend if the name
has already been registered. Defaults to False.
"""
if backend is not None:
cls._register_backend(name, backend, force=force)
return
def _register(backend_cls):
cls._register_backend(name, backend_cls, force=force)
return backend_cls
return _register
def get(self, filepath):
return self.client.get(filepath)
def get_text(self, filepath):
return self.client.get_text(filepath)
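# Hedged usage sketch (not part of the original module): register a trivial
# in-memory backend and read through the generic FileClient front-end. The
# backend and its sample data are illustrative only.
class _DictBackend(BaseStorageBackend):
    """Toy backend that serves values from a dict keyed by filepath."""
    def __init__(self, store=None):
        self.store = store if store is not None else {'hello.txt': b'hello world'}
    def get(self, filepath):
        return self.store[str(filepath)]
    def get_text(self, filepath):
        return self.get(filepath).decode('utf-8')
def _example_use_dict_backend():
    FileClient.register_backend('dict', _DictBackend, force=True)
    client = FileClient(backend='dict')
    return client.get_text('hello.txt')  # -> 'hello world'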
|
trankit/adapter_transformers/configuration_xlnet.py | jsteggink/trankit | 613 | 11132543 | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" XLNet configuration """
import logging
from .configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"xlnet-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-base-cased-config.json",
"xlnet-large-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/xlnet-large-cased-config.json",
}
class XLNetConfig(PretrainedConfig):
"""
This is the configuration class to store the configuration of a :class:`~transformers.XLNetModel`.
It is used to instantiate an XLNet model according to the specified arguments, defining the model
architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of
the `xlnet-large-cased <https://huggingface.co/xlnet-large-cased>`__ architecture.
Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used
to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`
for more information.
Args:
vocab_size (:obj:`int`, optional, defaults to 32000):
            Vocabulary size of the XLNet model. Defines the number of different tokens that
            can be represented by the `inputs_ids` passed to the forward method of :class:`~transformers.XLNetModel`.
d_model (:obj:`int`, optional, defaults to 1024):
Dimensionality of the encoder layers and the pooler layer.
n_layer (:obj:`int`, optional, defaults to 24):
Number of hidden layers in the Transformer encoder.
n_head (:obj:`int`, optional, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
d_inner (:obj:`int`, optional, defaults to 4096):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
ff_activation (:obj:`string`, optional, defaults to "gelu"):
The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
untie_r (:obj:`boolean`, optional, defaults to :obj:`True`):
            Whether to untie relative position biases.
attn_type (:obj:`string`, optional, defaults to "bi"):
The attention type used by the model. Set 'bi' for XLNet, 'uni' for Transformer-XL.
initializer_range (:obj:`float`, optional, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (:obj:`float`, optional, defaults to 1e-12):
The epsilon used by the layer normalization layers.
dropout (:obj:`float`, optional, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
mem_len (:obj:`int` or :obj:`None`, optional, defaults to :obj:`None`):
The number of tokens to cache. The key/value pairs that have already been pre-computed
in a previous forward pass won't be re-computed. See the
`quickstart <https://huggingface.co/transformers/quickstart.html#using-the-past>`__
for more information.
reuse_len (:obj:`int` or :obj:`None`, optional, defaults to :obj:`None`):
The number of tokens in the current batch to be cached and reused in the future.
bi_data (:obj:`boolean`, optional, defaults to :obj:`False`):
            Whether to use a bidirectional input pipeline. Usually set to `True` during
pretraining and `False` during finetuning.
clamp_len (:obj:`int`, optional, defaults to -1):
Clamp all relative distances larger than clamp_len.
Setting this attribute to -1 means no clamping.
same_length (:obj:`boolean`, optional, defaults to :obj:`False`):
Whether to use the same attention length for each token.
summary_type (:obj:`string`, optional, defaults to "last"):
            Argument used when doing sequence summary. Used in the multiple choice head in
            :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.
Is one of the following options:
- 'last' => take the last token hidden state (like XLNet)
- 'first' => take the first token hidden state (like Bert)
- 'mean' => take the mean of all tokens hidden states
- 'cls_index' => supply a Tensor of classification token position (GPT/GPT-2)
- 'attn' => Not implemented now, use multi-head attention
summary_use_proj (:obj:`boolean`, optional, defaults to :obj:`True`):
            Argument used when doing sequence summary. Used in the multiple choice head in
            :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.
            Whether to add a projection after the vector extraction.
summary_activation (:obj:`string` or :obj:`None`, optional, defaults to :obj:`None`):
            Argument used when doing sequence summary. Used in the multiple choice head in
            :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.
            'tanh' => add a tanh activation to the output; any other value => no activation.
summary_proj_to_labels (:obj:`boolean`, optional, defaults to :obj:`True`):
            Argument used when doing sequence summary. Used in the multiple choice head in
            :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.
            If True, the projection outputs to config.num_labels classes (otherwise to hidden_size).
summary_last_dropout (:obj:`float`, optional, defaults to 0.1):
            Argument used when doing sequence summary. Used in the multiple choice head in
            :class:`~transformers.XLNetForSequenceClassification` and :class:`~transformers.XLNetForMultipleChoice`.
            The dropout probability applied after the projection and activation.
start_n_top (:obj:`int`, optional, defaults to 5):
Used in the SQuAD evaluation script for XLM and XLNet.
end_n_top (:obj:`int`, optional, defaults to 5):
Used in the SQuAD evaluation script for XLM and XLNet.
Example::
from transformers import XLNetConfig, XLNetModel
# Initializing a XLNet configuration
configuration = XLNetConfig()
# Initializing a model from the configuration
model = XLNetModel(configuration)
# Accessing the model configuration
configuration = model.config
"""
model_type = "xlnet"
def __init__(
self,
vocab_size=32000,
d_model=1024,
n_layer=24,
n_head=16,
d_inner=4096,
ff_activation="gelu",
untie_r=True,
attn_type="bi",
initializer_range=0.02,
layer_norm_eps=1e-12,
dropout=0.1,
mem_len=None,
reuse_len=None,
bi_data=False,
clamp_len=-1,
same_length=False,
summary_type="last",
summary_use_proj=True,
summary_activation="tanh",
summary_last_dropout=0.1,
start_n_top=5,
end_n_top=5,
pad_token_id=5,
bos_token_id=1,
eos_token_id=2,
**kwargs
):
"""Constructs XLNetConfig.
"""
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
self.vocab_size = vocab_size
self.d_model = d_model
self.n_layer = n_layer
self.n_head = n_head
assert d_model % n_head == 0
self.d_head = d_model // n_head
self.ff_activation = ff_activation
self.d_inner = d_inner
self.untie_r = untie_r
self.attn_type = attn_type
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.dropout = dropout
self.mem_len = mem_len
self.reuse_len = reuse_len
self.bi_data = bi_data
self.clamp_len = clamp_len
self.same_length = same_length
self.summary_type = summary_type
self.summary_use_proj = summary_use_proj
self.summary_activation = summary_activation
self.summary_last_dropout = summary_last_dropout
self.start_n_top = start_n_top
self.end_n_top = end_n_top
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.eos_token_id = eos_token_id
@property
def max_position_embeddings(self):
return -1
@property
def n_token(self): # Backward compatibility
return self.vocab_size
@n_token.setter
def n_token(self, value): # Backward compatibility
self.vocab_size = value
@property
def hidden_size(self):
return self.d_model
@property
def num_attention_heads(self):
return self.n_head
@property
def num_hidden_layers(self):
return self.n_layer
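if __name__ == '__main__':
    # Hedged usage sketch added for illustration; it is not part of the
    # original file. It instantiates XLNetConfig with small, arbitrary values
    # (not recommended settings) and checks the aliasing properties defined
    # above. Because of the relative import at the top, run this with
    # ``python -m trankit.adapter_transformers.configuration_xlnet``.
    config = XLNetConfig(vocab_size=1000, d_model=64, n_layer=2, n_head=4)
    assert config.hidden_size == 64           # alias for d_model
    assert config.num_hidden_layers == 2      # alias for n_layer
    assert config.num_attention_heads == 4    # alias for n_head
    assert config.d_head == 64 // 4           # derived in __init__
    config.n_token = 2000                     # backward-compatible setter
    assert config.vocab_size == 2000
    print(config)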
|
var/spack/repos/builtin/packages/cppcheck/package.py | LiamBindle/spack | 2,360 | 11132548 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Cppcheck(MakefilePackage):
"""A tool for static C/C++ code analysis."""
homepage = "http://cppcheck.sourceforge.net/"
url = "https://downloads.sourceforge.net/project/cppcheck/cppcheck/1.78/cppcheck-1.78.tar.bz2"
version('2.1', sha256='ab26eeef039e5b58aac01efb8cb664f2cc16bf9879c61bc93cd00c95be89a5f7')
version('2.0', sha256='5f77d36a37ed9ef58ea8b499e4b1db20468114c9ca12b5fb39b95906cab25a3f')
version('1.90', sha256='43758d56613596c29440e55ea96a5a13e36f81ca377a8939648b5242faf61883')
version('1.89', sha256='5f02389cb24554f5a7ac3d29db8ac19c740f23c92e97eb7fec3881fe86c26f2c')
version('1.88', sha256='bb25441749977713476dc630dfe7617b3d9e95c46fec0edbec4ff8ff6fda38ca')
version('1.87', sha256='e3b0a46747822471df275417d4b74b56ecac88367433e7428f39288a32c581ca')
version('1.81', sha256='bb694f37ae0b5fed48c6cdc2fb5e528daf32cefc64e16b1a520c5411323cf27e')
version('1.78', sha256='e42696f7d6321b98cb479ad9728d051effe543b26aca8102428f60b9850786b1')
version('1.72', sha256='9460b184ff2d8dd15344f3e2f42f634c86e4dd3303e1e9b3f13dc67536aab420')
version('1.68', sha256='add6e5e12b05ca02b356cd0ec7420ae0dcafddeaef183b4dfbdef59c617349b1')
variant('htmlreport', default=False, description="Install cppcheck-htmlreport")
depends_on('py-pygments', when='+htmlreport', type='run')
def build(self, spec, prefix):
make('CFGDIR={0}'.format(prefix.cfg))
def install(self, spec, prefix):
# Manually install the final cppcheck binary
mkdirp(prefix.bin)
install('cppcheck', prefix.bin)
install_tree('cfg', prefix.cfg)
if spec.satisfies('+htmlreport'):
install('htmlreport/cppcheck-htmlreport', prefix.bin)
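# Hedged usage notes added for illustration (not part of the original
# package recipe):
#   spack install cppcheck              # plain build, no HTML report helper
#   spack install cppcheck+htmlreport   # also installs cppcheck-htmlreport
#                                       # and needs py-pygments at run time
# Note that the CFGDIR value passed to make() in build() must match the
# directory populated by install_tree('cfg', prefix.cfg) in install(), or the
# installed binary cannot find its .cfg library configuration files.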
|
third_party/cppclean/cpp/headers.py | thomwiggers/toggldesktop | 544 | 11132573 |
# Copyright 2007 <NAME>
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Find and print the headers #include'd in a source file."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
from . import utils
__author__ = '<EMAIL> (<NAME>)'
def read_source(relative_filename, include_paths):
source = None
for path in include_paths:
filename = os.path.join(path, relative_filename)
source = utils.read_file(filename, False)
if source is not None:
return source, filename
return None, relative_filename
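if __name__ == '__main__':
    # Hedged usage sketch added for illustration; it is not part of the
    # original file. read_source() walks the include paths in order and
    # returns (source, resolved_filename) for the first hit, or
    # (None, relative_filename) when nothing matches. The header name and
    # include paths below are assumptions for the demo; because of the
    # package-relative import above, run this with ``python -m ...``.
    source, resolved = read_source('example.h', ['.', 'include', '/usr/include'])
    if source is None:
        print('not found on any include path:', resolved)
    else:
        print('found', resolved, 'with', len(source), 'characters')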
|
scripts/calc_gt_distribution.py | gist-ailab/bop_toolkit | 201 | 11132576 |
# Author: <NAME> (<EMAIL>)
# Center for Machine Perception, Czech Technical University in Prague
"""Calculates distribution of GT poses."""
import math
import numpy as np
import matplotlib.pyplot as plt
from bop_toolkit_lib import config
from bop_toolkit_lib import dataset_params
from bop_toolkit_lib import inout
from bop_toolkit_lib import misc
# PARAMETERS.
################################################################################
p = {
# See dataset_params.py for options.
'dataset': 'lm',
# Dataset split. Options: 'train', 'val', 'test'.
'dataset_split': 'test',
# Dataset split type. None = default. See dataset_params.py for options.
'dataset_split_type': None,
# Folder containing the BOP datasets.
'datasets_path': config.datasets_path,
}
################################################################################
# Load dataset parameters.
dp_split = dataset_params.get_split_params(
p['datasets_path'], p['dataset'], p['dataset_split'], p['dataset_split_type'])
scene_ids = dp_split['scene_ids']
dists = []
azimuths = []
elevs = []
visib_fracts = []
ims_count = 0
for scene_id in scene_ids:
misc.log('Processing - dataset: {} ({}, {}), scene: {}'.format(
p['dataset'], p['dataset_split'], p['dataset_split_type'], scene_id))
# Load GT poses.
scene_gt = inout.load_scene_gt(
dp_split['scene_gt_tpath'].format(scene_id=scene_id))
# Load info about the GT poses.
scene_gt_info = inout.load_json(
dp_split['scene_gt_info_tpath'].format(scene_id=scene_id), keys_to_int=True)
ims_count += len(scene_gt)
for im_id in scene_gt.keys():
for gt_id, im_gt in enumerate(scene_gt[im_id]):
# Object distance.
dist = np.linalg.norm(im_gt['cam_t_m2c'])
dists.append(dist)
# Camera origin in the model coordinate system.
cam_orig_m = -np.linalg.inv(im_gt['cam_R_m2c']).dot(
im_gt['cam_t_m2c'])
# Azimuth from [0, 360].
azimuth = math.atan2(cam_orig_m[1, 0], cam_orig_m[0, 0])
if azimuth < 0:
azimuth += 2.0 * math.pi
azimuths.append((180.0 / math.pi) * azimuth)
# Elevation from [-90, 90].
a = np.linalg.norm(cam_orig_m)
b = np.linalg.norm([cam_orig_m[0, 0], cam_orig_m[1, 0], 0])
elev = math.acos(b / a)
if cam_orig_m[2, 0] < 0:
elev = -elev
elevs.append((180.0 / math.pi) * elev)
# Visibility fraction.
visib_fracts.append(scene_gt_info[im_id][gt_id]['visib_fract'])
# Print stats.
misc.log('Stats of the GT poses in dataset {} {}:'.format(
p['dataset'], p['dataset_split']))
misc.log('Number of images: ' + str(ims_count))
misc.log('Min dist: {}'.format(np.min(dists)))
misc.log('Max dist: {}'.format(np.max(dists)))
misc.log('Mean dist: {}'.format(np.mean(dists)))
misc.log('Min azimuth: {}'.format(np.min(azimuths)))
misc.log('Max azimuth: {}'.format(np.max(azimuths)))
misc.log('Mean azimuth: {}'.format(np.mean(azimuths)))
misc.log('Min elev: {}'.format(np.min(elevs)))
misc.log('Max elev: {}'.format(np.max(elevs)))
misc.log('Mean elev: {}'.format(np.mean(elevs)))
misc.log('Min visib fract: {}'.format(np.min(visib_fracts)))
misc.log('Max visib fract: {}'.format(np.max(visib_fracts)))
misc.log('Mean visib fract: {}'.format(np.mean(visib_fracts)))
# Visualize distributions.
plt.figure()
plt.hist(dists, bins=100)
plt.title('Object distance')
plt.figure()
plt.hist(azimuths, bins=100)
plt.title('Azimuth')
plt.figure()
plt.hist(elevs, bins=100)
plt.title('Elevation')
plt.figure()
plt.hist(visib_fracts, bins=100)
plt.title('Visibility fraction')
plt.show()
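# Hedged worked example added for illustration (comments only; nothing here
# is executed). For a GT pose with rotation R = cam_R_m2c and translation
# t = cam_t_m2c, the camera origin in model coordinates used above is
#     cam_orig_m = -inv(R).dot(t)
# from which the pose statistics follow as
#     dist    = ||t||
#     azimuth = atan2(y, x) wrapped into [0, 360) degrees
#     elev    = acos(||(x, y, 0)|| / ||(x, y, z)||), negated when z < 0.
# For instance, with R = identity and t = [0, 0, 1000] mm, cam_orig_m is
# (0, 0, -1000), giving dist = 1000 mm, azimuth = 0 deg and elev = -90 deg.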
|