max_stars_repo_path (string, lengths 4-245) | max_stars_repo_name (string, lengths 7-115) | max_stars_count (int64, 101-368k) | id (string, lengths 2-8) | content (string, lengths 6-1.03M) |
---|---|---|---|---|
unit_test/options_test.py | bbayles/cibuildwheel | 371 | 42115 | import platform as platform_module
import pytest
from cibuildwheel.__main__ import get_build_identifiers
from cibuildwheel.environment import parse_environment
from cibuildwheel.options import Options, _get_pinned_docker_images
from .utils import get_default_command_line_arguments
PYPROJECT_1 = """
[tool.cibuildwheel]
build = ["cp38*", "cp37*"]
environment = {FOO="BAR"}
test-command = "pyproject"
manylinux-x86_64-image = "manylinux1"
environment-pass = ["<PASSWORD>"]
[tool.cibuildwheel.macos]
test-requires = "else"
[[tool.cibuildwheel.overrides]]
select = "cp37*"
test-command = "pyproject-override"
manylinux-x86_64-image = "manylinux2014"
"""
def test_options_1(tmp_path, monkeypatch):
with tmp_path.joinpath("pyproject.toml").open("w") as f:
f.write(PYPROJECT_1)
args = get_default_command_line_arguments()
args.package_dir = str(tmp_path)
monkeypatch.setattr(platform_module, "machine", lambda: "x86_64")
options = Options(platform="linux", command_line_arguments=args)
identifiers = get_build_identifiers(
platform="linux",
build_selector=options.globals.build_selector,
architectures=options.globals.architectures,
)
override_display = """\
test_command: 'pyproject'
cp37-manylinux_x86_64: 'pyproject-override'"""
print(options.summary(identifiers))
assert override_display in options.summary(identifiers)
default_build_options = options.build_options(identifier=None)
assert default_build_options.environment == parse_environment('FOO="BAR"')
all_pinned_docker_images = _get_pinned_docker_images()
pinned_x86_64_docker_image = all_pinned_docker_images["x86_64"]
local = options.build_options("cp38-manylinux_x86_64")
assert local.manylinux_images is not None
assert local.test_command == "pyproject"
assert local.manylinux_images["x86_64"] == pinned_x86_64_docker_image["manylinux1"]
local = options.build_options("cp37-manylinux_x86_64")
assert local.manylinux_images is not None
assert local.test_command == "pyproject-override"
assert local.manylinux_images["x86_64"] == pinned_x86_64_docker_image["manylinux2014"]
def test_passthrough(tmp_path, monkeypatch):
with tmp_path.joinpath("pyproject.toml").open("w") as f:
f.write(PYPROJECT_1)
args = get_default_command_line_arguments()
args.package_dir = str(tmp_path)
monkeypatch.setattr(platform_module, "machine", lambda: "x86_64")
monkeypatch.setenv("EXAMPLE_ENV", "ONE")
options = Options(platform="linux", command_line_arguments=args)
default_build_options = options.build_options(identifier=None)
assert default_build_options.environment.as_dictionary(prev_environment={}) == {
"FOO": "BAR",
"EXAMPLE_ENV": "ONE",
}
@pytest.mark.parametrize(
"env_var_value",
[
"normal value",
'"value wrapped in quotes"',
"an unclosed single-quote: '",
'an unclosed double-quote: "',
"string\nwith\ncarriage\nreturns\n",
"a trailing backslash \\",
],
)
def test_passthrough_evil(tmp_path, monkeypatch, env_var_value):
args = get_default_command_line_arguments()
args.package_dir = str(tmp_path)
monkeypatch.setattr(platform_module, "machine", lambda: "x86_64")
monkeypatch.setenv("CIBW_ENVIRONMENT_PASS_LINUX", "ENV_VAR")
options = Options(platform="linux", command_line_arguments=args)
monkeypatch.setenv("ENV_VAR", env_var_value)
parsed_environment = options.build_options(identifier=None).environment
assert parsed_environment.as_dictionary(prev_environment={}) == {"ENV_VAR": env_var_value}
|
tests/starkex/test_helpers.py | clifton/dydx-v3-python | 109 | 42125 | <gh_stars>100-1000
from dydx3.starkex.helpers import fact_to_condition
from dydx3.starkex.helpers import generate_private_key_hex_unsafe
from dydx3.starkex.helpers import get_transfer_erc20_fact
from dydx3.starkex.helpers import nonce_from_client_id
from dydx3.starkex.helpers import private_key_from_bytes
from dydx3.starkex.helpers import private_key_to_public_hex
from dydx3.starkex.helpers import private_key_to_public_key_pair_hex
class TestHelpers():
def test_nonce_from_client_id(self):
assert nonce_from_client_id('') == 2018687061
assert nonce_from_client_id('1') == 3079101259
assert nonce_from_client_id('a') == 2951628987
assert nonce_from_client_id(
'A really long client ID used to identify an order or withdrawal',
) == 2913863714
assert nonce_from_client_id(
'A really long client ID used to identify an order or withdrawal!',
) == 230317226
def test_get_transfer_erc20_fact(self):
assert get_transfer_erc20_fact(
recipient='0x1234567890123456789012345678901234567890',
token_decimals=3,
human_amount=123.456,
token_address='0xaAaAaAaaAaAaAaaAaAAAAAAAAaaaAaAaAaaAaaAa',
salt=int('0x1234567890abcdef', 16),
).hex() == (
'34052387b5efb6132a42b244cff52a85a507ab319c414564d7a89207d4473672'
)
def test_fact_to_condition(self):
fact = bytes.fromhex(
'cf9492ae0554c642b57f5d9cabee36fb512dd6b6629bdc51e60efb3118b8c2d8'
)
condition = fact_to_condition(
'0xe4a295420b58a4a7aa5c98920d6e8a0ef875b17a',
fact,
)
assert hex(condition) == (
'0x4d794792504b063843afdf759534f5ed510a3ca52e7baba2e999e02349dd24'
)
def test_generate_private_key_hex_unsafe(self):
assert (
generate_private_key_hex_unsafe() !=
generate_private_key_hex_unsafe()
)
def test_private_key_from_bytes(self):
assert (
private_key_from_bytes(b'0') ==
'<KEY>'
)
assert (
private_key_from_bytes(b'a') ==
'0x1d61128b46faa109512e0e00fe9adf5ff52047ed61718eeeb7c0525dfcd2f8e'
)
assert (
private_key_from_bytes(
b'really long input data for key generation with the '
b'keyPairFromData() function'
) ==
'0x7c4946831bde597b73f1d5721af9c67731eafeb75c1b8e92ac457a61819a29'
)
def test_private_key_to_public_hex(self):
assert private_key_to_public_hex(
'<KEY>',
) == (
'<KEY>'
)
def test_private_key_to_public_key_pair_hex(self):
x, y = private_key_to_public_key_pair_hex(
'<KEY>',
)
assert x == (
'<KEY>'
)
assert y == (
'<KEY>'
)
|
plugins/custom_resolver/run_test/custom_resolver.py | jingtaoh/USD-Cookbook | 332 | 42153 | <reponame>jingtaoh/USD-Cookbook<gh_stars>100-1000
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""This module tests if the custom resolver works."""
# IMPORT FUTURE LIBRARIES
from __future__ import print_function
# IMPORT THIRD-PARTY LIBRARIES
from pxr import Ar
def main():
"""Run the main execution of the current script."""
print("This should still print an empty string", Ar.GetResolver().Resolve("this_wont_resolve"))
print("This should print /bar", Ar.GetResolver().Resolve("/foo"))
if __name__ == "__main__":
main()
|
caffe2/python/operator_test/jsd_ops_test.py | chocjy/caffe2 | 585 | 42188 | # Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
def entropy(p):
q = 1. - p
return -p * np.log(p) - q * np.log(q)
def jsd(p, q):
return [entropy(p / 2. + q / 2.) - entropy(p) / 2. - entropy(q) / 2.]
def jsd_grad(go, o, pq_list):
p, q = pq_list
m = (p + q) / 2.
return [np.log(p * (1 - m) / (1 - p) / m) / 2. * go, None]
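# Commentary on the reference math above (added note, not part of the original test code):
# entropy(p) is the binary (Bernoulli) entropy H(p) = -p*log(p) - (1-p)*log(1-p), and
# jsd(p, q) is the Jensen-Shannon divergence for Bernoulli parameters,
# JSD(p, q) = H((p + q)/2) - (H(p) + H(q))/2, evaluated element-wise; jsd_grad returns its
# analytic gradient with respect to p.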
class TestJSDOps(hu.HypothesisTestCase):
@given(n=st.integers(10, 100), **hu.gcs_cpu_only)
def test_bernoulli_jsd(self, n, gc, dc):
p = np.random.rand(n).astype(np.float32)
q = np.random.rand(n).astype(np.float32)
op = core.CreateOperator("BernoulliJSD", ["p", "q"], ["l"])
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[p, q],
reference=jsd,
output_to_grad='l',
grad_reference=jsd_grad,
)
|
pandapower/shortcircuit/__init__.py | yougnen/pandapower | 104 | 42228 | from pandapower.shortcircuit.calc_sc import calc_sc
from pandapower.shortcircuit.toolbox import * |
test/test_simulation.py | andrea-simonetto/qiskit | 2,660 | 42230 | <gh_stars>1000+
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests for Aer simulation"""
import qiskit
from .base import QiskitTestCase
class TestAerSimulation(QiskitTestCase):
"""Tests for Aer simulation"""
def test_execute_in_aer(self):
"""Test executing a circuit in an Aer simulator"""
qr = qiskit.QuantumRegister(1)
cr = qiskit.ClassicalRegister(1)
circuit = qiskit.QuantumCircuit(qr, cr)
circuit.h(qr[0])
circuit.measure(qr, cr)
backend = qiskit.Aer.get_backend('qasm_simulator')
shots = 2000
results = qiskit.execute(circuit, backend, shots=shots).result()
self.assertDictAlmostEqual({'0': 1000, '1': 1000},
results.get_counts(),
delta=100)
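# (Added note: the Hadamard gate puts the single qubit into an equal superposition, so with
# shots=2000 each of '0' and '1' is expected roughly 1000 times; delta=100 is about 4.5
# standard deviations of a Binomial(2000, 0.5) count, which keeps this check statistically
# loose enough to avoid flaky failures.)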
|
ivy/test/afterinit1.py | b1f6c1c4/cfg-enum | 113 | 42268 |
from ivy import ivy_module as im
from ivy.ivy_compiler import ivy_from_string
from ivy.tk_ui import new_ui
from ivy import ivy_utils as iu
prog = """#lang ivy1.6
type t
individual x(X:t) : t
object foo(me:t) = {
after init {
x(me) := me;
assert false
}
}
isolate iso_foo(me:t) = foo(me) with x(me)
"""
with im.Module():
iu.set_parameters({'mode':'induction','isolate':'iso_foo','show_compiled':'true'})
main_ui = new_ui()
ui = main_ui.add(ivy_from_string(prog))
main_ui.tk.update_idletasks()
main_ui.answer("OK")
ui.check_safety_node(ui.node(0))
assert not ui.node(0).safe
# ui.check_inductiveness()
# # ui = ui.cti
# cg = ui.current_concept_graph
# cg.show_relation(cg.relation('link(X,Y)'),'+')
# cg.gather()
# main_ui.answer("OK")
# cg.strengthen()
# main_ui.answer("OK")
# ui.check_inductiveness()
# # cg.show_relation(cg.relation('semaphore'),'+')
# cg.gather()
# main_ui.answer("View")
# cg.bmc_conjecture(bound=1)
# main_ui.mainloop()
|
blitzdb/fields/integer.py | marcinguy/blitzdb3 | 252 | 42277 | <gh_stars>100-1000
from .base import BaseField
class IntegerField(BaseField):
pass
|
cotk/scripts/resources.py | ishine/cotk | 117 | 42295 | '''
A command library that helps users upload their results to the dashboard.
'''
#!/usr/bin/env python
import argparse
from ..file_utils import get_resource_file_path, get_resource_list
from . import cli_constant as cli
def entry(args):
'''Entry point for showing resource paths and whether a resource is cached or not'''
resource_names = get_resource_list()
parser = argparse.ArgumentParser(prog="cotk resources", \
description="check resources site and whether s specific resource cache is available")
parser.add_argument("--show_all", action="store_true", help="Show path of all resources")
parser.add_argument("--show_stored", action="store_true", help="Show path of all stored resource")
parser.add_argument("--show", type=str, help="Show path of a specific resource")
cargs = parser.parse_args(args)
if cargs.show_all:
cli.LOGGER.info("{:30}\t{:100}".format(
"Resource IDs", "Cache paths"))
for resource in resource_names:
cache_path = get_resource_file_path("resources://"+resource, download=False)
if cache_path is not None:
cli.LOGGER.info("{:30}\t{:100}".format(
resource, cache_path))
else:
cli.LOGGER.info("{:30}\t{:100}".format(
resource, "Not cached"))
elif cargs.show_stored:
cli.LOGGER.info("{:30}\t{:100}".format(
"Resource IDs", "Cache paths"))
for resource in resource_names:
cache_path = get_resource_file_path("resources://"+resource, download=False)
if cache_path is not None:
cli.LOGGER.info("{:30}\t{:100}".format(
resource, cache_path))
elif cargs.show is not None:
if cargs.show[:12] != ("resources://"):
raise RuntimeError('Please input a string starting with "resources://"')
if cargs.show[12:] not in resource_names:
raise RuntimeError("Unkown resource name {}".format(cargs.show[12:]))
cache_path = get_resource_file_path(cargs.show, download=False)
if cache_path is not None:
cli.LOGGER.info("{:30}\t{:100}".format(
"Resource IDs", "Cache paths"))
cli.LOGGER.info("{:30}\t{:100}".format(
cargs.show, cache_path))
else:
cli.LOGGER.info("resource {} is not cached.".format(cargs.show))
else:
raise RuntimeError("Unkown params.")
|
examples/highfreq/highfreq_ops.py | wan9c9/qlib | 8,637 | 42326 | <filename>examples/highfreq/highfreq_ops.py<gh_stars>1000+
import numpy as np
import pandas as pd
import importlib
from qlib.data.ops import ElemOperator, PairOperator
from qlib.config import C
from qlib.data.cache import H
from qlib.data.data import Cal
from qlib.contrib.ops.high_freq import get_calendar_day
class DayLast(ElemOperator):
"""DayLast Operator
Parameters
----------
feature : Expression
feature instance
Returns
----------
feature:
a series in which each value equals the last value of its day
"""
def _load_internal(self, instrument, start_index, end_index, freq):
_calendar = get_calendar_day(freq=freq)
series = self.feature.load(instrument, start_index, end_index, freq)
return series.groupby(_calendar[series.index]).transform("last")
class FFillNan(ElemOperator):
"""FFillNan Operator
Parameters
----------
feature : Expression
feature instance
Returns
----------
feature:
a feature with NaN values forward-filled
"""
def _load_internal(self, instrument, start_index, end_index, freq):
series = self.feature.load(instrument, start_index, end_index, freq)
return series.fillna(method="ffill")
class BFillNan(ElemOperator):
"""BFillNan Operator
Parameters
----------
feature : Expression
feature instance
Returns
----------
feature:
a feature with NaN values backward-filled
"""
def _load_internal(self, instrument, start_index, end_index, freq):
series = self.feature.load(instrument, start_index, end_index, freq)
return series.fillna(method="bfill")
class Date(ElemOperator):
"""Date Operator
Parameters
----------
feature : Expression
feature instance
Returns
----------
feature:
a series in which each value is the date corresponding to feature.index
"""
def _load_internal(self, instrument, start_index, end_index, freq):
_calendar = get_calendar_day(freq=freq)
series = self.feature.load(instrument, start_index, end_index, freq)
return pd.Series(_calendar[series.index], index=series.index)
class Select(PairOperator):
"""Select Operator
Parameters
----------
feature_left : Expression
feature instance, select condition
feature_right : Expression
feature instance, select value
Returns
----------
feature:
value(feature_right) that meets the condition(feature_left)
"""
def _load_internal(self, instrument, start_index, end_index, freq):
series_condition = self.feature_left.load(instrument, start_index, end_index, freq)
series_feature = self.feature_right.load(instrument, start_index, end_index, freq)
return series_feature.loc[series_condition]
class IsNull(ElemOperator):
"""IsNull Operator
Parameters
----------
feature : Expression
feature instance
Returns
----------
feature:
A series indicating whether the feature is nan
"""
def _load_internal(self, instrument, start_index, end_index, freq):
series = self.feature.load(instrument, start_index, end_index, freq)
return series.isnull()
class Cut(ElemOperator):
"""Cut Operator
Parameters
----------
feature : Expression
feature instance
l : int
l > 0, delete the first l elements of feature (default is None, which means 0)
r : int
r < 0, delete the last -r elements of feature (default is None, which means 0)
Returns
----------
feature:
A series with the first l and last -r elements deleted from the feature.
Note: It is deleted from the raw data, not the sliced data
"""
def __init__(self, feature, l=None, r=None):
self.l = l
self.r = r
if (self.l is not None and self.l <= 0) or (self.r is not None and self.r >= 0):
raise ValueError("Cut operator l shoud > 0 and r should < 0")
super(Cut, self).__init__(feature)
def _load_internal(self, instrument, start_index, end_index, freq):
series = self.feature.load(instrument, start_index, end_index, freq)
return series.iloc[self.l : self.r]
def get_extended_window_size(self):
ll = 0 if self.l is None else self.l
rr = 0 if self.r is None else abs(self.r)
lft_etd, rght_etd = self.feature.get_extended_window_size()
lft_etd = lft_etd + ll
rght_etd = rght_etd + rr
return lft_etd, rght_etd
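# Illustrative usage sketch (commentary only; the string-expression syntax and the "$close"
# field name are assumptions based on how qlib feature expressions are typically written,
# not taken from this file): an expression such as "Cut(FFillNan($close), 10, -10)" would
# forward-fill NaNs in the close series and then drop the first 10 and last 10 elements of
# the raw data; get_extended_window_size() widens the requested window by (l, |r|) so the
# trimmed values can still be loaded upstream.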
|
test/unit/00.nop-commit.py | rescrv/Consus | 239 | 42333 | <gh_stars>100-1000
import consus
c1 = consus.Client()
t1 = c1.begin_transaction()
t1.commit()
c2 = consus.Client(b'127.0.0.1')
t2 = c1.begin_transaction()
t2.commit()
c3 = consus.Client('127.0.0.1')
t3 = c1.begin_transaction()
t3.commit()
c4 = consus.Client(b'127.0.0.1', 1982)
t4 = c1.begin_transaction()
t4.commit()
c5 = consus.Client('127.0.0.1', 1982)
t5 = c1.begin_transaction()
t5.commit()
c6 = consus.Client(b'127.0.0.1:1982')
t6 = c1.begin_transaction()
t6.commit()
c7 = consus.Client('127.0.0.1:1982')
t7 = c1.begin_transaction()
t7.commit()
c8 = consus.Client(b'[::]:1982,127.0.0.1:1982')
t8 = c1.begin_transaction()
t8.commit()
c9 = consus.Client('[::]:1982,127.0.0.1:1982')
t9 = c1.begin_transaction()
t9.commit()
|
syzygy/scripts/benchmark/ibmperf_test.py | nzeh/syzygy | 343 | 42361 | #!/usr/bin/python2.6
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittests for the ibmperf module.
If the IBM Performance Inspector tools are installed at "C:\ibmperf\bin" it
will run some tests using the actual tools. However, if the tools are not
installed it still runs a suite of tests using mocked versions of the tools.
"""
__author__ = "<EMAIL> (<NAME>)"
import ibmperf
import logging
import os
import random
import unittest
class MockPopen(object):
"""A mock subprocess.Popen object.
Implements "returncode" and "communicate", the only attributes/routines
used by the ibmperf module.
Attributes:
returncode: The return code of the mocked sub-process.
"""
def __init__(self, stdout="", stderr="", returncode=0,
raise_on_init=None, raise_on_communicate=None):
"""Initializes this mock Popen object with the given output and returncode.
Args:
stdout: The data to return for stdout in "communicate".
stderr: The data to return for stderr in "communicate".
returncode: The return code to expose via the "returncode" attribute.
raise_on_init: If this is not None, will cause the constructor to raise
an error. Expected to be a 2-tuple, containing (type, args), and will
call "raise type(args)".
raise_on_communicate: Similar to raise_on_init, but will cause the error
to be raised on calls to "communicate".
"""
if raise_on_init:
raise raise_on_init[0](*raise_on_init[1])
self._stdout = stdout
self._stderr = stderr
self.returncode = returncode
self._raise_on_communicate = raise_on_communicate
def communicate(self):
"""Simulates running the command, returning its stdout and stderr.
Raises an exception if raise_on_communicate was specified in the
constructor.
"""
if self._raise_on_communicate:
raise self._raise_on_communicate[0](*self._raise_on_communicate[1])
return (self._stdout, self._stderr)
class MockHardwarePerformanceCounter(ibmperf.HardwarePerformanceCounter):
"""A mocked ibmperf.HardwarePerformanceCounter object.
Replaces the _Popen member function with one that returns canned results.
"""
def __init__(self, popen_results, *args, **kwargs):
"""Initializes the mock HardwarePerformanceCounter object.
Passes args and kwargs directly through to the
ibmperf.HardwarePerformanceCounter initializer.
Args:
popen_results: A list of (type, args, kwargs) 3-tuples that will be
returned from calls to _Popen, in order.
"""
self._popen_results = list(popen_results)
super(MockHardwarePerformanceCounter, self).__init__(*args, **kwargs)
def AddPopenResult(self, result_tuple):
"""Adds the given result tuple to the queue of results to return.
Args:
result_tuple: A (type, args, kwargs) triplet.
"""
self._popen_results.append(result_tuple)
def _Popen(self, dummy_command_line):
"""Overrides _Popen from ibmperf.HardwarePerformanceCounter.
Returns the mocked object from the head of the _popen_results queue.
"""
object_type, args, kwargs = self._popen_results.pop(0)
return object_type(*args, **kwargs)
# A few specific metric names.
_CYCLES = "CYCLES"
_UOPS = "UOPS"
# A list of metrics that we will simulate supporting.
_METRICS = {
_CYCLES: None,
"NONHALTED_CYCLES": ("Number of cycles during which the processor is not "
"halted (and not in Thermal Trip on Pentium Ms)"),
"INSTR": "Number of instructions retired",
_UOPS: "Number of uOps retired",
"BRANCH": "Number of branch instruction retired",
"MISPRED_BRANCH": "Number of mispredicted branch instructions retired"}
# A generic command that is successful outputs nothing and returns the default
# error code of 0.
_GENERIC_SUCCESS = (MockPopen, [], {})
# Simulates a successful run of "ddq", indicating that the toolkit is
# installed.
_DDQ_INSTALLED = _GENERIC_SUCCESS
# The simulated output of a successful call to "ptt".
_PTT_OUTPUT = "\n".join([" - %s" % _metric for _metric in _METRICS])
_PTT_SUCCESS = (MockPopen, [], {"stdout": _PTT_OUTPUT})
# The simulated output of a successful call to "mpevt -ld".
_MPEVT_OUTPUT = "Id Name Description\n-- ---- -----------"
for i, _metric in enumerate(_METRICS):
desc = _METRICS[_metric]
if desc:
_MPEVT_OUTPUT += "\n%d %s %s" % (100 + i, _metric, desc)
_MPEVT_SUCCESS = (MockPopen, [], {"stdout": _MPEVT_OUTPUT, "returncode": -1})
# This is a set of MockPopen results that imitates a successful initialization
# of the toolkit.
_SUCCESSFUL_INIT = [_DDQ_INSTALLED, _PTT_SUCCESS, _MPEVT_SUCCESS]
def _CreateQueryResults(metrics):
"""Returns a set of made up results for the given metrics.
Args:
metrics: An iterable collection of metric names.
"""
results = {}
pids = [1015, 1016]
for metric in metrics:
pid_results = {}
for pid in pids:
pid_results[pid] = random.randint(100000, 1000000)
results[metric] = pid_results
return results
def _CreateQueryStdout(results):
"""Returns a "ptt dump" stdout for the given dict of results.
See ibmperf.py for a full listing of sample output.
Args:
results: A dict of results as returned by
ibmperf.HardwarePerformanceCounters.Query.
"""
stdout = "***** ptt v2.0.8 for x86 ***** pid=1944/0x798 *****\n"
stdout += "\n"
pids = results[results.keys()[0]].keys()
for pid in pids:
stdout += " PID %d is foo\n" % pid
stdout += "\n"
stdout += "PTT Facility Per-Thread Information\n"
stdout += "-----------------------------------\n"
stdout += "\n"
stdout += " PID TID Disp Intr"
for metric in results:
stdout += " %s" % metric
stdout += "\n"
stdout += " --- --- ---- ----"
for metric in results:
stdout += " %s" % ("-" * len(metric))
stdout += "\n"
for pid in pids:
tid = random.randint(100, 1000)
disp = random.randint(1, 10000)
intr = random.randint(1, 10000)
metric_values = ""
for metric in results:
metric_values += " %d" % results[metric][pid]
stdout += " %d %d %d %d%s\n" % (pid, tid, disp, intr, metric_values)
stdout += " "
stdout += "-".join("%s" % ("-" * len(metric)) for metric in results)
stdout += "\n"
stdout += " "
stdout += metric_values
stdout += "\n\n"
stdout += "Execution ended: 1 iterations.\n"
return stdout
class TestHardwarePerformanceCounter(unittest.TestCase):
"""Unittests for ibmperf.HardwarePerformanceCounter."""
def setUp(self):
# By default we create a mock HardwarePerformanceCounter object that
# successfully initializes the toolkit.
self._hpc = MockHardwarePerformanceCounter(
_SUCCESSFUL_INIT)
def _TestStart(self, metrics):
"""Utility function for starting data collection.
Args:
metrics: Iterable collection of metrics to be started.
"""
self._hpc.AddPopenResult(_GENERIC_SUCCESS) # ptt term
self._hpc.AddPopenResult(_GENERIC_SUCCESS) # ptt noautoterm
self._hpc.AddPopenResult(_GENERIC_SUCCESS) # ptt init
self._hpc.Start(metrics)
def _TestStop(self):
"""Utility function for stopping data collection."""
self._hpc.AddPopenResult(_GENERIC_SUCCESS) # ptt term
self._hpc.Stop()
# Pylint complains that this need not be a member function, but the
# unittest machinery requires this.
# pylint: disable=R0201
def testInstallsIfNotInstalled(self):
MockHardwarePerformanceCounter(
[(MockPopen, [], {"returncode": -1}), # ddq failure.
(MockPopen, [], {"returncode": 0}), # tinstall success.
_PTT_SUCCESS, _MPEVT_SUCCESS])
def testFailedInstall(self):
self.assertRaises(ibmperf.ExecutionFailed,
MockHardwarePerformanceCounter,
[(MockPopen, [], {"returncode": -1}), # ddq failure.
(MockPopen, [], {"returncode": -1})]) # tinstall failure.
def testHaveMetrics(self):
self.assertEqual(set(_METRICS.keys()), set(self._hpc.metrics.keys()))
def testQueryFailsWhenNotRunning(self):
self.assertRaises(ibmperf.NotRunning, self._hpc.Query, "foo")
def testStopFailsWhenNotRunning(self):
self.assertRaises(ibmperf.NotRunning, self._hpc.Stop)
def testStartFailsOnInvalidMetric(self):
self.assertRaises(ibmperf.InvalidMetric,
self._TestStart,
["INVALID_METRIC_NAME"])
def testAllMetricsCanBeStartedIndividually(self):
for name in self._hpc.metrics:
self._TestStart([name])
self._TestStop()
def testDumpFails(self):
self._TestStart([_CYCLES])
# ptt returns 210 when it fails.
self._hpc.AddPopenResult((MockPopen, [], {"returncode": 210}))
self.assertRaises(ibmperf.ExecutionFailed,
MockHardwarePerformanceCounter.Query,
self._hpc,
"foo")
def testUnexpectedDumpOutput(self):
self._TestStart([_CYCLES])
stdout = "This is garbage, and is not parsable."
self._hpc.AddPopenResult((MockPopen, [], {"stdout": stdout}))
self.assertRaises(ibmperf.UnexpectedOutput,
MockHardwarePerformanceCounter.Query,
self._hpc,
"foo")
def testWrongMetricsDumped(self):
self._TestStart([_CYCLES])
results = _CreateQueryResults([_UOPS])
stdout = _CreateQueryStdout(results)
self._hpc.AddPopenResult((MockPopen, [], {"stdout": stdout}))
self.assertRaises(ibmperf.UnexpectedOutput,
MockHardwarePerformanceCounter.Query,
self._hpc,
"foo")
def _TestMetricsFully(self, metrics):
"""Collects the provided metrics for an imaginary process 'foo'.
This helper function starts the metrics, sleeps for 2 seconds, queries them
and finally stops them. It ensures that the reported metrics match those
that were requested to be collected.
Args:
metrics: Iterable collection of metrics to be started.
"""
self._TestStart(metrics)
expected_results = _CreateQueryResults(metrics)
query_stdout = _CreateQueryStdout(expected_results)
self._hpc.AddPopenResult((MockPopen, [], {"stdout": query_stdout}))
results = self._hpc.Query("foo")
self.assertTrue(isinstance(results, dict))
self.assertEqual(expected_results, results)
self._TestStop()
def testOneMetricFully(self):
name = self._hpc.metrics.keys()[0]
self._TestMetricsFully([name])
def _GetMaximalMetrics(self):
"""Helper function that returns a set of maximal metrics.
This returns all free metrics, plus max_counters non-free metrics.
"""
metrics = list(self._hpc.free_metrics)
metrics += list(self._hpc.non_free_metrics)[0:self._hpc.max_counters]
return metrics
def testMaximalMetricsFully(self):
metrics = self._GetMaximalMetrics()
self._TestMetricsFully(metrics)
def testMaximalMetricsFullyForReal(self):
# Only run this test if the toolkit is actually present at the
# default path.
if (not os.path.isdir(ibmperf.DEFAULT_DIR) or
not os.path.exists(os.path.join(ibmperf.DEFAULT_DIR, 'ddq.exe'))):
return
self._hpc = ibmperf.HardwarePerformanceCounter()
metrics = self._GetMaximalMetrics()
self._hpc.Start(metrics)
try:
results = self._hpc.Query("python")
self.assertTrue(isinstance(results, dict))
self.assertEqual(set(metrics), set(results))
except ibmperf.ExecutionFailed:
# We swallow this error, as it can happen if the local machine doesn't
# actually support per-thread metrics. Some versions of Windows don't.
pass
self._hpc.Stop()
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG)
unittest.main()
|
hypermax/optimizer.py | 00sapo/hypermax | 107 | 42415 | import hyperopt
import csv
import json
import traceback
import os.path
from pprint import pprint
import datetime
import time
import numpy.random
import threading
import queue
import copy
import tempfile
import random
import subprocess
import concurrent.futures
import tempfile
import functools
import math
import atexit
import jsonschema
import pkg_resources
from hypermax.execution import Execution
from hypermax.hyperparameter import Hyperparameter
from hypermax.results_analyzer import ResultsAnalyzer
from hypermax.algorithms.atpe_optimizer import ATPEOptimizer
from hypermax.algorithms.human_guided_optimizer_wrapper import HumanGuidedOptimizerWrapper
from hypermax.algorithms.tpe_optimizer import TPEOptimizer
from hypermax.algorithms.random_search_optimizer import RandomSearchOptimizer
from hypermax.algorithms.adaptive_bayesian_hyperband_optimizer import AdaptiveBayesianHyperband
from hypermax.configuration import Configuration
class Optimizer:
resultInformationKeys = [
'trial',
'status',
'loss',
'time',
'log',
'error'
]
def __init__(self, configuration):
self.config = Configuration(configuration)
self.searchConfig = configuration.get('search', {})
# jsonschema.validate(self.searchConfig, self.configurationSchema())
self.space = self.config.createHyperparameterSpace()
self.threadExecutor = concurrent.futures.ThreadPoolExecutor()
self.resultsAnalyzer = ResultsAnalyzer(configuration)
self.results = []
self.resultFutures = []
self.best = None
self.bestLoss = None
self.thread = threading.Thread(target=lambda: self.optimizationThread(), daemon=True if configuration.get("ui", {}).get("enabled", True) else False)
self.totalTrials = self.searchConfig.get("iterations")
self.trialsSinceResultsUpload = None
self.resultsExportFuture = None
self.currentTrials = []
self.allWorkers = set(range(self.config.data['function'].get('parallel', 1)))
self.occupiedWorkers = set()
self.trialNumber = 0
self.lastATPEParameters = None
self.lastLockedParameters = None
self.atpeParamDetails = None
self.tpeOptimizer = TPEOptimizer()
self.atpeOptimizer = ATPEOptimizer()
self.abhOptimizer = AdaptiveBayesianHyperband(self.atpeOptimizer, self.searchConfig.get("min_budget", 1), self.searchConfig.get("max_budget", 100), self.searchConfig.get("eta", 3))
self.humanGuidedATPEOptimizer = HumanGuidedOptimizerWrapper(self.atpeOptimizer)
self.randomSearchOptimizer = RandomSearchOptimizer()
def __del__(self):
if self.threadExecutor:
self.threadExecutor.shutdown(wait=True)
@classmethod
def configurationSchema(self):
""" This method returns the configuration schema for the optimization module. The schema
is a standard JSON-schema object."""
return {
"type": "object",
"properties": {
"method": {"type": "string", "enum": ['atpe', 'tpe', 'random']},
"iterations": {"type": "number"},
"budget": {"type": "number"}
},
"required": ['method', 'iterations']
}
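# A search configuration matching the schema above might look like the following
# (illustrative values only, not taken from the project docs):
#   {"method": "atpe", "iterations": 200, "budget": 4.0}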
def completed(self):
return len(self.results)
def sampleNext(self):
if self.searchConfig['method'] == 'tpe':
return self.tpeOptimizer.recommendNextParameters(self.config.data['hyperparameters'], self.results, self.currentTrials)
elif self.searchConfig['method'] == 'random':
return self.randomSearchOptimizer.recommendNextParameters(self.config.data['hyperparameters'], self.results, self.currentTrials)
elif self.searchConfig['method'] == 'atpe':
params = self.humanGuidedATPEOptimizer.recommendNextParameters(self.config.data['hyperparameters'], self.results, self.currentTrials)
self.lastATPEParameters = self.atpeOptimizer.lastATPEParameters
self.lastLockedParameters = self.atpeOptimizer.lastLockedParameters
self.atpeParamDetails = self.atpeOptimizer.atpeParamDetails
return params
elif self.searchConfig['method'] == 'abh':
params = self.abhOptimizer.recommendNextParameters(self.config.data['hyperparameters'], self.results, self.currentTrials)
self.lastATPEParameters = self.atpeOptimizer.lastATPEParameters
self.lastLockedParameters = self.atpeOptimizer.lastLockedParameters
self.atpeParamDetails = self.atpeOptimizer.atpeParamDetails
return params
def computeCurrentBest(self):
best = None
bestLoss = None
for result in self.results:
if (best is None and result['loss'] is not None ) or (result['loss'] is not None and result['loss'] < bestLoss):
best = result
bestLoss = result['loss']
self.best = best
self.bestLoss = bestLoss
def startOptmizationJob(self):
availableWorkers = list(sorted(self.allWorkers.difference(self.occupiedWorkers)))
sampleWorker = availableWorkers[0]
sample = None
while sample is None:
# Hedge against any exceptions in the atpe optimizer.
try:
sample = self.sampleNext()
except Exception:
traceback.print_exc()
pass
def testSample(params, trial, worker):
currentTrial = {
"start": datetime.datetime.now(),
"trial": trial,
"worker": worker,
"params": copy.deepcopy(params)
}
self.currentTrials.append(currentTrial)
start = datetime.datetime.now()
execution = Execution(self.config.data['function'], parameters=params, worker_n=worker)
modelResult = execution.run()
end = datetime.datetime.now()
result = Hyperparameter(self.config.data['hyperparameters']).convertToFlatValues(params)
for key in params.keys():
if key.startswith("$"):
result[key] = params[key]
result['trial'] = trial
self.resultsAnalyzer.makeDirs(os.path.join(self.resultsAnalyzer.directory, "logs"))
if 'loss' in modelResult:
result['loss'] = modelResult['loss']
elif 'accuracy' in modelResult:
result['loss'] = modelResult['accuracy']
if 'status' in modelResult:
result['status'] = modelResult['status']
else:
result['status'] = 'ok'
if 'log' in modelResult:
fileName = os.path.join(self.resultsAnalyzer.directory, "logs", "trial_" + str(trial) + ".txt")
with open(fileName, "wt") as file:
file.write(modelResult['log'])
result['log'] = fileName
else:
result['log'] = ''
if 'error' in modelResult:
result['error'] = modelResult['error']
else:
result['error'] = ''
if 'time' in modelResult:
result['time'] = modelResult['time']
else:
result['time'] = (end-start).total_seconds()
self.currentTrials.remove(currentTrial)
return result
def onCompletion(worker, future):
self.occupiedWorkers.remove(worker)
self.results.append(future.result())
self.computeCurrentBest()
if not self.config.data.get("ui", {}).get("enabled", True):
pprint(future.result())
if self.resultsExportFuture is None or (self.resultsExportFuture.done() and len(self.results) > 5):
self.resultsExportFuture = self.threadExecutor.submit(
lambda: self.outputResultsWithBackup(self.config.data.get("results", {}).get("graphs", True)))
else:
self.outputResultsWithBackup(False)
if 'hypermax_results' in self.config.data:
if self.trialsSinceResultsUpload is None or self.trialsSinceResultsUpload >= self.config.data['hypermax_results']['upload_frequency']:
self.saveResultsToHypermaxResultsRepository()
self.trialsSinceResultsUpload = 1
else:
self.trialsSinceResultsUpload += 1
self.occupiedWorkers.add(sampleWorker)
sampleFuture = self.threadExecutor.submit(testSample, sample, self.trialNumber, sampleWorker)
sampleFuture.add_done_callback(functools.partial(onCompletion, sampleWorker))
self.trialNumber += 1
return sampleFuture
def runOptimizationThread(self):
self.thread.start()
def outputResultsWithBackup(self, graphs, workers=1):
self.resultsAnalyzer.outputResultsFolder(self, graphs, workers=workers)
directory_head, directory_tail = os.path.split(self.resultsAnalyzer.directory)
backup_directory = os.path.join(directory_head, ".backup_" + directory_tail + "~")
self.resultsAnalyzer.outputResultsFolder(self, graphs, directory=backup_directory, workers=workers)
def optimizationThread(self):
# Make sure we output basic results if the process is killed for some reason.
atexit.register(lambda: self.outputResultsWithBackup(False))
futures = []
for worker in range(min(len(self.allWorkers), self.totalTrials - len(self.results))):
futures.append(self.startOptmizationJob())
time.sleep(1.0)
while (len(self.results) + len(self.currentTrials)) < self.totalTrials:
completedFuture = list(concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)[0])[0]
futures.remove(completedFuture)
time.sleep(0.05)
futures.append(self.startOptmizationJob())
concurrent.futures.wait(futures)
# We are done, so we can allocate a full contingent of workers
self.outputResultsWithBackup(True, workers=4)
def exportGuidanceJSON(self, fileName):
with open(fileName, 'wt') as file:
json.dump(self.humanGuidedATPEOptimizer.guidanceOptions, file, indent=4, sort_keys=True)
def importGuidanceJSON(self, fileName):
with open(fileName, 'rt') as file:
self.humanGuidedATPEOptimizer.guidanceOptions = json.load(file)
def exportResultsCSV(self, fileName):
allKeys = set()
for result in self.results:
for key in result:
allKeys.add(key)
fieldNames = self.resultInformationKeys + sorted(allKeys.difference(set(self.resultInformationKeys))) # Make sure we keep the order of the field names consistent when writing the csv
with open(fileName, 'wt') as file:
writer = csv.DictWriter(file, fieldnames=fieldNames if len(self.results) > 0 else [], dialect='unix')
writer.writeheader()
writer.writerows(self.results)
def importResultsCSV(self, fileName):
with open(fileName) as file:
reader = csv.DictReader(file)
results = list(reader)
newResults = []
for result in results:
newResult = {}
for key,value in result.items():
if value is not None and value != "":
try:
if '.' in value or 'e' in value:
newResult[key] = float(value)
else:
newResult[key] = int(value)
except ValueError:
newResult[key] = value
elif key == 'loss':
newResult[key] = None
elif key == 'log':
newResult[key] = ''
else:
newResult[key] = None
newResults.append(newResult)
self.results = newResults
self.computeCurrentBest()
self.trialNumber = len(self.results)
def saveResultsToHypermaxResultsRepository(self):
try:
hypermaxResultsConfig = self.config.data['hypermax_results']
with tempfile.TemporaryDirectory() as directory:
process = subprocess.run(['git', 'clone', '<EMAIL>:electricbrainio/hypermax-results.git'], cwd=directory, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
hypermaxResultsDirectory = os.path.join(directory, 'hypermax-results', hypermaxResultsConfig['name'])
self.resultsAnalyzer.outputResultsFolder(self, detailed=False, directory=hypermaxResultsDirectory)
with open(os.path.join(hypermaxResultsDirectory, "metadata.json"), 'wt') as file:
json.dump(self.config.data['hypermax_results'], file, indent=4)
process = subprocess.run(['git', 'add', hypermaxResultsDirectory], cwd=os.path.join(directory, 'hypermax-results'))
process = subprocess.run(['git', 'commit', '-m', 'Hypermax automatically storing results for model ' + hypermaxResultsConfig['name'] + ' with ' + str(len(self.results)) + " trials."], cwd=os.path.join(directory, 'hypermax-results'), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process = subprocess.run(['git push'], cwd=os.path.join(directory, 'hypermax-results'), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
except Exception as e:
print(e)
|
alipay/aop/api/domain/AlipayCommerceTransportTaxiDriverlevelQueryModel.py | antopen/alipay-sdk-python-all | 213 | 42421 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommerceTransportTaxiDriverlevelQueryModel(object):
def __init__(self):
self._channel = None
self._driver_name = None
self._driver_phone = None
self._ext_info = None
@property
def channel(self):
return self._channel
@channel.setter
def channel(self, value):
self._channel = value
@property
def driver_name(self):
return self._driver_name
@driver_name.setter
def driver_name(self, value):
self._driver_name = value
@property
def driver_phone(self):
return self._driver_phone
@driver_phone.setter
def driver_phone(self, value):
self._driver_phone = value
@property
def ext_info(self):
return self._ext_info
@ext_info.setter
def ext_info(self, value):
self._ext_info = value
def to_alipay_dict(self):
params = dict()
if self.channel:
if hasattr(self.channel, 'to_alipay_dict'):
params['channel'] = self.channel.to_alipay_dict()
else:
params['channel'] = self.channel
if self.driver_name:
if hasattr(self.driver_name, 'to_alipay_dict'):
params['driver_name'] = self.driver_name.to_alipay_dict()
else:
params['driver_name'] = self.driver_name
if self.driver_phone:
if hasattr(self.driver_phone, 'to_alipay_dict'):
params['driver_phone'] = self.driver_phone.to_alipay_dict()
else:
params['driver_phone'] = self.driver_phone
if self.ext_info:
if hasattr(self.ext_info, 'to_alipay_dict'):
params['ext_info'] = self.ext_info.to_alipay_dict()
else:
params['ext_info'] = self.ext_info
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayCommerceTransportTaxiDriverlevelQueryModel()
if 'channel' in d:
o.channel = d['channel']
if 'driver_name' in d:
o.driver_name = d['driver_name']
if 'driver_phone' in d:
o.driver_phone = d['driver_phone']
if 'ext_info' in d:
o.ext_info = d['ext_info']
return o
|
selim_sef-solution/lucid/misc/io/__init__.py | Hulihrach/RoadDetector | 4,537 | 42424 | <reponame>Hulihrach/RoadDetector
from lucid.misc.io.showing import show
from lucid.misc.io.loading import load
from lucid.misc.io.saving import save, CaptureSaveContext, batch_save
from lucid.misc.io.scoping import io_scope, scope_url
|
Stephanie/local_libs/search_module.py | JeremyARussell/stephanie-va | 866 | 42469 | class SearchModule:
def __init__(self):
pass
def search_for_competition_by_name(self, competitions, query):
m, answer = self.search(competitions, attribute_name="caption", query=query)
if m == 0:
return False
return answer
def search_for_competition_by_code(self, competitions, query):
return self.search_by_code(competitions, attribute_name="league", query=query)
def search_for_team_by_name(self, teams, query):
m, answer = self.search(teams, attribute_name="name", query=query)
if m == 0:
return False
return answer
def search_for_team_by_code(self, teams, query):
return self.search_by_code(teams, attribute_name="code", query=query)
def search_for_player_by_name(self, players, query):
m, answer = self.search(players, attribute_name="name", query=query)
if m == 0:
return False
return answer
def search_for_team_from_standing_by_name(self, teams, query):
m, answer = self.search(teams, attribute_name="team_name", query=query)
if m == 0:
return False
return answer
@staticmethod
def search_by_code(dataset, attribute_name, query):
search = query.lower()
for index, data in enumerate(dataset):
code = getattr(data, attribute_name).lower()
if code == search:
return dataset[index]
return False
@staticmethod
def search(dataset, attribute_name, query):
values = [0 for _ in range(0, len(dataset))]
search = query.lower().split()
upper_threshold = len(search)
for index, data in enumerate(dataset):
data_name = getattr(data, attribute_name).lower()
search_array = data_name.split()
for index2, text in enumerate(search_array):
if index2 >= upper_threshold:
break
threshold = len(search[index2])
for i in range(0, len(text)):
if i >= threshold - 1:
break
if text[i] == search[index2][i]:
values[index] += 1
max_value = max(values)
max_index = values.index(max_value)
return max_value, dataset[max_index]
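# Illustrative usage sketch (commentary only; `Team` is a hypothetical stand-in for any
# object exposing a `name` attribute, which is all search_for_team_by_name() requires):
#
#     class Team:
#         def __init__(self, name):
#             self.name = name
#
#     module = SearchModule()
#     best = module.search_for_team_by_name([Team("Arsenal"), Team("Aston Villa")], "arsnal")
#
# search() scores each candidate by counting position-wise character matches per query word,
# so the misspelled query still resolves to Team("Arsenal"); if nothing matches at all the
# wrapper returns False.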
|
drf_admin/apps/monitor/serializers/crud.py | guohaihan/myproject | 228 | 42490 | # -*- coding: utf-8 -*-
"""
@author : <NAME>
@github : https://github.com/tianpangji
@software : PyCharm
@file : crud.py
@create : 2020/12/9 20:44
"""
from django.contrib.contenttypes.models import ContentType
from easyaudit.models import CRUDEvent
from rest_framework import serializers
class CRUDSerializer(serializers.ModelSerializer):
event_type_display = serializers.SerializerMethodField()
datetime = serializers.DateTimeField(format='%Y-%m-%d %H:%M:%S', read_only=True)
username = serializers.SerializerMethodField()
content_type_display = serializers.SerializerMethodField()
class Meta:
model = CRUDEvent
fields = ['id', 'event_type_display', 'datetime', 'username', 'content_type_display', 'object_id',
'changed_fields']
def get_event_type_display(self, obj):
return obj.get_event_type_display()
def get_username(self, obj):
try:
username = obj.user.username
except AttributeError:
username = 'unknown'
return username
def get_content_type_display(self, obj):
content_type = ContentType.objects.get(id=obj.content_type_id)
return content_type.app_label + '.' + content_type.model
def to_representation(self, instance):
ret = super().to_representation(instance)
if ret.get('changed_fields') == 'null':
ret['changed_fields'] = ''
return ret
|
爬虫小demo/22 JDPython.py | lb2281075105/Python-Spider | 713 | 42499 | import time
from selenium import webdriver
from lxml import etree
driver = webdriver.PhantomJS(executable_path='./phantomjs-2.1.1-macosx/bin/phantomjs')
# Fetch the data of the first page
def get_html():
url = "https://detail.tmall.com/item.htm?id=531993957001&skuId=3609796167425&user_id=268451883&cat_id=2&is_b=1&rn=71b9b0aeb233411c4f59fe8c610bc34b"
driver.get(url)
time.sleep(5)
driver.execute_script('window.scrollBy(0,3000)')
time.sleep(2)
driver.execute_script('window.scrollBy(0,5000)')
time.sleep(2)
# "Accumulated reviews" (累计评价) tab
btnNext = driver.find_element_by_xpath('//*[@id="J_TabBar"]/li[3]/a')
btnNext.click()
html = driver.page_source
return html
def get_comments(html):
source = etree.HTML(html)
commens = source.xpath("//*[@id='J_TabBar']/li[3]/a/em/text()")
print('Number of reviews:', commens)
# Convert the review count to an int
commens = (int(commens[0]) / 20) + 1
# Total number of review pages (20 reviews per page)
print('Number of review pages:', int(commens))
return int(commens)
def parse_html(html):
html = etree.HTML(html)
commentlist = html.xpath("//*[@class='rate-grid']/table/tbody")
for comment in commentlist:
# Review text
vercomment = comment.xpath(
"./tr/td[@class='tm-col-master']/div[@class='tm-rate-content']/div[@class='tm-rate-fulltxt']/text()")
# Device type
verphone = comment.xpath("./tr/td[@class='col-meta']/div[@class='rate-sku']/p[@title]/text()")
print(vercomment)
print(verphone)
# User (first and last characters kept, middle masked with ****)
veruser = comment.xpath("./tr/td[@class='col-author']/div[@class='rate-user-info']/text()")
print(veruser)
def next_button_work(num):
if num != 0:
driver.execute_script('window.scrollBy(0,3000)')
time.sleep(2)
try:
driver.find_element_by_css_selector('#J_Reviews > div > div.rate-page > div > a:last-child').click()
except Exception as e:
print(e)
time.sleep(2)
driver.execute_script('window.scrollBy(0,3000)')
time.sleep(2)
driver.execute_script('window.scrollBy(0,5000)')
time.sleep(2)
html = driver.page_source
parse_html(html)
def selenuim_work(html):
parse_html(html)
next_button_work(1)
pass
def gettotalpagecomments(comments):
html = get_html()
for i in range(0, comments):
selenuim_work(html)
data = get_html()
# Get the review count
commens = get_comments(data)
# Iterate over the review pages based on that count
gettotalpagecomments(commens)
|
demo/cities/urls.py | mrmikardo/django-map-widgets | 425 | 42534 | <reponame>mrmikardo/django-map-widgets
from django.urls import path, re_path
from cities.views import CityCreateView, CityListView, CityDetailView
app_name = 'cities'
urlpatterns = [
path('', CityListView.as_view(), name="list"),
re_path(r'^(?P<pk>\d+)/$', CityDetailView.as_view(), name="detail"),
re_path(r'^create/$', CityCreateView.as_view(), name="create"),
] |
docker/GetAndResizeImages.py | vaquarkhan/ecs-refarch-batch-processing | 105 | 42588 | #!/usr/bin/env python
# Copyright 2016 Amazon.com, Inc. or its
# affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import os
import json
import urllib
import boto3
from PIL import Image
from PIL.ExifTags import TAGS
resized_dir = '/images/resized'
thumb_dir = '/images/thumbs'
input_bucket_name = os.environ['s3InputBucket']
output_bucket_name = os.environ['s3OutputBucket']
sqsqueue_name = os.environ['SQSBatchQueue']
aws_region = os.environ['AWSRegion']
s3 = boto3.client('s3', region_name=aws_region)
sqs = boto3.resource('sqs', region_name=aws_region)
def create_dirs():
for dirs in [resized_dir, thumb_dir]:
if not os.path.exists(dirs):
os.makedirs(dirs)
def process_images():
"""Process the image
No real error handling in this sample code. In case of error we'll put
the message back in the queue and make it visible again. It will end up in
the dead letter queue after five failed attempts.
"""
for message in get_messages_from_sqs():
try:
message_content = json.loads(message.body)
image = urllib.unquote_plus(message_content
['Records'][0]['s3']['object']
['key']).encode('utf-8')
s3.download_file(input_bucket_name, image, image)
resize_image(image)
upload_image(image)
cleanup_files(image)
except:
message.change_visibility(VisibilityTimeout=0)
continue
else:
message.delete()
def cleanup_files(image):
os.remove(image)
os.remove(resized_dir + '/' + image)
os.remove(thumb_dir + '/' + image)
def upload_image(image):
s3.upload_file(resized_dir + '/' + image,
output_bucket_name, 'resized/' + image)
s3.upload_file(thumb_dir + '/' + image,
output_bucket_name, 'thumbs/' + image)
def get_messages_from_sqs():
results = []
queue = sqs.get_queue_by_name(QueueName=sqsqueue_name)
for message in queue.receive_messages(VisibilityTimeout=120,
WaitTimeSeconds=20,
MaxNumberOfMessages=10):
results.append(message)
return(results)
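# (Added note: WaitTimeSeconds=20 enables SQS long polling, so the call blocks up to 20s for
# messages instead of returning immediately, and VisibilityTimeout=120 hides each received
# message from other consumers for two minutes while this worker processes it.)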
def resize_image(image):
img = Image.open(image)
exif = img._getexif()
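# (Added commentary on the EXIF handling below: Orientation value 3 means the photo is stored
# upside-down, 6 means it needs a 90-degree clockwise correction and 8 a 90-degree
# counter-clockwise correction; PIL's Image.rotate() rotates counter-clockwise, hence the
# 180 / 270 / 90 arguments.)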
if exif is not None:
for tag, value in exif.items():
decoded = TAGS.get(tag, tag)
if decoded == 'Orientation':
if value == 3:
img = img.rotate(180)
if value == 6:
img = img.rotate(270)
if value == 8:
img = img.rotate(90)
img.thumbnail((1024, 768), Image.ANTIALIAS)
try:
img.save(resized_dir + '/' + image, 'JPEG', quality=100)
except IOError as e:
print("Unable to save resized image")
img.thumbnail((192, 192), Image.ANTIALIAS)
try:
img.save(thumb_dir + '/' + image, 'JPEG')
except IOError as e:
print("Unable to save thumbnail")
def main():
create_dirs()
while True:
process_images()
if __name__ == "__main__":
main()
|
theano/compile/tests/test_function_name.py | mdda/Theano | 295 | 42592 | <gh_stars>100-1000
import unittest
import os
import re
import theano
from theano import tensor
class FunctionName(unittest.TestCase):
def test_function_name(self):
x = tensor.vector('x')
func = theano.function([x], x + 1.)
regex = re.compile(os.path.basename('.*test_function_name.pyc?:13'))
assert(regex.match(func.name) is not None)
|
util/data_type_util.py | Chandru01061997/pythonDB | 409 | 42601 | from uuid import UUID
from datetime import datetime
def uuid_from_string(string):
return UUID('{s}'.format(s=string))
def format_timestamp(string):
if isinstance(string, str):
return datetime.strptime(string, '%Y-%m-%dT%H:%M:%S.%fZ')
if isinstance(string, datetime):
return string
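# Illustrative examples (commentary only):
#   uuid_from_string('12345678-1234-5678-1234-567812345678')
#       -> UUID('12345678-1234-5678-1234-567812345678')
#   format_timestamp('2021-03-01T12:34:56.789Z')
#       -> datetime(2021, 3, 1, 12, 34, 56, 789000)
#   format_timestamp(datetime(2021, 3, 1))  # already a datetime, so it is returned unchanged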
|
pushkin/util/__init__.py | Nordeus/pushkin | 281 | 42643 | from . import pool
from . import multiprocesslogging
from . import tools
|
tests/config_test.py | akshaysharma096/clusterman | 281 | 42646 | <reponame>akshaysharma096/clusterman
# Copyright 2019 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import mock
import pytest
import staticconf
import staticconf.testing
import yaml
import clusterman.config as config
from clusterman.config import POOL_NAMESPACE
from tests.conftest import mock_open
@pytest.fixture
def mock_config_files():
with staticconf.testing.PatchConfiguration({"cluster_config_directory": "/nail/whatever"}), mock_open(
config.get_pool_config_path("cluster-A", "pool-1", "mesos"),
contents=yaml.dump({"resource_groups": "cluster-A", "other_config": 18,}),
), mock_open(
config.get_pool_config_path("cluster-A", "pool-2", "mesos"),
contents=yaml.dump({"resource_groups": "cluster-A", "other_config": 20,}),
), mock_open(
config.get_pool_config_path("cluster-A", "pool-2", "kubernetes"),
contents=yaml.dump({"resource_groups": "cluster-A", "other_config": 29,}),
), mock_open(
config.get_pool_config_path("cluster-B", "pool-1", "mesos"),
contents=yaml.dump(
{"resource_groups": "cluster-B", "other_config": 200, "autoscale_signal": {"branch_or_tag": "v42"},}
),
), mock_open(
"/etc/no_cfg/clusterman.json",
contents=json.dumps({"accessKeyId": "foo", "secretAccessKey": "bar", "region": "nowhere-useful",}),
):
yield
@pytest.fixture(autouse=True)
def mock_config_namespaces():
# To avoid polluting staticconf for other tests, and clear out stuff from conftest that mocks configuration
with staticconf.testing.MockConfiguration(
{}, namespace=POOL_NAMESPACE.format(pool="pool-1", scheduler="mesos"),
), staticconf.testing.MockConfiguration(
{}, namespace=POOL_NAMESPACE.format(pool="pool-2", scheduler="mesos"),
), staticconf.testing.MockConfiguration(
{
"clusters": {"cluster-A": {"mesos_url_api": "service.leader", "aws_region": "us-test-3",},},
"aws": {"access_key_file": "/etc/no_cfg/clusterman.json",},
},
namespace=staticconf.config.DEFAULT,
):
yield
@pytest.mark.parametrize(
"cluster,pool,scheduler,tag",
[
("cluster-A", "pool-1", "mesos", None),
("cluster-A", "pool-2", "mesos", "v52"),
("cluster-A", "pool-2", "kubernetes", None),
("cluster-A", None, "mesos", None),
],
)
def test_setup_config_cluster(cluster, pool, scheduler, tag, mock_config_files):
args = argparse.Namespace(
env_config_path="/nail/etc/config.yaml",
cluster=cluster,
pool=pool,
scheduler=scheduler,
signals_branch_or_tag=tag,
)
with mock.patch("clusterman.config.load_cluster_pool_config", autospec=True,) as mock_pool_load, mock.patch(
"clusterman.config._load_module_configs",
) as mock_load_module_configs:
config.setup_config(args)
assert mock_load_module_configs.call_args == mock.call("/nail/etc/config.yaml")
assert staticconf.read_string("aws.region") == "us-test-3"
if pool:
assert mock_pool_load.call_args == mock.call(cluster, pool, scheduler, tag)
else:
assert mock_pool_load.call_count == 0
if tag:
assert staticconf.read_string("autoscale_signal.branch_or_tag") == tag
def test_setup_config_region_and_cluster():
args = argparse.Namespace(env_config_path="/nail/etc/config.yaml", cluster="foo", aws_region="bar",)
with mock.patch("clusterman.config._load_module_configs"), pytest.raises(argparse.ArgumentError):
config.setup_config(args)
@mock.patch("clusterman.config._load_module_configs")
def test_setup_config_region(mock_load_module_configs, mock_config_files):
args = argparse.Namespace(env_config_path="/nail/etc/config.yaml", aws_region="fake-region-A",)
config.setup_config(args)
assert staticconf.read_string("aws.region") == "fake-region-A"
assert mock_load_module_configs.call_args == mock.call("/nail/etc/config.yaml")
@pytest.mark.parametrize("cluster,pool,pool_other_config", [("cluster-B", "pool-1", 200)])
def test_load_cluster_pool_config(cluster, pool, pool_other_config, mock_config_files):
config.load_cluster_pool_config(cluster, pool, "mesos", None)
pool_namespace = POOL_NAMESPACE.format(pool=pool, scheduler="mesos")
assert staticconf.read_int("other_config", namespace=pool_namespace) == pool_other_config
assert staticconf.read_string("resource_groups", namespace=pool_namespace) == cluster
|
mayan/apps/acls/permissions.py | eshbeata/open-paperless | 2,743 | 42648 | <filename>mayan/apps/acls/permissions.py
from __future__ import absolute_import, unicode_literals
from django.utils.translation import ugettext_lazy as _
from permissions import PermissionNamespace
namespace = PermissionNamespace('acls', _('Access control lists'))
permission_acl_edit = namespace.add_permission(
name='acl_edit', label=_('Edit ACLs')
)
permission_acl_view = namespace.add_permission(
name='acl_view', label=_('View ACLs')
)
|
src/orders/tests/order_content_type_replacement/tests_order_item.py | iNerV/education-backend | 151 | 42660 |
import pytest
pytestmark = [pytest.mark.django_db]
def test_order_without_items(order):
order = order()
assert order.item is None
def test_order_with_record(order, record):
order = order(record=record)
assert order.item == record
def test_order_with_course(order, course):
order = order(course=course)
assert order.item == course
def test_order_with_bundle(order, bundle):
order = order(bundle=bundle)
assert order.item == bundle
|
codigo/Live102/exemplo_2.py | cassiasamp/live-de-python | 572 | 42689 |
from expects import expect, contain, be_an
class Bacon:
...
sanduiche = 'sanduiche com queijo'
expect(sanduiche).to(contain('queijo'))
expect(sanduiche).to_not(be_an(Bacon))
|
tartiflette/language/ast/base.py | remorses/tartiflette-whl | 530 | 42736 |
__all__ = (
"Node",
"DefinitionNode",
"ExecutableDefinitionNode",
"TypeSystemDefinitionNode",
"TypeSystemExtensionNode",
"TypeDefinitionNode",
"TypeExtensionNode",
"SelectionNode",
"ValueNode",
"TypeNode",
)
class Node:
__slots__ = ()
class DefinitionNode(Node):
__slots__ = ()
class ExecutableDefinitionNode(DefinitionNode):
__slots__ = ()
class TypeSystemDefinitionNode(DefinitionNode):
__slots__ = ()
class TypeSystemExtensionNode(DefinitionNode):
__slots__ = ()
class TypeDefinitionNode(TypeSystemDefinitionNode):
__slots__ = ()
class TypeExtensionNode(TypeSystemExtensionNode):
__slots__ = ()
class SelectionNode(Node):
__slots__ = ()
class ValueNode(Node):
__slots__ = ()
class TypeNode(Node):
__slots__ = ()
|
Chapter08/transformers_textgen.py | arifmudi/Advanced-Deep-Learning-with-Python | 107 | 42752 |
import torch
from transformers import TransfoXLLMHeadModel, TransfoXLTokenizer
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Instantiate pre-trained model-specific tokenizer and the model itself
tokenizer = TransfoXLTokenizer.from_pretrained('transfo-xl-wt103')
model = TransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103').to(device)
# Initial input sequence
text = "The company was founded in"
tokens_tensor = \
torch.tensor(tokenizer.encode(text)) \
.unsqueeze(0) \
.to(device)
mems = None # recurrence mechanism
predicted_tokens = list()
for i in range(50): # stop at 50 predicted tokens
# Generate predictions
predictions, mems = model(tokens_tensor, mems=mems)
# Get most probable word index
predicted_index = torch.topk(predictions[0, -1, :], 1)[1]
# Extract the word from the index
predicted_token = tokenizer.decode(predicted_index)
# break if [EOS] reached
if predicted_token == tokenizer.eos_token:
break
# Store the current token
predicted_tokens.append(predicted_token)
# Append new token to the existing sequence
tokens_tensor = torch.cat((tokens_tensor, predicted_index.unsqueeze(1)), dim=1)
print('Initial sequence: ' + text)
print('Predicted output: ' + " ".join(predicted_tokens))
|
nets/vgg.py | bubbliiiing/faster-rcnn-tf2 | 133 | 42760 | from tensorflow.keras.layers import (Conv2D, Dense, Flatten, MaxPooling2D,
TimeDistributed)
def VGG16(inputs):
x = Conv2D(64,(3,3),activation = 'relu',padding = 'same',name = 'block1_conv1')(inputs)
x = Conv2D(64,(3,3),activation = 'relu',padding = 'same', name = 'block1_conv2')(x)
x = MaxPooling2D((2,2), strides = (2,2), name = 'block1_pool')(x)
x = Conv2D(128,(3,3),activation = 'relu',padding = 'same',name = 'block2_conv1')(x)
x = Conv2D(128,(3,3),activation = 'relu',padding = 'same',name = 'block2_conv2')(x)
x = MaxPooling2D((2,2),strides = (2,2), name = 'block2_pool')(x)
x = Conv2D(256,(3,3),activation = 'relu',padding = 'same',name = 'block3_conv1')(x)
x = Conv2D(256,(3,3),activation = 'relu',padding = 'same',name = 'block3_conv2')(x)
x = Conv2D(256,(3,3),activation = 'relu',padding = 'same',name = 'block3_conv3')(x)
x = MaxPooling2D((2,2),strides = (2,2), name = 'block3_pool')(x)
    # Fourth convolutional block
# 14,14,512
x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block4_conv1')(x)
x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block4_conv2')(x)
x = Conv2D(512,(3,3),activation = 'relu',padding = 'same', name = 'block4_conv3')(x)
x = MaxPooling2D((2,2),strides = (2,2), name = 'block4_pool')(x)
    # Fifth convolutional block
# 7,7,512
x = Conv2D(512,(3,3),activation = 'relu', padding = 'same', name = 'block5_conv1')(x)
x = Conv2D(512,(3,3),activation = 'relu', padding = 'same', name = 'block5_conv2')(x)
x = Conv2D(512,(3,3),activation = 'relu', padding = 'same', name = 'block5_conv3')(x)
return x
def vgg_classifier_layers(x):
    # Flatten the pooled ROI features and pass them through two fully connected layers
x = TimeDistributed(Flatten(name='flatten'))(x)
x = TimeDistributed(Dense(4096, activation='relu'), name='fc1')(x)
x = TimeDistributed(Dense(4096, activation='relu'), name='fc2')(x)
return x
|
notebooks-text-format/vdvae_jax_cifar_demo.py | arpitvaghela/probml-notebooks | 166 | 42804 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/always-newbie161/probml-notebooks/blob/jax_vdvae/notebooks/vdvae_jax_cifar_demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="cTSe7I6g45v8"
# This notebook demonstrates working with VDVAE in JAX; the code used is from [vdvae-jax](https://github.com/j-towns/vdvae-jax) by [<NAME>](https://j-towns.github.io/)
# + [markdown] id="PxtpxTPEMS4C"
# ## Setup
# + id="ipHVirxUHTDJ"
from google.colab import auth
auth.authenticate_user()
# + colab={"base_uri": "https://localhost:8080/"} id="Z6gM2ytSHnO0" outputId="3e63de9d-6808-4cd9-eb1f-08996a6a7fed"
project_id = 'probml'
# !gcloud config set project {project_id}
# + id="a3__DVx74sso" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="579bc832-9028-49f3-c164-c426d32f66a6"
'''
this should be the format of the checkpoint filetree:
checkpoint_path >> model(optimizer)_checkpoint_file.
checkpoint_path_ema >> ema_checkpoint_file
'''
checkpoint_path='/content/vdvae_cifar10_2.86/latest_cifar10'
# checkpoints are downloaded at these paths.
# vdvae_cifar10_2.86/latest_cifar10 - optimizer + model
# vdvae_cifar10_2.86/latest_cifar10_ema - ema_params
# + id="4_RnWXhwIV85" colab={"base_uri": "https://localhost:8080/"} cellView="form" outputId="de8dedaf-bdd3-4fb7-99ee-7cfe96229d1c"
#@title Download checkpoints
# !gsutil cp -r gs://gsoc_bucket/vdvae_cifar10_2.86 ./
# !ls -l /content/vdvae_cifar10_2.86/latest_cifar10
# !ls -l /content/vdvae_cifar10_2.86/latest_cifar10_ema
# + colab={"base_uri": "https://localhost:8080/"} id="z3fThb8PIYHG" outputId="8406f5b2-cb50-42f5-aa78-4dc4f85afb02"
# !git clone https://github.com/j-towns/vdvae-jax.git
# + colab={"base_uri": "https://localhost:8080/"} id="053XPypoMobJ" outputId="0e415f07-00a4-4815-c2c5-288236ac2c98"
# %cd vdvae-jax
# + colab={"base_uri": "https://localhost:8080/"} id="X1hY6VqmNApP" outputId="41014f01-32bf-4377-85e5-e18328d2161a"
# !pip install --quiet flax
# + id="y013geSvWQUg"
import os
try:
os.environ['COLAB_TPU_ADDR']
import jax.tools.colab_tpu
jax.tools.colab_tpu.setup_tpu()
except:
pass
# + colab={"base_uri": "https://localhost:8080/"} id="XDzBF1uZXOlu" outputId="929c368c-4610-49b0-bc94-76b891bc9b0e"
import jax
jax.local_devices()
# + [markdown] id="KrFas8alNwJ0"
# ## Model
# (for cifar10)
# + [markdown] id="4Mr89HhnTbaF"
# ### Setting up hyperparams
# + id="B0QZ6aKoP08z"
from hps import HPARAMS_REGISTRY, Hyperparams, add_vae_arguments
from train_helpers import setup_save_dirs
import argparse
import dataclasses
H = Hyperparams()
parser = argparse.ArgumentParser()
parser = add_vae_arguments(parser)
parser.set_defaults(hps= 'cifar10',conv_precision='highest')
H = dataclasses.replace(H, **vars(parser.parse_args([])))
hparam_sets = [x for x in H.hps.split(',') if x]
for hp_set in hparam_sets:
hps = HPARAMS_REGISTRY[hp_set]
parser.set_defaults(**hps)
H = dataclasses.replace(H, **vars(parser.parse_args([])))
H = setup_save_dirs(H)
# + [markdown] id="NisrtOPlfmef"
# This is a hierarchical model with multiple stochastic blocks, each made up of multiple deterministic layers. You can understand the model skeleton by reading the encoder and decoder "strings"; a short illustrative sketch follows this cell.
#
# **How to understand the string:**
# * blocks are comma separated
# * `axb` means there are `b` res blocks (sets of Conv layers) at resolution `axa`
# * `amb` is a mixin block which increases the spatial dims from `a` to `b` using **nearest neighbour upsampling** (used in the decoder)
# * `adb` is a block with an avg-pooling layer which reduces the spatial dims from `a` to `b` (used in the encoder)
#
# for more understanding refer to this [paper](https://arxiv.org/abs/2011.10650)
#
#
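# +
# NOTE: the next few lines are an illustrative sketch added for this write-up, not part of
# the original vdvae-jax notebook. They hand-decode a block string using the `axb` / `adb` /
# `amb` conventions described above; the example spec passed in at the end is made up.
def describe_blocks(spec):
    for block in spec.split(','):
        if 'x' in block:
            res, n = block.split('x')
            print(f'{n} res blocks at {res}x{res}')
        elif 'd' in block:
            a, b = block.split('d')
            print(f'avg-pool down from {a}x{a} to {b}x{b}')
        elif 'm' in block:
            a, b = block.split('m')
            print(f'nearest-neighbour upsample (mixin) from {a}x{a} to {b}x{b}')
describe_blocks('32x11,32d2,16x6,16d2,8x3')  # hypothetical example spec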
# + colab={"base_uri": "https://localhost:8080/"} id="-OyvG1KbP2qT" outputId="bc0a16e1-0cbb-4951-c5ef-e8310bc9deb4"
hparams = dataclasses.asdict(H)
for k in ['enc_blocks','dec_blocks','zdim','n_batch','device_count']:
print(f'{k}:{hparams[k]}')
# + id="FGD3wwRxvF3Y"
from utils import logger
from jax.interpreters.xla import DeviceArray
log = logger(H.logdir)
if H.log_wandb:
import wandb
def logprint(*args, pprint=False, **kwargs):
if len(args) > 0: log(*args)
wandb.log({k: np.array(x) if type(x) is DeviceArray else x for k, x in kwargs.items()})
wandb.init(config=dataclasses.asdict(H))
else:
logprint = log
# + colab={"base_uri": "https://localhost:8080/"} id="cABtXQvqSG2Z" outputId="2c43dea8-4c53-44cc-dd91-0c7577d07a7e"
import numpy as np
from jax import lax
import torch
import imageio
from PIL import Image
import glob
from torch.utils.data import DataLoader
from torchvision import transforms
np.random.seed(H.seed)
torch.manual_seed(H.seed)
H = dataclasses.replace(
H,
conv_precision = {'default': lax.Precision.DEFAULT,
'high': lax.Precision.HIGH,
'highest': lax.Precision.HIGHEST}[H.conv_precision],
seed_init =H.seed,
seed_sample=H.seed + 1,
seed_train =H.seed + 2 + H.host_id,
seed_eval =H.seed + 2 + H.host_count + H.host_id,
)
print('training model on ', H.dataset)
# + [markdown] id="Gs8bNNXpTMxZ"
# ### Downloading cifar10 dataset
# + colab={"base_uri": "https://localhost:8080/"} id="4An20_C-SvCT" outputId="023f5c9a-87fd-4ad8-abc3-0945b9fe4374"
# !./setup_cifar10.sh
# + [markdown] id="Js-LK-vojdSw"
# ### Setting up the model, data and the preprocess fn.
# + id="AylLXttfTSca"
from data import set_up_data
H, data_train, data_valid_or_test, preprocess_fn = set_up_data(H)
# + colab={"base_uri": "https://localhost:8080/"} id="GWsr1xszZ_te" outputId="a5ba8d4e-b088-46ec-ac31-b4fbd250618d"
from train_helpers import load_vaes
H = dataclasses.replace(H, restore_path=checkpoint_path)
optimizer, ema_params, start_epoch = load_vaes(H, logprint)
# + colab={"base_uri": "https://localhost:8080/"} id="PEH8BtbmaK4O" outputId="f32e3fa2-746e-404b-bbae-aaca80078568"
start_epoch # no.of.epochs trained
# + colab={"base_uri": "https://localhost:8080/"} id="9nAJ3EGLICEh" outputId="6a47c0b6-aaf0-45a3-8a1c-b0c6bb6b3d40"
# Hparams for the current model
hparams = dataclasses.asdict(H)
for i, k in enumerate(sorted(hparams)):
logprint(f'type=hparam, key={k}, value={getattr(H, k)}')
# + [markdown] id="HS2o9uFqjgyv"
# ### Evaluation
# + colab={"base_uri": "https://localhost:8080/"} id="jhiF_NjEuWQv" outputId="b0d88a47-5af0-4452-d1c0-88d90ef1a71e"
from train import run_test_eval
run_test_eval(H, ema_params, data_valid_or_test, preprocess_fn, logprint)
# + [markdown] id="tppWoc_hypdn"
# ### Function to save and show a batch of images given as a numpy array.
#
#
# + id="AJbKzeuzzGcS"
def zoom_in(fname, shape):
im = Image.open(fname)
resized_im = im.resize(shape)
resized_im.save(fname)
def save_n_show(images, order, image_shape, fname, zoom=True, show=False):
n_rows, n_images = order
im = images.reshape((n_rows, n_images, *image_shape))\
.transpose([0, 2, 1, 3, 4])\
.reshape([n_rows * image_shape[0],
n_images * image_shape[1], 3])
print(f'printing samples to {fname}')
imageio.imwrite(fname, im)
if zoom:
zoom_in(fname, (640, 64)) # w=640, h=64
if show:
display(Image.open(fname))
# + [markdown] id="9TlNptkdd5ME"
# ## Generations
# + id="EcnvaTn3iJfo"
n_images = 10
num_temperatures = 3
image_shape = [H.image_size,H.image_size,H.image_channels]
H = dataclasses.replace(H, num_images_visualize=n_images, num_temperatures_visualize=num_temperatures)
# + [markdown] id="LDHUzIgBbjuX"
# Images will be saved in the following dir
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="EhJ17q1dfSNu" outputId="fb923dee-dc4d-4e68-e2c5-20f3f41874c1"
H.save_dir
# + [markdown] id="Xm_BYJYjiuzt"
# Since the model params are replicated over multiple devices, an unreplicated copy of them is made to use for sampling and generation.
# + id="VJbqZRxWilR9"
from jax import random
from vae import VAE
from flax import jax_utils
from functools import partial
rng = random.PRNGKey(H.seed_sample)
ema_apply = partial(VAE(H).apply,{'params': jax_utils.unreplicate(ema_params)})
forward_uncond_samples = partial(ema_apply, method=VAE(H).forward_uncond_samples)
# + colab={"base_uri": "https://localhost:8080/"} id="XF5dvNqeRcIC" outputId="477884a0-d016-43c3-96ac-26b3cfd65d55"
temperatures = [1.0, 0.9, 0.8, 0.7]
for t in temperatures[:H.num_temperatures_visualize]:
im = forward_uncond_samples(n_images, rng, t=t)
im = np.asarray(im)
save_n_show(im, [1,n_images], image_shape, f'{H.save_dir}/generations-tem-{t}.png')
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="RdypV3PJfyfN" outputId="bc5042cf-54c7-4380-e2f2-d36ab4951d65"
for t in temperatures[:H.num_temperatures_visualize]:
print("="*25)
print(f"Generation of {n_images} new images for t={t}")
print("="*25)
fname = f'{H.save_dir}/generations-tem-{t}.png'
display(Image.open(fname))
# + [markdown] id="89M1-l8Ogd2k"
# ## Reconstructions
# + id="014yXaJfgfhq"
n_images = 10
image_shape = [H.image_size,H.image_size,H.image_channels]
# + [markdown] id="z5xtClDEYTI-"
# Preprocessing images before getting the latents
# + id="81EExYe0glPu"
from train import get_sample_for_visualization
viz_batch_original, viz_batch_processed = get_sample_for_visualization(
data_valid_or_test, preprocess_fn, n_images, H.dataset)
# + [markdown] id="eDENCERSiMm6"
# Getting the partial functions from the model methods
# + id="vPpzIoM_hQHK"
forward_get_latents = partial(ema_apply, method=VAE(H).forward_get_latents)
forward_samples_set_latents = partial(
ema_apply, method=VAE(H).forward_samples_set_latents)
# + [markdown] id="AnNFN7S7YZe1"
# Getting latents of different levels.
# + id="nt2_Zjqlha1U"
zs = [s['z'] for s in forward_get_latents(viz_batch_processed, rng)]
# + [markdown] id="7RA8e6qJYcqF"
# The number of latent observations used depends on `H.num_variables_visualize`; altering it gives reconstructions at different resolutions.
# + id="ThgwoF6ihe9e"
recons = []
lv_points = np.floor(np.linspace(0, 1, H.num_variables_visualize + 2) * len(zs)).astype(int)[1:-1]
for i in lv_points:
recons.append(forward_samples_set_latents(n_images, zs[:i], rng, t=0.1))
# + [markdown] id="iawVwy7XYp9Z"
# Original Images
# + colab={"base_uri": "https://localhost:8080/", "height": 115} id="ih0D1sfRhy6F" outputId="8696bbaf-2a7c-4d89-9d7d-ebea19d37e7a"
orig_im = np.array(viz_batch_original)
print("Original test images")
save_n_show(orig_im, [1, n_images], image_shape, f'{H.save_dir}/orig_test.png', show=True)
# + [markdown] id="vbFgprJuYr7R"
# Reconstructions.
# + colab={"base_uri": "https://localhost:8080/", "height": 809} id="Ol7rNCgfh57R" outputId="e8d562cf-206e-42ae-a84b-5a5fd02489e8"
for i,r in enumerate(recons):
r = np.array(r)
print("="*25)
print(f"Generation of {n_images} new images for {i+1}x resolution")
print("="*25)
fname = f'{H.save_dir}/recon_test-res-{i+1}x.png'
save_n_show(r, [1, n_images], image_shape, fname, show=True)
|
kivy/tests/pyinstaller/video_widget/main.py | Galland/kivy | 13,889 | 42807 |
from project import VideoApp
if __name__ == '__main__':
from kivy.core.video import Video
assert Video is not None
VideoApp().run()
|
scripts/ln_jnas_subset.py | nameless-writer/become-yukarin | 562 | 42817 |
import argparse
import multiprocessing
from pathlib import Path
from jnas_metadata_loader import load_from_directory
from jnas_metadata_loader.jnas_metadata import JnasMetadata
parser = argparse.ArgumentParser()
parser.add_argument('jnas', type=Path)
parser.add_argument('output', type=Path)
parser.add_argument('--format', default='{sex}{text_id}_{mic}_atr_{subset}{sen_id}.wav')
argument = parser.parse_args()
jnas = argument.jnas # type: Path
output = argument.output # type: Path
jnas_list = load_from_directory(str(jnas))
atr_list = jnas_list.subset_news_or_atr('B')
output.mkdir(exist_ok=True)
def process(d: JnasMetadata):
p = d.path
out = output / argument.format.format(**d._asdict())
out.symlink_to(p)
pool = multiprocessing.Pool()
pool.map(process, atr_list)
|
speedtest/python3/speedtest.py | guyue/google-diff-match-patch | 304 | 42819 | #!/usr/bin/python3
#
# Copyright 2010 Google Inc.
# All Rights Reserved.
"""Diff Speed Test
"""
__author__ = "<EMAIL> (<NAME>)"
import imp
import gc
import sys
import time
import diff_match_patch as dmp_module
# Force a module reload. Allows one to edit the DMP module and rerun the test
# without leaving the Python interpreter.
imp.reload(dmp_module)
def main():
text1 = open("speedtest1.txt").read()
text2 = open("speedtest2.txt").read()
dmp = dmp_module.diff_match_patch()
dmp.Diff_Timeout = 0.0
# Execute one reverse diff as a warmup.
dmp.diff_main(text2, text1, False)
gc.collect()
start_time = time.time()
dmp.diff_main(text1, text2, False)
end_time = time.time()
print("Elapsed time: %f" % (end_time - start_time))
if __name__ == "__main__":
main()
|
chap8/mxnet/benchmark_model.py | wang420349864/dlcv_for_beginners | 1,424 | 42826 | import time
import mxnet as mx
benchmark_dataiter = mx.io.ImageRecordIter(
path_imgrec="../data/test.rec",
data_shape=(1, 28, 28),
batch_size=64,
mean_r=128,
scale=0.00390625,
)
mod = mx.mod.Module.load('mnist_lenet', 35, context=mx.gpu(2))
mod.bind(
data_shapes=benchmark_dataiter.provide_data,
label_shapes=benchmark_dataiter.provide_label,
for_training=False)
start = time.time()
for i, batch in enumerate(benchmark_dataiter):
mod.forward(batch)
time_elapsed = time.time() - start
msg = '{} batches iterated!\nAverage forward time per batch: {:.6f} ms'
print(msg.format(i+1, 1000*time_elapsed/float(i+1)))
|
www/apis.py | yumaojun03/blog-python-app | 200 | 42842 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
RESTful API that exchanges data in JSON format.
Why:
    The API wraps all of the web app's functionality, so operating on the data
    through the API largely decouples the front-end and back-end code, which makes
    the back-end code easier to test and the front-end code simpler to write.
How:
    An API is just another URL handler function. We want to turn a function into a
    JSON-format REST API simply by applying @api, so we implement a decorator that
    converts the function's return value into JSON.
"""
import json
import logging
import functools
from transwarp.web import ctx
def dumps(obj):
"""
Serialize ``obj`` to a JSON formatted ``str``.
    Serialize the given object.
"""
return json.dumps(obj)
class APIError(StandardError):
"""
the base APIError which contains error(required), data(optional) and message(optional).
    Holds the data for all API exception objects.
"""
def __init__(self, error, data='', message=''):
super(APIError, self).__init__(message)
self.error = error
self.data = data
self.message = message
class APIValueError(APIError):
"""
Indicate the input value has error or invalid. The data specifies the error field of input form.
    Exception raised when the input value is invalid.
"""
def __init__(self, field, message=''):
super(APIValueError, self).__init__('value:invalid', field, message)
class APIResourceNotFoundError(APIError):
"""
Indicate the resource was not found. The data specifies the resource name.
    Exception raised when the requested resource is not found.
"""
def __init__(self, field, message=''):
super(APIResourceNotFoundError, self).__init__('value:notfound', field, message)
class APIPermissionError(APIError):
"""
Indicate the api has no permission.
    Exception raised when permission is denied.
"""
def __init__(self, message=''):
super(APIPermissionError, self).__init__('permission:forbidden', 'permission', message)
def api(func):
"""
    A decorator that turns a function into a JSON API by serializing its return value to JSON.
    @api also has to handle errors. We define APIError for logical errors that occur
    while the API is being called (for example, the user does not exist); any other
    error is treated as a bug and reported with the error code 'internalerror'.
    Usage:
@app.route('/api/test')
@api
def api_test():
return dict(result='123', items=[])
"""
@functools.wraps(func)
def _wrapper(*args, **kw):
try:
r = dumps(func(*args, **kw))
except APIError, e:
r = json.dumps(dict(error=e.error, data=e.data, message=e.message))
except Exception, e:
logging.exception(e)
r = json.dumps(dict(error='internalerror', data=e.__class__.__name__, message=e.message))
ctx.response.content_type = 'application/json'
return r
return _wrapper
if __name__ == '__main__':
import doctest
doctest.testmod()
|
tools/bin/pythonSrc/pychecker-0.8.18/test_input/test25.py | YangHao666666/hawq | 450 | 42859 | 'doc'
import sys
class A:
'doc'
z = 1
def x(self): pass
def xxx():
print A.x()
print A.z
print A.a
print A.y()
print sys.lkjsdflksjasdlf
|
vakt/storage/sql/__init__.py | chuxuantinh/vakt | 132 | 42863 |
"""
SQL Storage for Policies.
"""
import logging
from sqlalchemy import and_, or_, literal, func
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm.exc import FlushError
from .model import PolicyModel, PolicyActionModel, PolicyResourceModel, PolicySubjectModel
from ..abc import Storage
from ...checker import StringExactChecker, StringFuzzyChecker, RegexChecker, RulesChecker
from ...exceptions import PolicyExistsError, UnknownCheckerType
from ...policy import TYPE_STRING_BASED, TYPE_RULE_BASED
log = logging.getLogger(__name__)
class SQLStorage(Storage):
"""Stores all policies in SQL Database"""
def __init__(self, scoped_session):
"""
Initialize SQL Storage
:param scoped_session: SQL Alchemy scoped session
"""
self.session = scoped_session
self.dialect = self.session.bind.engine.dialect.name
def add(self, policy):
try:
policy_model = PolicyModel.from_policy(policy)
self.session.add(policy_model)
self.session.commit()
except IntegrityError:
self.session.rollback()
log.error('Error trying to create already existing policy with UID=%s.', policy.uid)
raise PolicyExistsError(policy.uid)
# todo - figure out why FlushError is raised instead of IntegrityError on PyPy tests
except FlushError as e:
if 'conflicts with persistent instance' in str(e):
self.session.rollback()
log.error('Error trying to create already existing policy with UID=%s.', policy.uid)
raise PolicyExistsError(policy.uid)
log.info('Added Policy: %s', policy)
def get(self, uid):
policy_model = self.session.query(PolicyModel).get(uid)
if not policy_model:
return None
return policy_model.to_policy()
def get_all(self, limit, offset):
self._check_limit_and_offset(limit, offset)
cur = self.session.query(PolicyModel).order_by(PolicyModel.uid.asc()).slice(offset, offset + limit)
for policy_model in cur:
yield policy_model.to_policy()
def find_for_inquiry(self, inquiry, checker=None):
cur = self._get_filtered_cursor(inquiry, checker)
for policy_model in cur:
yield policy_model.to_policy()
def update(self, policy):
try:
policy_model = self.session.query(PolicyModel).get(policy.uid)
if not policy_model:
return
policy_model.update(policy)
self.session.commit()
except IntegrityError:
self.session.rollback()
raise
log.info('Updated Policy with UID=%s. New value is: %s', policy.uid, policy)
def delete(self, uid):
self.session.query(PolicyModel).filter(PolicyModel.uid == uid).delete()
log.info('Deleted Policy with UID=%s.', uid)
def _get_filtered_cursor(self, inquiry, checker):
"""
Returns cursor with proper query-filter based on the checker type.
"""
cur = self.session.query(PolicyModel)
if isinstance(checker, StringFuzzyChecker):
return cur.filter(
PolicyModel.type == TYPE_STRING_BASED,
PolicyModel.actions.any(PolicyActionModel.action_string.like('%{}%'.format(inquiry.action))),
PolicyModel.resources.any(PolicyResourceModel.resource_string.like('%{}%'.format(inquiry.resource))),
PolicyModel.subjects.any(PolicySubjectModel.subject_string.like('%{}%'.format(inquiry.subject))))
elif isinstance(checker, StringExactChecker):
return cur.filter(
PolicyModel.type == TYPE_STRING_BASED,
PolicyModel.actions.any(PolicyActionModel.action_string == inquiry.action),
PolicyModel.resources.any(PolicyResourceModel.resource_string == inquiry.resource),
PolicyModel.subjects.any(PolicySubjectModel.subject_string == inquiry.subject))
elif isinstance(checker, RegexChecker):
if not self._supports_regex_operator():
return cur.filter(PolicyModel.type == TYPE_STRING_BASED)
return cur.filter(
PolicyModel.type == TYPE_STRING_BASED,
PolicyModel.actions.any(
or_(
and_(PolicyActionModel.action_regex.is_(None),
PolicyActionModel.action_string == inquiry.action),
and_(PolicyActionModel.action_regex.isnot(None),
self._regex_operation(inquiry.action, PolicyActionModel.action_regex))
),
),
PolicyModel.resources.any(
or_(
and_(PolicyResourceModel.resource_regex.is_(None),
PolicyResourceModel.resource_string == inquiry.resource),
and_(PolicyResourceModel.resource_regex.isnot(None),
self._regex_operation(inquiry.resource, PolicyResourceModel.resource_regex))
),
),
PolicyModel.subjects.any(
or_(
and_(PolicySubjectModel.subject_regex.is_(None),
PolicySubjectModel.subject_string == inquiry.subject),
and_(PolicySubjectModel.subject_regex.isnot(None),
self._regex_operation(inquiry.subject, PolicySubjectModel.subject_regex))
),
)
)
elif isinstance(checker, RulesChecker):
return cur.filter(PolicyModel.type == TYPE_RULE_BASED)
elif not checker:
return cur
else:
log.error('Provided Checker type is not supported.')
raise UnknownCheckerType(checker)
def _supports_regex_operator(self):
"""
Does database support regex operator?
"""
return self.dialect in ['mysql', 'postgresql', 'oracle']
def _regex_operation(self, left, right):
"""
Get database-specific regex operation.
Don't forget to check if there is a support for regex operator before using it.
"""
if self.dialect == 'mysql':
return literal(left).op('REGEXP BINARY', is_comparison=True)(right)
elif self.dialect == 'postgresql':
return literal(left).op('~', is_comparison=True)(right)
elif self.dialect == 'oracle':
return func.REGEXP_LIKE(left, right)
return None
|
objax/functional/parallel.py | kihyuks/objax | 715 | 42868 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['partial', 'pmax', 'pmean', 'pmin', 'psum']
from functools import partial
import jax
from jax import lax
def pmax(x: jax.interpreters.pxla.ShardedDeviceArray, axis_name: str = 'device'):
"""Compute a multi-device reduce max on x over the device axis axis_name."""
return lax.pmax(x, axis_name)
def pmean(x: jax.interpreters.pxla.ShardedDeviceArray, axis_name: str = 'device'):
"""Compute a multi-device reduce mean on x over the device axis axis_name."""
return lax.pmean(x, axis_name)
def pmin(x: jax.interpreters.pxla.ShardedDeviceArray, axis_name: str = 'device'):
"""Compute a multi-device reduce min on x over the device axis axis_name."""
return lax.pmin(x, axis_name)
def psum(x: jax.interpreters.pxla.ShardedDeviceArray, axis_name: str = 'device'):
"""Compute a multi-device reduce sum on x over the device axis axis_name."""
return lax.psum(x, axis_name)
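# Example usage (sketch only, added for illustration): these reducers are meant to be
# called from code that objax.Parallel replicates across devices, where the implicit
# axis name is 'device'; outside such a replicated context there is no 'device' axis.
# def replica_loss(x):
#     local_loss = ((x - x.mean()) ** 2).mean()
#     return pmean(local_loss)  # average the per-device losses across all devices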
|
doubanfm/model.py | fakegit/douban.fm | 783 | 42911 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Data layer.
"""
from threading import RLock, Thread
import logging
import functools
from six.moves import queue
from doubanfm.API.api import Doubanfm
from doubanfm.API.netease_api import Netease
from doubanfm.config import db_config
logger = logging.getLogger('doubanfm')
douban = Doubanfm()
mutex = RLock()
QUEUE_SIZE = 5
class Playlist(object):
"""
    The playlist; all of its methods are mutually exclusive (guarded by a lock).
    Usage:
        playlist = Playlist()
        playingsong = playlist.get_song()
    Get the currently playing song:
        playingsong = playlist.get_playingsong()
"""
def __init__(self):
self._playlist = queue.Queue(QUEUE_SIZE)
        self._daily_playlist = []  # daily recommended songs
        self._daily_playlist_index = -1  # index of the current daily song
self._playingsong = None
self._get_first_song()
        # lyrics are refreshed according to the current song
self._lrc = {}
self._pre_playingsong = None
        # duplicate songs can appear; keep a hash so they are not replayed
self.hash_sid = {}
def lock(func):
"""
        Mutual-exclusion lock decorator.
"""
@functools.wraps(func)
def _func(*args, **kwargs):
mutex.acquire()
try:
return func(*args, **kwargs)
finally:
mutex.release()
return _func
def _watchdog(self):
"""
        Thread that keeps the play queue filled.
"""
sid = self._playingsong['sid']
while 1:
            # skip songs already played in this session
while 1:
song = douban.get_song(sid)
if not song:
continue
sid = song['sid']
if sid not in self.hash_sid:
break
self._playlist.put(song)
if not self._playingsong:
self._playlist.get(False)
@lock
def _get_first_song(self):
song = douban.get_first_song()
if song:
self._playlist.put(song)
self._playingsong = song
Thread(target=self._watchdog).start()
def get_lrc(self):
"""
        Return the lyrics of the currently playing song.
"""
if self._playingsong != self._pre_playingsong:
self._lrc = douban.get_lrc(self._playingsong)
self._pre_playingsong = self._playingsong
return self._lrc
def set_channel(self, channel_num):
"""
        Set the FM channel that the API requests.
        :param channel_num: index into channel_list (int)
"""
douban.set_channel(channel_num)
self.empty()
if channel_num != 2:
self._get_first_song()
def set_song_like(self, playingsong):
douban.rate_music(playingsong['sid'])
def set_song_unlike(self, playingsong):
douban.unrate_music(playingsong['sid'])
def get_daily_songs(self):
"""
        Fetch the daily recommended songs.
"""
self._daily_playlist = douban.get_daily_songs()
        # prefix each title with its index
for index, i in enumerate(self._daily_playlist):
i['title'] = str(index + 1) + '/' + str(len(self._daily_playlist)) + ' ' + i['title']
def get_daily_song(self, netease=False):
"""
        Get a single song from the daily playlist.
"""
if not self._daily_playlist:
self.get_daily_songs()
self._daily_playlist_index = 0
else:
self._daily_playlist_index = (self._daily_playlist_index + 1) % len(self._daily_playlist)
song = self._daily_playlist[self._daily_playlist_index]
song['index'] = self._daily_playlist_index
        self.get_netease_song(song, netease)  # optionally switch to the 320k Netease version
self._playingsong = song
return song
@lock
def bye(self):
"""
        Mark the current song as "never play again"; the API returns a new playlist.
"""
douban.bye(self._playingsong['sid'])
@lock
def get_song(self, netease=False):
"""
        Get a song; the unified external interface.
"""
song = self._playlist.get(True)
        self.hash_sid[song['sid']] = True  # de-duplicate
        self.get_netease_song(song, netease)  # optionally switch to the 320k Netease version
self._playingsong = song
return song
def get_netease_song(self, song, netease):
        # 320 kbps audio from Netease
if netease:
url, kbps = Netease().get_url_and_bitrate(song['title'])
if url and kbps:
song['url'], song['kbps'] = url, kbps
@lock
def get_playingsong(self):
return self._playingsong
@lock
def empty(self):
"""
        Clear the playlist.
"""
self._playingsong = None
self._playlist = queue.Queue(QUEUE_SIZE)
def submit_music(self, playingsong):
douban.submit_music(playingsong['sid'])
class History(object):
def __init__(self):
db_config.history
class Channel(object):
def __init__(self):
self.lines = douban.channels
|
froide/campaign/listeners.py | xenein/froide | 198 | 42920 |
from .utils import connect_foirequest
def connect_campaign(sender, **kwargs):
reference = kwargs.get("reference")
if not reference:
return
if "@" in reference:
parts = reference.split("@", 1)
else:
parts = reference.split(":", 1)
if len(parts) != 2:
return
namespace = parts[0]
connect_foirequest(sender, namespace)
|
rayleigh/searchable_collection.py | mgsh/rayleigh | 185 | 42921 | """
Methods to search an ImageCollection with brute force, exhaustive search.
"""
import cgi
import abc
import cPickle
import numpy as np
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import \
manhattan_distances, euclidean_distances, additive_chi2_kernel
import pyflann
from scipy.spatial import cKDTree
import util
from image import Image
from rayleigh.util import TicToc
tt = TicToc()
class SearchableImageCollection(object):
"""
Initialize with a rayleigh.ImageCollection, a distance_metric, and the
number of dimensions to reduce the histograms to.
Parameters
----------
image_collection : rayleigh.ImageCollection
dist_metric : string
must be in self.DISTANCE_METRICS
sigma : nonnegative float
Amount of smoothing applied to histograms.
If 0, none.
num_dimensions : int
number of dimensions to reduce the histograms to, using PCA.
If 0, do not reduce dimensions.
"""
def __init__(self, image_collection, dist_metric, sigma, num_dimensions):
self.ic = image_collection
self.id_ind_map = self.ic.get_id_ind_map()
self.distance_metric = dist_metric
if self.distance_metric not in self.DISTANCE_METRICS:
raise Exception("Unsupported distance metric.")
self.num_dimensions = num_dimensions
self.hists_reduced = self.ic.get_hists()
self.sigma = sigma
if self.sigma > 0:
self.smooth_histograms()
if self.num_dimensions > 0:
self.reduce_dimensionality()
@staticmethod
def load(filename):
"""
Load ImageCollection from filename.
"""
return cPickle.load(open(filename))
def save(self, filename):
"""
Save self to filename.
"""
cPickle.dump(self, open(filename, 'w'), 2)
def smooth_histograms(self):
"""
Smooth histograms with a Gaussian.
"""
for i in range(self.hists_reduced.shape[0]):
color_hist = self.hists_reduced[i, :]
self.hists_reduced[i, :] = util.smooth_histogram(
color_hist, self.ic.palette, self.sigma)
def reduce_dimensionality(self):
"""
Compute and store PCA dimensionality-reduced histograms.
"""
tt.tic('reduce_dimensionality')
self.pca = PCA(n_components=self.num_dimensions, whiten=True)
self.pca.fit(self.hists_reduced)
self.hists_reduced = self.pca.transform(self.hists_reduced)
tt.toc('reduce_dimensionality')
def get_image_hist(self, img_id):
"""
Return the smoothed image histogram of the image with the given id.
Parameters
----------
img_id : string
Returns
-------
color_hist : ndarray
"""
img_ind = self.id_ind_map[img_id]
color_hist = self.hists_reduced[img_ind, :]
return color_hist
def search_by_image_in_dataset(self, img_id, num=20):
"""
Search images in database for similarity to the image with img_id in
the database.
See search_by_color_hist() for implementation.
Parameters
----------
img_id : string
num : int, optional
Returns
-------
query_img_data : dict
results : list
list of dicts of nearest neighbors to query
"""
query_img_data = self.ic.get_image(img_id, no_hist=True)
color_hist = self.get_image_hist(img_id)
results, time_elapsed = self.search_by_color_hist(color_hist, num, reduced=True)
return query_img_data, results, time_elapsed
def search_by_image(self, image_filename, num=20):
"""
Search images in database by color similarity to image.
See search_by_color_hist().
"""
query_img = Image(image_filename)
color_hist = util.histogram_colors_smoothed(
query_img.lab_array, self.ic.palette,
sigma=self.sigma, direct=False)
results, time_elapsed = self.search_by_color_hist(color_hist)
return query_img.as_dict(), results, time_elapsed
def search_by_color_hist(self, color_hist, num=20, reduced=False):
"""
Search images in database by color similarity to the given histogram.
Parameters
----------
color_hist : (K,) ndarray
histogram over the color palette
num : int, optional
number of nearest neighbors to ret
reduced : boolean, optional
is the given color_hist already reduced in dimensionality?
Returns
-------
query_img : dict
info about the query image
results : list
list of dicts of nearest neighbors to query
"""
if self.num_dimensions > 0 and not reduced:
color_hist = self.pca.transform(color_hist)
tt.tic('nn_ind')
nn_ind, nn_dists = self.nn_ind(color_hist, num)
time_elapsed = tt.qtoc('nn_ind')
results = []
# TODO: tone up the amount of data returned: don't need resized size,
# _id, maybe something else?
for ind, dist in zip(nn_ind, nn_dists):
img_id = self.id_ind_map[ind]
img = self.ic.get_image(img_id, no_hist=True)
img['url'] = cgi.escape(img['url'])
img['distance'] = dist
results.append(img)
return results, time_elapsed
@abc.abstractmethod
def nn_ind(self, color_hist, num):
"""
Return num closest nearest neighbors (potentially approximate) to the
query color_hist, and the distances to them.
Override this search method in extending classes.
Parameters
----------
color_hist : (K,) ndarray
histogram over the color palette
num : int
number of nearest neighbors to return.
Returns
-------
nn_ind : (num,) ndarray
Indices of the neighbors in the dataset.
nn_dists (num,) ndarray
Distances to the neighbors returned.
"""
pass
class SearchableImageCollectionExact(SearchableImageCollection):
"""
Search the image collection exhaustively (mainly through np.dot).
"""
DISTANCE_METRICS = ['manhattan', 'euclidean', 'chi_square']
def nn_ind(self, color_hist, num):
"""
Exact nearest neighbor seach through exhaustive comparison.
"""
if self.distance_metric == 'manhattan':
dists = manhattan_distances(color_hist, self.hists_reduced)
elif self.distance_metric == 'euclidean':
dists = euclidean_distances(color_hist, self.hists_reduced, squared=True)
elif self.distance_metric == 'chi_square':
dists = -additive_chi2_kernel(color_hist, self.hists_reduced)
dists = dists.flatten()
nn_ind = np.argsort(dists).flatten()[:num]
nn_dists = dists[nn_ind]
return nn_ind, nn_dists
class SearchableImageCollectionFLANN(SearchableImageCollection):
"""
Search the image collection using the FLANN library for aNN indexing.
The FLANN index is built with automatic tuning of the search algorithm,
which can take a while (~90s on 25K images).
"""
DISTANCE_METRICS = ['manhattan', 'euclidean', 'chi_square']
@staticmethod
def load(filename):
# Saving the flann object results in memory errors, so we use its own
# method to save its index in a separate file.
sic = cPickle.load(open(filename))
return sic.build_index(filename + '_flann_index')
def save(self, filename):
# See comment in load().
flann = self.flann
self.flann = None
cPickle.dump(self, open(filename, 'w'), 2)
flann.save_index(filename + '_flann_index')
self.flann = flann
def __init__(self, image_collection, distance_metric, sigma, dimensions):
super(SearchableImageCollectionFLANN, self).__init__(
image_collection, distance_metric, sigma, dimensions)
self.build_index()
def build_index(self, index_filename=None):
tt.tic('build_index')
pyflann.set_distance_type(self.distance_metric)
self.flann = pyflann.FLANN()
if index_filename:
self.flann.load_index(index_filename, self.hists_reduced)
else:
self.params = self.flann.build_index(
self.hists_reduced, algorithm='autotuned',
sample_fraction=0.3, target_precision=.8,
build_weight=0.01, memory_weight=0.)
print(self.params)
tt.toc('build_index')
return self
def nn_ind(self, color_hist, num):
nn_ind, nn_dists = self.flann.nn_index(
color_hist, num, checks=self.params['checks'])
return nn_ind.flatten(), nn_dists.flatten()
class SearchableImageCollectionCKDTree(SearchableImageCollection):
"""
Use the cKDTree data structure from scipy.spatial for the index.
Parameters:
- LEAF_SIZE (int): The number of points at which the algorithm switches
over to brute-force.
- EPS (non-negative float): Parameter for query(), such that the
k-th returned value is guaranteed to be no further than (1 + eps)
times the distance to the real k-th nearest neighbor.
NOTE: These parameters have not been tuned.
"""
DISTANCE_METRICS = ['manhattan', 'euclidean']
Ps = {'manhattan': 1, 'euclidean': 2}
LEAF_SIZE = 5
EPSILON = 1
@staticmethod
def load(filename):
return cPickle.load(open(filename)).build_index()
def __init__(self, image_collection, distance_metric, sigma, dimensions):
super(SearchableImageCollectionCKDTree, self).__init__(
image_collection, distance_metric, sigma, dimensions)
self.build_index()
def build_index(self):
tt.tic('build_index_ckdtree')
self.ckdtree = cKDTree(self.hists_reduced, self.LEAF_SIZE)
self.p = self.Ps[self.distance_metric]
tt.toc('build_index_ckdtree')
return self
def nn_ind(self, color_hist, num):
nn_dists, nn_ind = self.ckdtree.query(
color_hist, num, eps=self.EPSILON, p=self.p)
return nn_ind.flatten(), nn_dists.flatten()
|
Code/coupon_collector.py | PacktPublishing/Modern-Python-Cookbook | 107 | 42922 |
"""Python Cookbook
See
http://www.brynmawr.edu/math/people/anmyers/PAPERS/SIGEST_Coupons.pdf
and
https://en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind
and
https://en.wikipedia.org/wiki/Binomial_coefficient
"""
from math import factorial
def expected(n, population=8):
"""
What is the probability p(n, d) that exactly n boxes of cereal will
have to be purchased in order to obtain, for the first time,
a complete collection of at least one of each of the d kinds of souvenir
coupons?
.. math::
p(n, d) = \frac{d!}{d^n} \lbrace\textstyle{ n-1 \atop d-1 }\rbrace
"""
return factorial(population)/population**n * stirling2(n-1, population-1)
def binom(n, k):
"""
.. math::
\binom n k = \frac{n!}{k!\,(n-k)!} \quad \text{for }\ 0\leq k\leq n
"""
return factorial(n)/(factorial(k)*factorial(n-k))
def stirling2(n, k):
"""
The Stirling numbers of the second kind,
written S(n,k) or :math:`\lbrace\textstyle{n\atop k}\rbrace`
count the number of ways to partition a set of n labelled objects
into k nonempty unlabelled subsets.
.. math::
\lbrace\textstyle{n\atop n}\rbrace = 1 \\
\lbrace\textstyle{n\atop 1}\rbrace = 1 \\
\lbrace\textstyle{n\atop k}\rbrace = k \lbrace\textstyle{n-1 \atop k}\rbrace + \lbrace\textstyle{n-1 \atop k-1}\rbrace
Or
.. math::
\left\{ {n \atop k}\right\} = \frac{1}{k!}\sum_{j=0}^{k} (-1)^{k-j} \binom{k}{j} j^n
"""
return 1/factorial(k)*sum( (-1 if (k-j)%2 else 1)*binom(k,j)*j**n for j in range(0,k+1) )
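# Quick sanity checks (added for illustration; the values follow directly from the
# definitions above): S(4, 2) = 7 and C(4, 2) = 6.
assert round(stirling2(4, 2)) == 7
assert round(binom(4, 2)) == 6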
if __name__ == "__main__":
for i in range(8,30):
print(i, expected(i, 8))
print(binom(24,12))
|
kaldi/base/__init__.py | mxmpl/pykaldi | 916 | 42930 | from ._kaldi_error import *
from ._timer import *
__all__ = [name for name in dir()
if name[0] != '_'
and not name.endswith('Base')]
|
saas/dataops/api/dataset/APP-META-PRIVATE/postrun/00_init_job.py | iuskye/SREWorks | 407 | 42963 |
# coding: utf-8
from common import checker
from warehouse import entry as warehouse_entry
from pmdb import entry as pmdb_entry
from dataset import entry as dataset_entry
from health import entry as health_entry
from job import entry as job_entry
from es import entry as es_entry
checker.check_sreworks_data_service_ready()
print("======start init warehouse======")
warehouse_entry.init()
print("======end init warehouse======")
print("======start init pmdb======")
pmdb_entry.init()
print("======end init pmdb======")
print("======start init dataset======")
dataset_entry.init()
print("======end init dataset======")
print("======start init health======")
health_entry.init()
print("======end init health======")
print("======start init es======")
es_entry.init()
print("======end init es======")
print("======start init job======")
job_entry.init()
print("======end init job======")
|
tests/util_test.py | gurpradeep/securitybot | 1,053 | 42970 |
from unittest2 import TestCase
from datetime import datetime, timedelta
import securitybot.util as util
class VarTest(TestCase):
def test_hours(self):
assert util.OPENING_HOUR < util.CLOSING_HOUR, 'Closing hour must be after opening hour.'
class NamedTupleTest(TestCase):
def test_empty(self):
tup = util.tuple_builder()
assert tup.answer is None
assert tup.text == ''
def test_full(self):
tup = util.tuple_builder(True, 'Yes')
assert tup.answer is True
assert tup.text == 'Yes'
class BusinessHoursTest(TestCase):
def test_weekday(self):
'''Test business hours during a weekday.'''
# 18 July 2016 is a Monday. If this changes, please contact the IERS.
morning = datetime(year=2016, month=7, day=18, hour=util.OPENING_HOUR,
tzinfo=util.LOCAL_TZ)
assert util.during_business_hours(morning)
noon = datetime(year=2016, month=7, day=18, hour=12, tzinfo=util.LOCAL_TZ)
assert util.during_business_hours(noon), \
'This may fail if noon is no longer during business hours.'
afternoon = datetime(year=2016, month=7, day=18, hour=util.CLOSING_HOUR - 1,
minute=59, second=59, tzinfo=util.LOCAL_TZ)
assert util.during_business_hours(afternoon)
breakfast = datetime(year=2016, month=7, day=18, hour=util.OPENING_HOUR - 1, minute=59,
second=59, tzinfo=util.LOCAL_TZ)
assert not util.during_business_hours(breakfast)
supper = datetime(year=2016, month=7, day=18, hour=util.CLOSING_HOUR,
tzinfo=util.LOCAL_TZ)
assert not util.during_business_hours(supper)
def test_weekend(self):
'''Test "business hours" during a weekend.'''
# As such, 17 July 2016 is a Sunday.
sunday_morning = datetime(year=2016, month=7, day=17, hour=util.OPENING_HOUR,
tzinfo=util.LOCAL_TZ)
assert not util.during_business_hours(sunday_morning)
class ExpirationTimeTest(TestCase):
def test_same_day(self):
'''Test time delta within the same day.'''
date = datetime(year=2016, month=7, day=18, hour=util.OPENING_HOUR, tzinfo=util.LOCAL_TZ)
td = timedelta(hours=((util.CLOSING_HOUR - util.OPENING_HOUR) % 24) / 2)
after = date + td
assert util.get_expiration_time(date, td) == after
def test_next_weekday(self):
'''Test time delta overnight.'''
date = datetime(year=2016, month=7, day=18, hour=util.CLOSING_HOUR - 1,
tzinfo=util.LOCAL_TZ)
next_date = datetime(year=2016, month=7, day=19, hour=util.OPENING_HOUR + 1,
tzinfo=util.LOCAL_TZ)
assert util.get_expiration_time(date, timedelta(hours=2)) == next_date
def test_edge_weekday(self):
'''Test time delta overnight just barely within range.'''
date = datetime(year=2016, month=7, day=18, hour=util.CLOSING_HOUR - 1, minute=59,
second=59, tzinfo=util.LOCAL_TZ)
td = timedelta(seconds=1)
after = datetime(year=2016, month=7, day=19, hour=util.OPENING_HOUR,
tzinfo=util.LOCAL_TZ)
assert util.get_expiration_time(date, td) == after
def test_next_weekend(self):
'''Test time delta over a weekend.'''
date = datetime(year=2016, month=7, day=15, hour=util.CLOSING_HOUR - 1,
tzinfo=util.LOCAL_TZ)
next_date = datetime(year=2016, month=7, day=18, hour=util.OPENING_HOUR + 1,
tzinfo=util.LOCAL_TZ)
assert util.get_expiration_time(date, timedelta(hours=2)) == next_date
def test_edge_weekend(self):
'''Test time delta over a weekend just barely within range.'''
date = datetime(year=2016, month=7, day=15, hour=util.CLOSING_HOUR - 1, minute=59,
second=59, tzinfo=util.LOCAL_TZ)
td = timedelta(seconds=1)
after = datetime(year=2016, month=7, day=18, hour=util.OPENING_HOUR,
tzinfo=util.LOCAL_TZ)
assert util.get_expiration_time(date, td) == after
|
observations/r/swahili.py | hajime9652/observations | 199 | 42973 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def swahili(path):
"""Swahili
Attitudes towards the Swahili language among Kenyan school children
A dataset with 480 observations on the following 4 variables.
`Province`
`NAIROBI` or `PWANI`
`Sex`
`female` or `male`
`Attitude.Score`
Score (out a possible 200 points) on a survey of attitude towards the
Swahili language
`School`
Code for the school: `A` through `L`
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `swahili.csv`.
Returns:
Tuple of np.ndarray `x_train` with 480 rows and 4 columns and
dictionary `metadata` of column headers (feature names).
"""
import pandas as pd
path = os.path.expanduser(path)
filename = 'swahili.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/Stat2Data/Swahili.csv'
maybe_download_and_extract(path, url,
save_file_name='swahili.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata
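# Example usage (sketch; the data directory below is just an assumption):
# x_train, metadata = swahili('~/observations_data')
# print(x_train.shape)        # expected (480, 4)
# print(metadata['columns'])  # Province, Sex, Attitude.Score, School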
|
gdal/perftests/overview.py | jpapadakis/gdal | 3,100 | 43035 |
# SPDX-License-Identifier: MIT
# Copyright 2020 <NAME>
from osgeo import gdal
import time
def doit(compress, threads):
gdal.SetConfigOption('GDAL_NUM_THREADS', str(threads))
filename = '/vsimem/test.tif'
ds = gdal.GetDriverByName('GTiff').Create(filename, 20000, 20000, 3,
options = ['COMPRESS=' + compress,
'TILED=YES'])
ds.GetRasterBand(1).Fill(50)
ds.GetRasterBand(3).Fill(100)
ds.GetRasterBand(3).Fill(200)
ds = None
ds = gdal.Open(filename, gdal.GA_Update)
start = time.time()
ds.BuildOverviews('CUBIC', [2,4,8])
end = time.time()
print('COMPRESS=%s, NUM_THREADS=%d: %.2f' % (compress, threads, end - start))
gdal.SetConfigOption('GDAL_NUM_THREADS', None)
doit('NONE', 0)
doit('NONE', 2)
doit('NONE', 4)
doit('NONE', 8)
doit('ZSTD', 0)
doit('ZSTD', 2)
doit('ZSTD', 4)
doit('ZSTD', 8)
|
tests/perf_test/mind_expression_perf/generate_report.py | PowerOlive/mindspore | 3,200 | 43055 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import sys
import re
import json
import os
import time
import openpyxl as opx
def parse_arguments():
print(sys.argv)
me_report_path = sys.argv[1]
log_path = sys.argv[2]
n_iter = sys.argv[3]
out = sys.argv[4]
assert n_iter.isdigit()
return me_report_path, log_path, int(n_iter), out
def extract_by_keyword(doc, keyword, pattern):
rst = []
for i, s in enumerate(doc):
if keyword in s:
p = re.findall(pattern, s)
print("L%d: extracted %s from '%s'" % (i, p, s.strip()))
rst.extend(p)
return rst
def process_log(fname, log_path, n_iter, keyword, pattern):
rnt = {}
for i in range(1, 1+n_iter):
fname_path = os.path.join(log_path, fname % i)
with open(fname_path) as f:
print("\nLoading %s" % fname_path)
rst = extract_by_keyword(f, keyword, pattern)
rnt[fname % i] = rst
return rnt
def summarize(func):
def wrapper(*args, **kwargs):
log = func(*args, **kwargs)
times = list(log.items())
times.sort(key=lambda x: x[1])
min_file, min_time = times[0]
avg = sum(map(lambda x: x[1], times)) / len(times)
log["min_time"] = min_time
log["min_file"] = min_file
log["avg_time"] = avg
return log
return wrapper
@summarize
def process_bert_log(log_path, n_iter):
fname = "bert%d.log"
total = process_log(fname, log_path, n_iter, "TotalTime", r"\d+.\d+")
task = process_log(fname, log_path, n_iter, "task_emit", r"\d+.\d+")
log = {}
for fname in total:
log[fname] = float(total[fname][0]) - float(task[fname][0])
return log
@summarize
def process_resnet_log(log_path, n_iter):
fname = "resnet%d.log"
total = process_log(fname, log_path, n_iter, "TotalTime", r"\d+.\d+")
task = process_log(fname, log_path, n_iter, "task_emit", r"\d+.\d+")
log = {}
for fname in total:
log[fname] = float(total[fname][0]) - float(task[fname][0])
return log
@summarize
def process_gpt_log(log_path, n_iter):
fname = "gpt%d.log"
total = process_log(fname, log_path, n_iter, "TotalTime", r"\d+.\d+")
task = process_log(fname, log_path, n_iter, "task_emit", r"\d+.\d+")
log = {}
for fname in total:
log[fname] = float(total[fname][0]) - float(task[fname][0])
return log
@summarize
def process_reid_log(log_path, n_iter):
log = {}
for i in range(8):
fname = "reid_%d_"+str(i)+".log"
total = process_log(fname, log_path, n_iter, "TotalTime", r"\d+.\d+")
task = process_log(fname, log_path, n_iter, "task_emit", r"\d+.\d+")
for fname in total:
log[fname] = float(total[fname][0]) - float(task[fname][0])
return log
def write_to_me_report(log, me_report_path):
wb = opx.load_workbook(me_report_path)
sheet = wb["Sheet"]
idx = sheet.max_row + 1
date = time.strftime('%m%d', time.localtime())
sheet['A%d' % idx] = date
sheet['B%d' % idx] = round(log["reid"]["min_time"], 2)
sheet['C%d' % idx] = round(log["bert"]["min_time"], 2)
sheet['D%d' % idx] = round(log['resnet']["min_time"], 2)
sheet['E%d' % idx] = round(log['gpt']["min_time"], 2)
wb.save(me_report_path)
def generate_report():
me_report_path, log_path, n_iter, out = parse_arguments()
log_data = {}
bert_log = process_bert_log(log_path, n_iter)
resnet_log = process_resnet_log(log_path, n_iter)
gpt_log = process_gpt_log(log_path, n_iter)
reid_log = process_reid_log(log_path, n_iter)
log_data["bert"] = bert_log
log_data["resnet"] = resnet_log
log_data["gpt"] = gpt_log
log_data["reid"] = reid_log
with open(out, "w") as f:
json.dump(log_data, f, indent=2)
write_to_me_report(log_data, me_report_path)
if __name__ == "__main__":
generate_report()
|
Model_Free_L2O/L2O-Swarm/src/loss.py | JohnZ03/Open-L2O | 112 | 43067 | import pickle
import numpy as np
import matplotlib.pyplot as plt
with open('./quadratic/eval_record.pickle','rb') as loss:
data = pickle.load(loss)
print('Mat_record',len(data['Mat_record']))
#print('bias',data['inter_gradient_record'])
#print('constant',data['intra_record'])
with open('./quadratic/evaluate_record.pickle','rb') as loss1:
data1 = pickle.load(loss1)
x = np.array(data1['x_record'])
print('x_record',x.shape)
#print('bias',data1['inter_gradient_record'])
#print('constant',data1['intra_record'])
#x = range(10000)
#ax = plt.axes(yscale='log')
#ax.plot(x,data,'b')
#plt.show('loss') |
tools/similarity.py | bruinxiong/gnerf | 137 | 43069 |
import torch
from kornia.losses import ssim as dssim
from lpips_pytorch import LPIPS
lpips_fn = LPIPS(net_type='alex', version='0.1')
lpips_fn.eval()
def mse(image_pred, image_gt, valid_mask=None, reduction='mean'):
value = (image_pred - image_gt) ** 2
if valid_mask is not None:
value = value[valid_mask]
if reduction == 'mean':
return torch.mean(value)
return value
def psnr(image_pred, image_gt, valid_mask=None, reduction='mean'):
image_pred = image_pred / 2 + 0.5
image_gt = image_gt / 2 + 0.5
return -10 * torch.log10(mse(image_pred, image_gt, valid_mask, reduction))
def ssim(image_pred, image_gt, reduction='mean'):
image_pred = image_pred / 2 + 0.5
image_gt = image_gt / 2 + 0.5
dssim_ = dssim(image_pred, image_gt, 3, reduction) # dissimilarity in [0, 1]
return 1 - 2 * dssim_ # in [-1, 1]
def lpips(image_pred, image_gt, device='cpu'):
lpips_fn.to(device)
with torch.no_grad():
lpips_ = lpips_fn(image_pred, image_gt)
return lpips_.mean().item()
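# Example usage (sketch, not part of the original file). The metrics above assume
# image tensors scaled to [-1, 1] with shape (B, C, H, W):
# pred = torch.rand(1, 3, 64, 64) * 2 - 1
# gt = torch.rand(1, 3, 64, 64) * 2 - 1
# print(psnr(pred, gt).item(), ssim(pred, gt).item(), lpips(pred, gt))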
|
tests/utils.py | sjamgade/python-socks | 158 | 43090 | import socket
def is_connectable(host, port):
sock = None
try:
sock = socket.create_connection((host, port), 1)
result = True
except socket.error:
result = False
finally:
if sock:
sock.close()
return result
|
pypyr/steps/append.py | mofm/pypyr | 261 | 43174 |
"""pypyr step that appends items to a mutable sequence, such as a list."""
import logging
from pypyr.utils.asserts import assert_key_exists, assert_key_is_truthy
logger = logging.getLogger(__name__)
def run_step(context):
"""Append item to a mutable sequence.
Expects input:
append:
list (list or str): Add addMe to this mutable sequence.
addMe (any): Add this item to the list.
unpack (bool): Optional. Defaults False. If True, enumerate addMe
and append each item individually.
If append.list is a str, it refers to a key in context which contains a
list, e.g context['my_list'] = [1, 2, 3]. If no such key exists, will
create a list with that name and add addMe as the 1st item on the new list.
This is an append, not an extend, unless append.unpack = True.
If you want to add to a set, use pypyr.steps.add instead.
Args:
context (pypyr.context.Context): Mandatory. Context is a dictionary or
dictionary-like.
Context must contain key 'append'
"""
logger.debug("started")
context.assert_key_has_value(key='append', caller=__name__)
step_input = context.get_formatted('append')
assert_key_is_truthy(obj=step_input,
key='list',
caller=__name__,
parent='append')
assert_key_exists(obj=step_input,
key='addMe',
caller=__name__,
parent='append')
lst = step_input['list']
add_me = step_input['addMe']
is_extend = step_input.get('unpack', False)
# str value means referring to a key in context rather than list instance
if isinstance(lst, str):
existing_sequence = context.get(lst, None)
if existing_sequence:
append_or_extend_list(existing_sequence, add_me, is_extend)
else:
# list(x) works only if x is iterable, [x] works when x != iterable
context[lst] = list(add_me) if is_extend else [add_me]
else:
# anything that supports append: list, deque, array... if not is_extend
append_or_extend_list(lst, add_me, is_extend)
logger.debug("started")
def append_or_extend_list(lst, add_me, is_extend):
"""Append or extend list.
Args:
lst (list-like): The list to append/extend
add_me (any): Item(s) to append/extend to lst
is_extend (bool): If True does extend rather than append.
Returns: None
"""
if is_extend:
lst.extend(add_me)
else:
lst.append(add_me)
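# Usage sketch (not part of the original step; the pipeline YAML below is
# illustrative only):
#   - name: pypyr.steps.append
#     in:
#       append:
#         list: my_list     # or a list instance
#         addMe: [4, 5]
#         unpack: True      # extend instead of append
# The helper itself can also be exercised directly:
if __name__ == '__main__':
    demo = [1, 2, 3]
    append_or_extend_list(demo, [4, 5], is_extend=True)
    print(demo)  # [1, 2, 3, 4, 5]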
|
dvc/utils/pkg.py | lucasalavapena/dvc | 9,136 | 43199 | <filename>dvc/utils/pkg.py
try:
# file is created during dvc build
from .build import PKG # noqa, pylint:disable=unused-import
except ImportError:
PKG = None # type: ignore[assignment]
|
utils/cpu_affinity.py | jie311/RangeDet | 125 | 43224 | <filename>utils/cpu_affinity.py<gh_stars>100-1000
import psutil
import os
import subprocess
import logging
def simple_bind_cpus(rank, num_partition, logical=False):
pid = os.getpid()
p = psutil.Process(pid)
cpu_count = psutil.cpu_count(logical=logical)
cpu_count_per_worker = cpu_count // num_partition
cpu_list = list(range(rank * cpu_count_per_worker, (rank + 1) * cpu_count_per_worker))
print("bind cpu list:{}".format(cpu_list))
p.cpu_affinity(cpu_list)
logging.info("rank: {}, pid:{}, affinity to cpu {}".format(rank, pid, cpu_list))
def simple_bind_cpus_with_superthread(rank, num_partition):
pid = os.getpid()
p = psutil.Process(pid)
phy_cpu_count = psutil.cpu_count(logical=False)
cpu_count_per_worker = phy_cpu_count // num_partition
cpu_list = list(range(rank * cpu_count_per_worker, (rank + 1) * cpu_count_per_worker))
cpu_list += list(
range(phy_cpu_count + rank * cpu_count_per_worker, phy_cpu_count + (rank + 1) * cpu_count_per_worker))
p.cpu_affinity(cpu_list)
logging.info("rank: {}, pid:{}, affinity to cpu {}".format(rank, pid, cpu_list))
def bind_cpus_with_list(cpu_list):
pid = os.getpid()
p = psutil.Process(pid)
p.cpu_affinity(cpu_list)
logging.info("pid:{}, affinity to cpu {}".format(pid, cpu_list))
def bind_cpus_on_ecos(rank, num_partition):
pid = os.getpid()
p = psutil.Process(pid)
allowed_list = cpu_allowed_list()
if rank == 0:
print("cpu allowed list len:{}, {}".format(len(allowed_list), allowed_list))
cpu_count_per_worker = len(allowed_list) // num_partition
cpu_list = allowed_list[int(rank * cpu_count_per_worker):int((rank + 1) * cpu_count_per_worker)]
p.cpu_affinity(cpu_list)
logging.info("rank: {}, pid:{}, affinity to cpu {}".format(rank, pid, cpu_list))
def cpu_allowed_list():
byte_info = subprocess.check_output("cat /proc/$$/status|grep Cpus_allowed_list|awk '{print $2}'", shell=True)
cpu_list = byte_info.decode("utf-8").replace("\n", "").split(",")
allowed_list = []
for item in cpu_list:
ranges = [int(cpuid) for cpuid in item.split('-')]
if len(ranges) == 1:
allowed_list.append(ranges[0])
else:
allowed_list += list(range(ranges[0], ranges[1] + 1))
return allowed_list
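if __name__ == '__main__':
    # Minimal usage sketch (assumption: standalone run on a Linux host with
    # psutil available). Pin this process to the first of four CPU partitions.
    logging.basicConfig(level=logging.INFO)
    simple_bind_cpus(rank=0, num_partition=4)
    print('allowed cpus:', cpu_allowed_list())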
|
rosetta/__init__.py | UnitedLexCorp/rosetta | 132 | 43258 | from rosetta.text.api import *
|
core/management/commands/sync_events_dashboard.py | vanessa/djangogirls | 446 | 43268 | import datetime
import re
import time
from collections import namedtuple
from django.conf import settings
from django.core.management.base import BaseCommand
from trello import ResourceUnavailable, TrelloClient
from core.models import Event
# Create new command
class Command(BaseCommand):
    help = 'Syncs events to the Trello board. Needs a token.'
missing_args_message = (
'You need to add a token! Get one here: '
'https://trello.com/1/authorize?key=01ab0348ca020573e7f728ae7400928a&scope=read%2Cwrite&'
'name=My+Application&expiration=1hour&response_type=token'
)
def add_arguments(self, parser):
parser.add_argument('trello_token', type=str)
def handle(self, *args, **options):
token = options['trello_token']
events = event_list()
sync(events, token)
# Get data
EventTuple = namedtuple('EventTuple', 'name id city date')
def event_list():
event = Event.objects.all()
result = []
for e in event:
name = e.name
_id = str(e.pk)
city = e.city
date = datetime.date(e.date.year, e.date.month, e.date.day or 1)
result.append(EventTuple(name, _id, city, date))
return result
# Sync to trello
ADMIN_BASE_URL = 'https://djangogirls.org/admin/core/event/'
def sync(events, token):
trello = TrelloClient(api_key=settings.TRELLO_API_KEY, token=token)
board = trello.get_board('55f7167c46760fcb5d68b385')
far_away, less_2_months, less_1_month, less_1_week, today, past = board.all_lists()
all_cards = {card_id(c): c for c in board.all_cards()}
date_today = datetime.date.today()
for e in events:
card = all_cards.get(e.id)
if not card:
card = create_card(e, far_away)
create_checklist(card)
# fetch card to get due date
try:
card.fetch()
except ResourceUnavailable:
print("Oopsie: too many requests! Let's wait 10 seconds!")
time.sleep(10)
card.fetch()
if e.date != card.due_date.date():
print('Changing due date of {} to {}'.format(e.city, e.date))
card.set_due(e.date)
distance = (e.date - date_today).days
if distance < 0:
right_list = past
elif distance == 0:
right_list = today
elif distance < 7:
right_list = less_1_week
elif distance < 30:
right_list = less_1_month
elif distance < 60:
right_list = less_2_months
else:
right_list = far_away
ensure_card_in_list(card, right_list)
def card_id(card):
m = re.search(ADMIN_BASE_URL + r'(\d+)',
card.desc)
return m.group(1)
def create_card(event, list):
print('Creating card {} ({})'.format(event.city, event.date.isoformat()))
return list.add_card(name=event.city,
desc=ADMIN_BASE_URL + event.id,
due=event.date.isoformat())
def create_checklist(card):
card.add_checklist("Things to do:", [
"2 month check", "1 month check", "Thank you email and request for stats", "Stats obtained"])
def ensure_checklist_in_card(card):
if not card.checklists:
print("Adding checklist to {} card.".format(card.name))
create_checklist(card)
def ensure_card_in_list(card, list):
if card.list_id != list.id:
print('Moving {} to {}'.format(
card.name, list.name))
card.change_list(list.id)
|
session/vad/finetune-inception-v4.py | ishine/malaya-speech | 111 | 43287 | <reponame>ishine/malaya-speech<gh_stars>100-1000
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import tensorflow as tf
import collections
import re
import random
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match('^(.*):\\d+$', name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
assignment_map = collections.OrderedDict()
for x in init_vars:
(name, var) = (x[0], x[1])
if name not in name_to_variable:
continue
assignment_map[name] = name
assignment_map[name] = name_to_variable[name]
initialized_variable_names[name] = 1
initialized_variable_names[name + ':0'] = 1
tf.logging.info('**** Trainable Variables ****')
for var in tvars:
init_string = ''
if var.name in initialized_variable_names:
init_string = ', *INIT_FROM_CKPT*'
tf.logging.info(
' name = %s, shape = %s%s', var.name, var.shape, init_string
)
return (assignment_map, initialized_variable_names)
import malaya_speech.train as train
import malaya_speech
from glob import glob
import librosa
import numpy as np
def lin_spectogram_from_wav(wav, hop_length, win_length, n_fft=1024):
linear = librosa.stft(
wav, n_fft=n_fft, win_length=win_length, hop_length=hop_length
) # linear spectrogram
return linear.T
def load_data(
wav,
win_length=400,
sr=16000,
hop_length=24,
n_fft=512,
spec_len=100,
mode='train',
):
linear_spect = lin_spectogram_from_wav(wav, hop_length, win_length, n_fft)
mag, _ = librosa.magphase(linear_spect) # magnitude
mag_T = mag.T
freq, time = mag_T.shape
if mode == 'train':
if time < spec_len:
spec_mag = np.pad(mag_T, ((0, 0), (0, spec_len - time)), 'constant')
else:
spec_mag = mag_T
else:
spec_mag = mag_T
    # preprocessing: subtract mean, divide by time-wise std
mu = np.mean(spec_mag, 0, keepdims=True)
std = np.std(spec_mag, 0, keepdims=True)
return (spec_mag - mu) / (std + 1e-5)
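# Shape sketch (comment added for clarity, not in the original script): with
# sr=16000, n_fft=512 and hop_length=24, one second of audio yields about
# 16000 / 24 ~ 667 frames, so load_data returns a (257, ~667) array before the
# (n_mels, -1, 1) reshape done in preprocess_inputs.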
n_mels = 257
def calc(v):
r = load_data(v, mode='train')
return r
def preprocess_inputs(example):
s = tf.compat.v1.numpy_function(calc, [example['waveforms']], tf.float32)
s = tf.reshape(s, (n_mels, -1, 1))
example['inputs'] = s
return example
def parse(serialized_example):
data_fields = {
'waveforms': tf.VarLenFeature(tf.float32),
'targets': tf.VarLenFeature(tf.int64),
}
features = tf.parse_single_example(
serialized_example, features=data_fields
)
for k in features.keys():
features[k] = features[k].values
features = preprocess_inputs(features)
keys = list(features.keys())
for k in keys:
if k not in ['inputs', 'targets']:
features.pop(k, None)
return features
def get_dataset(files, batch_size=16, shuffle_size=5, thread_count=24):
def get():
dataset = tf.data.TFRecordDataset(files)
dataset = dataset.map(parse, num_parallel_calls=thread_count)
dataset = dataset.padded_batch(
batch_size,
padded_shapes={
'inputs': tf.TensorShape([n_mels, None, 1]),
'targets': tf.TensorShape([None]),
},
padding_values={
'inputs': tf.constant(0, dtype=tf.float32),
'targets': tf.constant(0, dtype=tf.int64),
},
)
dataset = dataset.shuffle(shuffle_size)
dataset = dataset.repeat()
return dataset
return get
import tf_slim as slim
import inception_utils
def block_inception_a(inputs, scope=None, reuse=None):
"""Builds Inception-A block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope(
[slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1,
padding='SAME',
):
with tf.variable_scope(
scope, 'BlockInceptionA', [inputs], reuse=reuse
):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
inputs, 96, [1, 1], scope='Conv2d_0a_1x1'
)
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
inputs, 64, [1, 1], scope='Conv2d_0a_1x1'
)
branch_1 = slim.conv2d(
branch_1, 96, [3, 3], scope='Conv2d_0b_3x3'
)
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
inputs, 64, [1, 1], scope='Conv2d_0a_1x1'
)
branch_2 = slim.conv2d(
branch_2, 96, [3, 3], scope='Conv2d_0b_3x3'
)
branch_2 = slim.conv2d(
branch_2, 96, [3, 3], scope='Conv2d_0c_3x3'
)
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(
inputs, [3, 3], scope='AvgPool_0a_3x3'
)
branch_3 = slim.conv2d(
branch_3, 96, [1, 1], scope='Conv2d_0b_1x1'
)
return tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3]
)
def block_reduction_a(inputs, scope=None, reuse=None):
"""Builds Reduction-A block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope(
[slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1,
padding='SAME',
):
with tf.variable_scope(
scope, 'BlockReductionA', [inputs], reuse=reuse
):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
inputs,
384,
[3, 3],
stride=2,
padding='VALID',
scope='Conv2d_1a_3x3',
)
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
inputs, 192, [1, 1], scope='Conv2d_0a_1x1'
)
branch_1 = slim.conv2d(
branch_1, 224, [3, 3], scope='Conv2d_0b_3x3'
)
branch_1 = slim.conv2d(
branch_1,
256,
[3, 3],
stride=2,
padding='VALID',
scope='Conv2d_1a_3x3',
)
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(
inputs,
[3, 3],
stride=2,
padding='VALID',
scope='MaxPool_1a_3x3',
)
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
def block_inception_b(inputs, scope=None, reuse=None):
"""Builds Inception-B block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope(
[slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1,
padding='SAME',
):
with tf.variable_scope(
scope, 'BlockInceptionB', [inputs], reuse=reuse
):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
inputs, 384, [1, 1], scope='Conv2d_0a_1x1'
)
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
inputs, 192, [1, 1], scope='Conv2d_0a_1x1'
)
branch_1 = slim.conv2d(
branch_1, 224, [1, 7], scope='Conv2d_0b_1x7'
)
branch_1 = slim.conv2d(
branch_1, 256, [7, 1], scope='Conv2d_0c_7x1'
)
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
inputs, 192, [1, 1], scope='Conv2d_0a_1x1'
)
branch_2 = slim.conv2d(
branch_2, 192, [7, 1], scope='Conv2d_0b_7x1'
)
branch_2 = slim.conv2d(
branch_2, 224, [1, 7], scope='Conv2d_0c_1x7'
)
branch_2 = slim.conv2d(
branch_2, 224, [7, 1], scope='Conv2d_0d_7x1'
)
branch_2 = slim.conv2d(
branch_2, 256, [1, 7], scope='Conv2d_0e_1x7'
)
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(
inputs, [3, 3], scope='AvgPool_0a_3x3'
)
branch_3 = slim.conv2d(
branch_3, 128, [1, 1], scope='Conv2d_0b_1x1'
)
return tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3]
)
def block_reduction_b(inputs, scope=None, reuse=None):
"""Builds Reduction-B block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope(
[slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1,
padding='SAME',
):
with tf.variable_scope(
scope, 'BlockReductionB', [inputs], reuse=reuse
):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
inputs, 192, [1, 1], scope='Conv2d_0a_1x1'
)
branch_0 = slim.conv2d(
branch_0,
192,
[3, 3],
stride=2,
padding='VALID',
scope='Conv2d_1a_3x3',
)
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
inputs, 256, [1, 1], scope='Conv2d_0a_1x1'
)
branch_1 = slim.conv2d(
branch_1, 256, [1, 7], scope='Conv2d_0b_1x7'
)
branch_1 = slim.conv2d(
branch_1, 320, [7, 1], scope='Conv2d_0c_7x1'
)
branch_1 = slim.conv2d(
branch_1,
320,
[3, 3],
stride=2,
padding='VALID',
scope='Conv2d_1a_3x3',
)
with tf.variable_scope('Branch_2'):
branch_2 = slim.max_pool2d(
inputs,
[3, 3],
stride=2,
padding='VALID',
scope='MaxPool_1a_3x3',
)
return tf.concat(axis=3, values=[branch_0, branch_1, branch_2])
def block_inception_c(inputs, scope=None, reuse=None):
"""Builds Inception-C block for Inception v4 network."""
# By default use stride=1 and SAME padding
with slim.arg_scope(
[slim.conv2d, slim.avg_pool2d, slim.max_pool2d],
stride=1,
padding='SAME',
):
with tf.variable_scope(
scope, 'BlockInceptionC', [inputs], reuse=reuse
):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
inputs, 256, [1, 1], scope='Conv2d_0a_1x1'
)
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
inputs, 384, [1, 1], scope='Conv2d_0a_1x1'
)
branch_1 = tf.concat(
axis=3,
values=[
slim.conv2d(
branch_1, 256, [1, 3], scope='Conv2d_0b_1x3'
),
slim.conv2d(
branch_1, 256, [3, 1], scope='Conv2d_0c_3x1'
),
],
)
with tf.variable_scope('Branch_2'):
branch_2 = slim.conv2d(
inputs, 384, [1, 1], scope='Conv2d_0a_1x1'
)
branch_2 = slim.conv2d(
branch_2, 448, [3, 1], scope='Conv2d_0b_3x1'
)
branch_2 = slim.conv2d(
branch_2, 512, [1, 3], scope='Conv2d_0c_1x3'
)
branch_2 = tf.concat(
axis=3,
values=[
slim.conv2d(
branch_2, 256, [1, 3], scope='Conv2d_0d_1x3'
),
slim.conv2d(
branch_2, 256, [3, 1], scope='Conv2d_0e_3x1'
),
],
)
with tf.variable_scope('Branch_3'):
branch_3 = slim.avg_pool2d(
inputs, [3, 3], scope='AvgPool_0a_3x3'
)
branch_3 = slim.conv2d(
branch_3, 256, [1, 1], scope='Conv2d_0b_1x1'
)
return tf.concat(
axis=3, values=[branch_0, branch_1, branch_2, branch_3]
)
def inception_v4_base(inputs, final_endpoint='Mixed_7d', scope=None):
"""Creates the Inception V4 network up to the given final endpoint.
Args:
inputs: a 4-D tensor of size [batch_size, height, width, 3].
final_endpoint: specifies the endpoint to construct the network up to.
It can be one of [ 'Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
'Mixed_3a', 'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e',
'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c',
'Mixed_7d']
scope: Optional variable_scope.
Returns:
logits: the logits outputs of the model.
end_points: the set of end_points from the inception model.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
"""
end_points = {}
def add_and_check_final(name, net):
end_points[name] = net
return name == final_endpoint
with tf.variable_scope(scope, 'InceptionV4', [inputs]):
with slim.arg_scope(
[slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1,
padding='SAME',
):
# 299 x 299 x 3
net = slim.conv2d(
inputs,
32,
[3, 3],
stride=2,
padding='VALID',
scope='Conv2d_1a_3x3',
)
if add_and_check_final('Conv2d_1a_3x3', net):
return net, end_points
# 149 x 149 x 32
net = slim.conv2d(
net, 32, [3, 3], padding='VALID', scope='Conv2d_2a_3x3'
)
if add_and_check_final('Conv2d_2a_3x3', net):
return net, end_points
# 147 x 147 x 32
net = slim.conv2d(net, 64, [3, 3], scope='Conv2d_2b_3x3')
if add_and_check_final('Conv2d_2b_3x3', net):
return net, end_points
# 147 x 147 x 64
with tf.variable_scope('Mixed_3a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.max_pool2d(
net,
[3, 3],
stride=2,
padding='VALID',
scope='MaxPool_0a_3x3',
)
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net,
96,
[3, 3],
stride=2,
padding='VALID',
scope='Conv2d_0a_3x3',
)
net = tf.concat(axis=3, values=[branch_0, branch_1])
if add_and_check_final('Mixed_3a', net):
return net, end_points
# 73 x 73 x 160
with tf.variable_scope('Mixed_4a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net, 64, [1, 1], scope='Conv2d_0a_1x1'
)
branch_0 = slim.conv2d(
branch_0,
96,
[3, 3],
padding='VALID',
scope='Conv2d_1a_3x3',
)
with tf.variable_scope('Branch_1'):
branch_1 = slim.conv2d(
net, 64, [1, 1], scope='Conv2d_0a_1x1'
)
branch_1 = slim.conv2d(
branch_1, 64, [1, 7], scope='Conv2d_0b_1x7'
)
branch_1 = slim.conv2d(
branch_1, 64, [7, 1], scope='Conv2d_0c_7x1'
)
branch_1 = slim.conv2d(
branch_1,
96,
[3, 3],
padding='VALID',
scope='Conv2d_1a_3x3',
)
net = tf.concat(axis=3, values=[branch_0, branch_1])
if add_and_check_final('Mixed_4a', net):
return net, end_points
# 71 x 71 x 192
with tf.variable_scope('Mixed_5a'):
with tf.variable_scope('Branch_0'):
branch_0 = slim.conv2d(
net,
192,
[3, 3],
stride=2,
padding='VALID',
scope='Conv2d_1a_3x3',
)
with tf.variable_scope('Branch_1'):
branch_1 = slim.max_pool2d(
net,
[3, 3],
stride=2,
padding='VALID',
scope='MaxPool_1a_3x3',
)
net = tf.concat(axis=3, values=[branch_0, branch_1])
if add_and_check_final('Mixed_5a', net):
return net, end_points
# 35 x 35 x 384
# 4 x Inception-A blocks
for idx in range(4):
block_scope = 'Mixed_5' + chr(ord('b') + idx)
net = block_inception_a(net, block_scope)
if add_and_check_final(block_scope, net):
return net, end_points
# 35 x 35 x 384
# Reduction-A block
net = block_reduction_a(net, 'Mixed_6a')
if add_and_check_final('Mixed_6a', net):
return net, end_points
# 17 x 17 x 1024
# 7 x Inception-B blocks
for idx in range(7):
block_scope = 'Mixed_6' + chr(ord('b') + idx)
net = block_inception_b(net, block_scope)
if add_and_check_final(block_scope, net):
return net, end_points
# 17 x 17 x 1024
# Reduction-B block
net = block_reduction_b(net, 'Mixed_7a')
if add_and_check_final('Mixed_7a', net):
return net, end_points
# 8 x 8 x 1536
# 3 x Inception-C blocks
for idx in range(3):
block_scope = 'Mixed_7' + chr(ord('b') + idx)
net = block_inception_c(net, block_scope)
if add_and_check_final(block_scope, net):
return net, end_points
raise ValueError('Unknown final endpoint %s' % final_endpoint)
def model(
inputs,
is_training=True,
dropout_keep_prob=0.8,
reuse=None,
scope='InceptionV4',
bottleneck_dim=512,
):
# inputs = tf.image.grayscale_to_rgb(inputs)
with tf.variable_scope(
scope, 'InceptionV4', [inputs], reuse=reuse
) as scope:
with slim.arg_scope(
[slim.batch_norm, slim.dropout], is_training=is_training
):
net, end_points = inception_v4_base(inputs, scope=scope)
print(net.shape)
with slim.arg_scope(
[slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1,
padding='SAME',
):
with tf.variable_scope('Logits'):
# 8 x 8 x 1536
kernel_size = net.get_shape()[1:3]
print(kernel_size)
if kernel_size.is_fully_defined():
net = slim.avg_pool2d(
net,
kernel_size,
padding='VALID',
scope='AvgPool_1a',
)
else:
net = tf.reduce_mean(
input_tensor=net,
axis=[1, 2],
keepdims=True,
name='global_pool',
)
end_points['global_pool'] = net
# 1 x 1 x 1536
net = slim.dropout(
net, dropout_keep_prob, scope='Dropout_1b'
)
net = slim.flatten(net, scope='PreLogitsFlatten')
end_points['PreLogitsFlatten'] = net
bottleneck = slim.fully_connected(
net, bottleneck_dim, scope='bottleneck'
)
logits = slim.fully_connected(
bottleneck,
2,
activation_fn=None,
scope='Logits_vad',
)
return logits
init_lr = 1e-3
epochs = 300000
init_checkpoint = 'output-inception-v4/model.ckpt-401000'
def model_fn(features, labels, mode, params):
Y = tf.cast(features['targets'][:, 0], tf.int32)
with slim.arg_scope(inception_utils.inception_arg_scope()):
logits = model(features['inputs'])
loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=Y
)
)
tf.identity(loss, 'train_loss')
accuracy = tf.metrics.accuracy(
labels=Y, predictions=tf.argmax(logits, axis=1)
)
tf.identity(accuracy[1], name='train_accuracy')
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
assignment_map, initialized_variable_names = get_assignment_map_from_checkpoint(
variables, init_checkpoint
)
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_or_create_global_step()
learning_rate = tf.constant(
value=init_lr, shape=[], dtype=tf.float32
)
learning_rate = tf.train.polynomial_decay(
learning_rate,
global_step,
epochs,
end_learning_rate=0.00001,
power=1.0,
cycle=False,
)
optimizer = tf.train.RMSPropOptimizer(
learning_rate, decay=0.9, momentum=0.9, epsilon=1.0
)
train_op = optimizer.minimize(loss, global_step=global_step)
estimator_spec = tf.estimator.EstimatorSpec(
mode=mode, loss=loss, train_op=train_op
)
elif mode == tf.estimator.ModeKeys.EVAL:
estimator_spec = tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.EVAL,
loss=loss,
eval_metric_ops={'accuracy': accuracy},
)
return estimator_spec
train_hooks = [
tf.train.LoggingTensorHook(
['train_accuracy', 'train_loss'], every_n_iter=1
)
]
train_files = glob('vad2/data/vad-train-*') + glob('noise/data/vad-train-*')
train_dataset = get_dataset(train_files, batch_size=32)
dev_files = glob('vad2/data/vad-dev-*') + glob('noise/data/vad-dev-*')
dev_dataset = get_dataset(dev_files, batch_size=16)
save_directory = 'output-inception-v4-vad'
train.run_training(
train_fn=train_dataset,
model_fn=model_fn,
model_dir=save_directory,
num_gpus=1,
log_step=1,
save_checkpoint_step=25000,
max_steps=epochs,
eval_fn=dev_dataset,
train_hooks=train_hooks,
)
|
testsuite/array-reg/run.py | LongerVision/OpenShadingLanguage | 1,105 | 43293 | #!/usr/bin/env python
# Copyright Contributors to the Open Shading Language project.
# SPDX-License-Identifier: BSD-3-Clause
# https://github.com/AcademySoftwareFoundation/OpenShadingLanguage
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_index_float.tif test_varying_index_float")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_index_int.tif test_varying_index_int")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_index_string.tif test_varying_index_string")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_index_matrix.tif test_varying_index_matrix")
outputs.append ("out_varying_index_float.tif")
outputs.append ("out_varying_index_int.tif")
outputs.append ("out_varying_index_string.tif")
outputs.append ("out_varying_index_matrix.tif")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_index_color.tif test_varying_index_color")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_index_point.tif test_varying_index_point")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_index_vector.tif test_varying_index_vector")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_index_normal.tif test_varying_index_normal")
outputs.append ("out_varying_index_color.tif")
outputs.append ("out_varying_index_point.tif")
outputs.append ("out_varying_index_vector.tif")
outputs.append ("out_varying_index_normal.tif")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_out_of_bounds_index_int.tif test_varying_out_of_bounds_index_int")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_out_of_bounds_index_float.tif test_varying_out_of_bounds_index_float")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_out_of_bounds_index_string.tif test_varying_out_of_bounds_index_string")
outputs.append ("out_varying_out_of_bounds_index_int.tif")
outputs.append ("out_varying_out_of_bounds_index_float.tif")
outputs.append ("out_varying_out_of_bounds_index_string.tif")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_index_ray.tif test_varying_index_ray")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_index_cube.tif test_varying_index_cube")
outputs.append ("out_varying_index_ray.tif")
outputs.append ("out_varying_index_cube.tif")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_index_varying_float.tif test_varying_index_varying_float")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_index_varying_int.tif test_varying_index_varying_int")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_index_varying_point.tif test_varying_index_varying_point")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_index_varying_normal.tif test_varying_index_varying_normal")
outputs.append ("out_varying_index_varying_float.tif")
outputs.append ("out_varying_index_varying_int.tif")
outputs.append ("out_varying_index_varying_point.tif")
outputs.append ("out_varying_index_varying_normal.tif")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_index_varying_vector.tif test_varying_index_varying_vector")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_index_varying_color.tif test_varying_index_varying_color")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_index_varying_string.tif test_varying_index_varying_string")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_index_varying_matrix.tif test_varying_index_varying_matrix")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_varying_index_varying_ray.tif test_varying_index_varying_ray")
outputs.append ("out_varying_index_varying_vector.tif")
outputs.append ("out_varying_index_varying_color.tif")
outputs.append ("out_varying_index_varying_string.tif")
outputs.append ("out_varying_index_varying_matrix.tif")
outputs.append ("out_varying_index_varying_ray.tif")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_uniform_index_varying_float.tif test_uniform_index_varying_float")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_uniform_index_varying_int.tif test_uniform_index_varying_int")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_uniform_index_varying_point.tif test_uniform_index_varying_point")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_uniform_index_varying_normal.tif test_uniform_index_varying_normal")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_uniform_index_varying_vector.tif test_uniform_index_varying_vector")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_uniform_index_varying_color.tif test_uniform_index_varying_color")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_uniform_index_varying_string.tif test_uniform_index_varying_string")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_uniform_index_varying_matrix.tif test_uniform_index_varying_matrix")
command += testshade("-t 1 -g 256 256 -od uint8 -o Cout out_uniform_index_varying_ray.tif test_uniform_index_varying_ray")
outputs.append ("out_uniform_index_varying_float.tif")
outputs.append ("out_uniform_index_varying_int.tif")
outputs.append ("out_uniform_index_varying_point.tif")
outputs.append ("out_uniform_index_varying_normal.tif")
outputs.append ("out_uniform_index_varying_vector.tif")
outputs.append ("out_uniform_index_varying_color.tif")
outputs.append ("out_uniform_index_varying_string.tif")
outputs.append ("out_uniform_index_varying_matrix.tif")
outputs.append ("out_uniform_index_varying_ray.tif")
# expect a few LSB failures
failthresh = 0.008
failpercent = 3
|
bigflow_python/python/bigflow/util/log.py | advancedxy/bigflow_python | 1,236 | 43312 | #!/usr/bin/env python
#encoding=utf-8
# Copyright (c) 2012 Baidu, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A utility that wraps Python's built-in logging
"""
import logging
import logging.handlers
import os
import platform
import sys
from bigflow import error  # needed for BigflowPlanningException below
unicode_type = unicode
bytes_type = str
basestring_type = str
try:
import curses
except ImportError:
curses = None
logger = logging.getLogger("com.baidu.bigflow")
def _safe_unicode(obj, encoding='utf-8'):
"""
Converts any given object to unicode string.
>>> safeunicode('hello')
u'hello'
>>> safeunicode(2)
u'2'
>>> safeunicode('\xe1\x88\xb4')
u'\u1234'
"""
t = type(obj)
if t is unicode:
return obj
elif t is str:
return obj.decode(encoding, 'ignore')
elif t in [int, float, bool]:
return unicode(obj)
elif hasattr(obj, '__unicode__') or isinstance(obj, unicode):
try:
return unicode(obj)
except Exception as e:
return u""
else:
return str(obj).decode(encoding, 'ignore')
def _stderr_supports_color():
import sys
color = False
if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
try:
curses.setupterm()
if curses.tigetnum("colors") > 0:
color = True
except Exception:
pass
return color
class LogFormatter(logging.Formatter):
"""Log formatter used in Tornado.
Key features of this formatter are:
* Color support when logging to a terminal that supports it.
* Timestamps on every log line.
* Robust against str/bytes encoding problems.
This formatter is enabled automatically by
`tornado.options.parse_command_line` (unless ``--logging=none`` is
used).
"""
DEFAULT_FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s'
DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S'
DEFAULT_COLORS = {
logging.DEBUG: 4, # Blue
logging.INFO: 2, # Green
logging.WARNING: 3, # Yellow
logging.ERROR: 1, # Red
}
def __init__(self, color=True, fmt=DEFAULT_FORMAT,
datefmt=DEFAULT_DATE_FORMAT, colors=None):
r"""
:arg bool color: Enables color support.
:arg string fmt: Log message format.
It will be applied to the attributes dict of log records. The
text between ``%(color)s`` and ``%(end_color)s`` will be colored
depending on the level if color support is on.
:arg dict colors: color mappings from logging level to terminal color
code
:arg string datefmt: Datetime format.
Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``.
.. versionchanged:: 3.2
Added ``fmt`` and ``datefmt`` arguments.
"""
logging.Formatter.__init__(self, datefmt=datefmt)
self._fmt = fmt
if colors is None:
colors = LogFormatter.DEFAULT_COLORS
self._colors = {}
if color and _stderr_supports_color():
# The curses module has some str/bytes confusion in
# python3. Until version 3.2.3, most methods return
# bytes, but only accept strings. In addition, we want to
# output these strings with the logging module, which
# works with unicode strings. The explicit calls to
# unicode() below are harmless in python2 but will do the
# right conversion in python 3.
fg_color = (curses.tigetstr("setaf") or
curses.tigetstr("setf") or "")
if (3, 0) < sys.version_info < (3, 2, 3):
fg_color = unicode_type(fg_color, "ascii")
for levelno, code in colors.items():
self._colors[levelno] = unicode_type(curses.tparm(fg_color, code), "ascii")
self._normal = unicode_type(curses.tigetstr("sgr0"), "ascii")
else:
self._normal = ''
def format(self, record):
try:
message = record.getMessage()
# assert isinstance(message, basestring_type) # guaranteed by logging
# Encoding notes: The logging module prefers to work with character
# strings, but only enforces that log messages are instances of
# basestring. In python 2, non-ascii bytestrings will make
# their way through the logging framework until they blow up with
# an unhelpful decoding error (with this formatter it happens
# when we attach the prefix, but there are other opportunities for
# exceptions further along in the framework).
#
# If a byte string makes it this far, convert it to unicode to
# ensure it will make it out to the logs. Use repr() as a fallback
# to ensure that all byte strings can be converted successfully,
# but don't do it by default so we don't add extra quotes to ascii
# bytestrings. This is a bit of a hacky place to do this, but
# it's worth it since the encoding errors that would otherwise
# result are so useless (and tornado is fond of using utf8-encoded
            # byte strings wherever possible).
record.message = _safe_unicode(message)
except Exception as e:
record.message = "Bad message (%r): %r" % (e, record.__dict__)
record.asctime = self.formatTime(record, self.datefmt)
if record.levelno in self._colors:
record.color = self._colors[record.levelno]
record.end_color = self._normal
else:
record.color = record.end_color = ''
formatted = self._fmt % record.__dict__
if record.exc_info:
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
# exc_text contains multiple lines. We need to _safe_unicode
# each line separately so that non-utf8 bytes don't cause
# all the newlines to turn into '\n'.
lines = [formatted.rstrip()]
lines.extend(_safe_unicode(ln) for ln in record.exc_text.split('\n'))
formatted = '\n'.join(lines)
return formatted.replace("\n", "\n ")
def enable_pretty_logging(
logger,
level,
log_file="",
backupCount=10,
maxBytes=10000000):
"""Turns on formatted logging output as configured.
"""
if logger is None:
raise error.BigflowPlanningException("logger cannot be None")
if "__PYTHON_IN_REMOTE_SIDE" in os.environ:
# Do not do logging at runtime
logger.addHandler(logging.NullHandler())
else:
logger.setLevel(level)
if log_file:
channel = logging.handlers.RotatingFileHandler(
filename=log_file,
maxBytes=maxBytes,
backupCount=backupCount)
channel.setFormatter(LogFormatter(color=False))
logger.addHandler(channel)
if not logger.handlers:
# Set up color if we are in a tty and curses is installed
channel = logging.StreamHandler()
channel.setFormatter(LogFormatter())
logger.addHandler(channel)
def enable_pretty_logging_at_debug(
logger,
level,
log_file="",
backupCount=10,
maxBytes=10000000):
"""Turns on formatted logging output only at DEBUG level
"""
if level == logging.DEBUG:
enable_pretty_logging(logger, level, log_file, backupCount, maxBytes)
else:
logger.addHandler(logging.NullHandler())
def init_log(level=logging.INFO):
""" init_log - initialize log module
Args:
        level (int): messages at or above this level will be displayed
                     DEBUG < INFO < WARNING < ERROR < CRITICAL \n
                     ``the default value is logging.INFO``
Raises:
OSError: fail to create log directories
IOError: fail to open log file
"""
log_file = os.environ.get("BIGFLOW_LOG_FILE", "")
if log_file:
log_file = os.path.abspath(log_file + ".log")
print >> sys.stderr, "Bigflow Log file is written to [%s]" % log_file
enable_pretty_logging(logger, level, log_file=log_file)
#enable_pretty_logging_at_debug(
# logging.getLogger("pbrpc"),
# level,
# log_file=log_file)
#enable_pretty_logging_at_debug(
# logging.getLogger("pbrpcrpc_client"),
# level,
# log_file=log_file)
init_log(logging.INFO)
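# Usage sketch (not part of the original module): importing this module
# configures the shared logger, which callers then use directly, e.g.
#   from bigflow.util.log import logger
#   logger.info("job started")
# Setting BIGFLOW_LOG_FILE=/some/prefix additionally writes a rotating log file.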
|
tests/slack_bolt/app/test_dev_server.py | hirosassa/bolt-python | 504 | 43322 | from slack_sdk import WebClient
from slack_bolt.app.app import SlackAppDevelopmentServer, App
from tests.mock_web_api_server import (
setup_mock_web_api_server,
cleanup_mock_web_api_server,
)
from tests.utils import remove_os_env_temporarily, restore_os_env
class TestDevServer:
signing_secret = "secret"
valid_token = "<PASSWORD>"
mock_api_server_base_url = "http://localhost:8888"
web_client = WebClient(
token=valid_token,
base_url=mock_api_server_base_url,
)
def setup_method(self):
self.old_os_env = remove_os_env_temporarily()
setup_mock_web_api_server(self)
def teardown_method(self):
cleanup_mock_web_api_server(self)
restore_os_env(self.old_os_env)
def test_instance(self):
server = SlackAppDevelopmentServer(
port=3001,
path="/slack/events",
app=App(signing_secret=self.signing_secret, client=self.web_client),
)
assert server is not None
|
sas_kernel/magics/sas_session_magic.py | gvelasq/sas_kernel | 207 | 43342 | #
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from metakernel import Magic
class SASsessionMagic(Magic):
def __init__(self, *args, **kwargs):
super(SASsessionMagic, self).__init__(*args, **kwargs)
def line_SASsession(self, *args):
"""
        SAS Kernel magic allows a programmatic way to submit configuration
        details.
This magic is only available within the SAS Kernel
"""
if len(args) > 1:
args = ''.join(args)
elif len(args) == 1:
args = ''.join(args[0])
args = args.replace(' ', '')
args = args.replace('"', '')
args = args.replace("'", '')
sess_params = dict(s.split('=') for s in args.split(','))
self.kernel._allow_stdin = True
self.kernel._start_sas(**sess_params)
def register_magics(kernel):
kernel.register_magics(SASsessionMagic)
def register_ipython_magics():
from metakernel import IPythonKernel
from IPython.core.magic import register_line_magic
kernel = IPythonKernel()
magic = SASsessionMagic(kernel)
# Make magics callable:
kernel.line_magics["SASsession"] = magic
@register_line_magic
def SASsession(line):
kernel.call_magic("%SASsession " + line)
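# Usage sketch (the configuration keys are hypothetical and depend on the local
# saspy setup), entered as a notebook line magic:
#   %SASsession cfgname=default, encoding=utf-8
# Each key=value pair is parsed above and forwarded to the kernel's _start_sas().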
|
examples/protocols/http_server/file_serving/http_server_file_serving_test.py | iPlon-org/esp-idf | 8,747 | 43347 | #!/usr/bin/env python
#
# Copyright 2021 Espressif Systems (Shanghai) CO LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import http.client
import os
import re
import tiny_test_fw
import ttfw_idf
from idf_http_server_test import adder as client
from tiny_test_fw import Utility
@ttfw_idf.idf_example_test(env_tag='Example_WIFI_Protocols')
def test_examples_protocol_http_server_file_serving(env, extra_data): # type: (tiny_test_fw.Env.Env, None) -> None # pylint: disable=unused-argument
# Acquire DUT
dut1 = env.get_dut('http file_serving', 'examples/protocols/http_server/file_serving', dut_class=ttfw_idf.ESP32DUT)
# Get binary file
binary_file = os.path.join(dut1.app.binary_path, 'file_server.bin')
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('file_server_bin_size', '{}KB'.format(bin_size // 1024))
Utility.console_log('Erasing the flash on the chip')
# erase the flash
dut1.erase_flash()
# Upload binary and start testing
Utility.console_log('Starting http file serving simple test app')
dut1.start_app()
# Parse IP address of STA
Utility.console_log('Waiting to connect with AP')
got_ip = dut1.expect(re.compile(r'IPv4 address: (\d+\.\d+\.\d+\.\d+)'), timeout=30)[0]
# Expected logs
dut1.expect('Initializing SPIFFS', timeout=30)
got_port = dut1.expect(re.compile(r"Starting HTTP Server on port: '(\d+)'"), timeout=30)[0]
Utility.console_log('Got IP : ' + got_ip)
Utility.console_log('Got Port : ' + got_port)
# Run test script
conn = client.start_session(got_ip, got_port)
# upload a file onto the server
upload_data = 'Test data to be sent to the server'
upload_file_name = 'example.txt'
upload_file_hash = hashlib.md5(upload_data.encode('UTF-8'))
upload_file_digest = upload_file_hash.digest()
Utility.console_log('\nTesting the uploading of file on the file server')
client.postreq(conn, '/upload/' + str(upload_file_name), upload_data)
try:
dut1.expect('File reception complete', timeout=10)
except Exception:
Utility.console_log('Failed the test to upload file on the file server')
raise
Utility.console_log('Passed the test to uploaded file on the file server')
# Download the uploaded file from the file server
Utility.console_log("\nTesting for Download of \"existing\" file from the file server")
download_data = client.getreq(conn, '/' + str(upload_file_name))
try:
dut1.expect('File sending complete', timeout=10)
except Exception:
Utility.console_log('Failed the test to download existing file from the file server')
raise
Utility.console_log('Passed the test to downloaded existing file from the file server')
download_file_hash = hashlib.md5(download_data)
download_file_digest = download_file_hash.digest()
if download_file_digest != upload_file_digest:
raise RuntimeError('The md5 hash of the downloaded file does not match with that of the uploaded file')
# Upload existing file on the file server
Utility.console_log("\nTesting the upload of \"already existing\" file on the file server")
client.postreq(conn, '/upload/' + str(upload_file_name), data=None)
try:
dut1.expect('File already exists : /spiffs/' + str(upload_file_name), timeout=10)
except Exception:
Utility.console_log('Failed the test for uploading existing file on the file server')
raise
Utility.console_log('Passed the test for uploading existing file on the file server')
# Previous URI was an invalid URI so the server should have closed the connection.
# Trying to send request to the server
try:
client.getreq(conn, '/')
except http.client.RemoteDisconnected:
# It is correct behavior that the connection was closed by the server
pass
except Exception:
Utility.console_log('Connection was not closed successfully by the server after last invalid URI')
raise
conn = client.start_session(got_ip, got_port)
# Delete the existing file from the file server
Utility.console_log("\nTesting the deletion of \"existing\" file on the file server")
client.postreq(conn, '/delete/' + str(upload_file_name), data=None)
try:
dut1.expect('Deleting file : /' + str(upload_file_name), timeout=10)
except Exception:
Utility.console_log('Failed the test for deletion of existing file on the file server')
raise
Utility.console_log('Passed the test for deletion of existing file on the file server')
conn = client.start_session(got_ip, got_port)
# Try to delete non existing file from the file server
Utility.console_log("\nTesting the deletion of \"non existing\" file on the file server")
client.postreq(conn, '/delete/' + str(upload_file_name), data=None)
try:
dut1.expect('File does not exist : /' + str(upload_file_name), timeout=10)
except Exception:
Utility.console_log('Failed the test for deleting non existing file on the file server')
raise
Utility.console_log('Passed the test for deleting non existing file on the file server')
conn = client.start_session(got_ip, got_port)
# Try to download non existing file from the file server
Utility.console_log("\nTesting for Download of \"non existing\" file from the file server")
download_data = client.getreq(conn, '/' + str(upload_file_name))
try:
dut1.expect('Failed to stat file : /spiffs/' + str(upload_file_name), timeout=10)
except Exception:
Utility.console_log('Failed the test to download non existing file from the file server')
raise
Utility.console_log('Passed the test to downloaded non existing file from the file server')
if __name__ == '__main__':
test_examples_protocol_http_server_file_serving() # pylint: disable=no-value-for-parameter
|
modules/dbnd/test_dbnd/py2only/test_python2_newstr.py | busunkim96/dbnd | 224 | 43362 | <reponame>busunkim96/dbnd
from __future__ import absolute_import
import logging
import pytest
import six
from dbnd import parameter, task
from dbnd._core.current import try_get_current_task
from dbnd._core.task_ctrl.task_ctrl import TaskCtrl
from targets.values import ObjectValueType, StrValueType
if six.PY2:
from future.builtins import *
__future_module__ = True
py_2_only_import = pytest.importorskip("__builtin__")
@task
def task_with_str_param(something=parameter(default=None)[str]):
current_task = try_get_current_task()
ctrl = current_task.ctrl # type: TaskCtrl
task_as_cmd_line = ctrl.task_repr.calculate_command_line_for_task()
logging.info("Str type: %s, task repr: %s", type(str), task_as_cmd_line)
assert "newstr.BaseNewStr" in str(type(str))
assert "@" not in task_as_cmd_line
return "task_with_str"
@task
def task_with_object_param(something=parameter(default=None)[object]):
current_task = try_get_current_task()
ctrl = current_task.ctrl # type: TaskCtrl
task_as_cmd_line = ctrl.task_repr.calculate_command_line_for_task()
logging.info("Object type: %s, task repr: %s", type(object), task_as_cmd_line)
assert "newobject" in object.__name__
assert "@" not in task_as_cmd_line
return "task_with_object_param"
class TestPy3ObjectsBuiltins(object):
def test_newstr_as_type(self):
# future.builtins.str is actually "newstr",
# we want to check that correct value type is selected
assert "newstr" in repr(str)
p = parameter(default=None)[str]
assert isinstance(p.parameter.value_type, StrValueType)
def test_newstr_run(self):
a = task_with_str_param.dbnd_run(something="333")
print(a.root_task.something)
def test_object_as_type(self):
# future.builtins.str is actually "newstr",
# we want to check that correct value type is selected
assert "newobject" in repr(object)
p = parameter(default=None)[object]
assert isinstance(p.parameter.value_type, ObjectValueType)
def test_object_run(self):
a = task_with_object_param.dbnd_run(something="333")
print(a.root_task.something)
|
testing/merge_scripts/code_coverage/merge_lib_test.py | zealoussnow/chromium | 14,668 | 43376 | <filename>testing/merge_scripts/code_coverage/merge_lib_test.py
#!/usr/bin/env vpython
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
import unittest
import mock
import merge_lib as merger
class MergeLibTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(MergeLibTest, self).__init__(*args, **kwargs)
self.maxDiff = None
@mock.patch.object(subprocess, 'check_output')
def test_validate_and_convert_profraw(self, mock_cmd):
test_cases = [
([''], [['mock.profdata'], [], []]),
(['Counter overflow'], [[], ['mock.profraw'], ['mock.profraw']]),
(subprocess.CalledProcessError(
255,
'llvm-cov merge -o mock.profdata -sparse=true mock.profraw',
output='Malformed profile'), [[], ['mock.profraw'], []]),
]
for side_effect, expected_results in test_cases:
mock_cmd.side_effect = side_effect
output_profdata_files = []
invalid_profraw_files = []
counter_overflows = []
merger._validate_and_convert_profraw(
'mock.profraw', output_profdata_files, invalid_profraw_files,
counter_overflows, '/usr/bin/llvm-cov')
self.assertEqual(
expected_results,
[output_profdata_files, invalid_profraw_files, counter_overflows])
if __name__ == '__main__':
unittest.main()
|
pex/tools/command.py | ShellAddicted/pex | 2,160 | 43386 | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
from abc import abstractmethod
from pex.commands.command import Command, Result
from pex.pex import PEX
class PEXCommand(Command):
@abstractmethod
def run(self, pex):
# type: (PEX) -> Result
raise NotImplementedError()
|
src/python/tests/integration/test_web/test_handler/test_auto_queue.py | annihilatethee/seedsync | 255 | 43418 | <gh_stars>100-1000
# Copyright 2017, <NAME>, All rights reserved.
import json
from urllib.parse import quote
from controller import AutoQueuePattern
from tests.integration.test_web.test_web_app import BaseTestWebApp
class TestAutoQueueHandler(BaseTestWebApp):
def test_get(self):
self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="one"))
self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="t wo"))
self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="thr'ee"))
self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="fo\"ur"))
self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="fi%ve"))
resp = self.test_app.get("/server/autoqueue/get")
self.assertEqual(200, resp.status_int)
json_list = json.loads(str(resp.html))
self.assertEqual(5, len(json_list))
self.assertIn({"pattern": "one"}, json_list)
self.assertIn({"pattern": "t wo"}, json_list)
self.assertIn({"pattern": "thr'ee"}, json_list)
self.assertIn({"pattern": "fo\"ur"}, json_list)
self.assertIn({"pattern": "fi%ve"}, json_list)
def test_get_is_ordered(self):
self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="a"))
self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="b"))
self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="c"))
self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="d"))
self.auto_queue_persist.add_pattern(AutoQueuePattern(pattern="e"))
resp = self.test_app.get("/server/autoqueue/get")
self.assertEqual(200, resp.status_int)
json_list = json.loads(str(resp.html))
self.assertEqual(5, len(json_list))
self.assertEqual([
{"pattern": "a"},
{"pattern": "b"},
{"pattern": "c"},
{"pattern": "d"},
{"pattern": "e"}
], json_list)
def test_add_good(self):
resp = self.test_app.get("/server/autoqueue/add/one")
self.assertEqual(200, resp.status_int)
self.assertEqual(1, len(self.auto_queue_persist.patterns))
self.assertIn(AutoQueuePattern("one"), self.auto_queue_persist.patterns)
uri = quote(quote("/value/with/slashes", safe=""), safe="")
resp = self.test_app.get("/server/autoqueue/add/" + uri)
self.assertEqual(200, resp.status_int)
self.assertEqual(2, len(self.auto_queue_persist.patterns))
self.assertIn(AutoQueuePattern("/value/with/slashes"), self.auto_queue_persist.patterns)
uri = quote(quote(" value with spaces", safe=""), safe="")
resp = self.test_app.get("/server/autoqueue/add/" + uri)
self.assertEqual(200, resp.status_int)
self.assertEqual(3, len(self.auto_queue_persist.patterns))
self.assertIn(AutoQueuePattern(" value with spaces"), self.auto_queue_persist.patterns)
uri = quote(quote("value'with'singlequote", safe=""), safe="")
resp = self.test_app.get("/server/autoqueue/add/" + uri)
self.assertEqual(200, resp.status_int)
self.assertEqual(4, len(self.auto_queue_persist.patterns))
self.assertIn(AutoQueuePattern("value'with'singlequote"), self.auto_queue_persist.patterns)
uri = quote(quote("value\"with\"doublequote", safe=""), safe="")
resp = self.test_app.get("/server/autoqueue/add/" + uri)
self.assertEqual(200, resp.status_int)
self.assertEqual(5, len(self.auto_queue_persist.patterns))
self.assertIn(AutoQueuePattern("value\"with\"doublequote"), self.auto_queue_persist.patterns)
def test_add_double(self):
resp = self.test_app.get("/server/autoqueue/add/one")
self.assertEqual(200, resp.status_int)
resp = self.test_app.get("/server/autoqueue/add/one", expect_errors=True)
self.assertEqual(400, resp.status_int)
self.assertEqual("Auto-queue pattern 'one' already exists.", str(resp.html))
def test_add_empty_value(self):
uri = quote(quote(" ", safe=""), safe="")
resp = self.test_app.get("/server/autoqueue/add/" + uri, expect_errors=True)
self.assertEqual(400, resp.status_int)
self.assertEqual(0, len(self.auto_queue_persist.patterns))
resp = self.test_app.get("/server/autoqueue/add/", expect_errors=True)
self.assertEqual(404, resp.status_int)
self.assertEqual(0, len(self.auto_queue_persist.patterns))
def test_remove_good(self):
self.auto_queue_persist.add_pattern(AutoQueuePattern("one"))
self.auto_queue_persist.add_pattern(AutoQueuePattern("/value/with/slashes"))
self.auto_queue_persist.add_pattern(AutoQueuePattern(" value with spaces"))
self.auto_queue_persist.add_pattern(AutoQueuePattern("value'with'singlequote"))
self.auto_queue_persist.add_pattern(AutoQueuePattern("value\"with\"doublequote"))
resp = self.test_app.get("/server/autoqueue/remove/one")
self.assertEqual(200, resp.status_int)
self.assertEqual(4, len(self.auto_queue_persist.patterns))
self.assertNotIn(AutoQueuePattern("one"), self.auto_queue_persist.patterns)
uri = quote(quote("/value/with/slashes", safe=""), safe="")
resp = self.test_app.get("/server/autoqueue/remove/" + uri)
self.assertEqual(200, resp.status_int)
self.assertEqual(3, len(self.auto_queue_persist.patterns))
self.assertNotIn(AutoQueuePattern("/value/with/slashes"), self.auto_queue_persist.patterns)
uri = quote(quote(" value with spaces", safe=""), safe="")
resp = self.test_app.get("/server/autoqueue/remove/" + uri)
self.assertEqual(200, resp.status_int)
self.assertEqual(2, len(self.auto_queue_persist.patterns))
self.assertNotIn(AutoQueuePattern(" value with spaces"), self.auto_queue_persist.patterns)
uri = quote(quote("value'with'singlequote", safe=""), safe="")
resp = self.test_app.get("/server/autoqueue/remove/" + uri)
self.assertEqual(200, resp.status_int)
self.assertEqual(1, len(self.auto_queue_persist.patterns))
self.assertNotIn(AutoQueuePattern("value'with'singlequote"), self.auto_queue_persist.patterns)
uri = quote(quote("value\"with\"doublequote", safe=""), safe="")
resp = self.test_app.get("/server/autoqueue/remove/" + uri)
self.assertEqual(200, resp.status_int)
self.assertEqual(0, len(self.auto_queue_persist.patterns))
self.assertNotIn(AutoQueuePattern("value\"with\"doublequote"), self.auto_queue_persist.patterns)
def test_remove_non_existing(self):
resp = self.test_app.get("/server/autoqueue/remove/one", expect_errors=True)
self.assertEqual(400, resp.status_int)
self.assertEqual("Auto-queue pattern 'one' doesn't exist.", str(resp.html))
def test_remove_empty_value(self):
uri = quote(quote(" ", safe=""), safe="")
resp = self.test_app.get("/server/autoqueue/remove/" + uri, expect_errors=True)
self.assertEqual(400, resp.status_int)
self.assertEqual("Auto-queue pattern ' ' doesn't exist.", str(resp.html))
self.assertEqual(0, len(self.auto_queue_persist.patterns))
resp = self.test_app.get("/server/autoqueue/remove/", expect_errors=True)
self.assertEqual(404, resp.status_int)
self.assertEqual(0, len(self.auto_queue_persist.patterns))
|
fasta/reorient_sequences_by_id.py | senjoro/biocode | 355 | 43420 | <reponame>senjoro/biocode<filename>fasta/reorient_sequences_by_id.py
#!/usr/bin/env python3
'''
Description:
Use this when you have a multi-FASTA file and you want to reverse or reverse
complement only select sequences within that file. For example, strand-specific
RNA-Seq read alignment showed that some assembled transcripts were in the wrong
orientation, and this allowed us to correct them.
Input:
- A (multi-)FASTA file. The IDs in the header (up to the first
whitespace) must match the identifiers in the ID file.
- An ID file with one sequence ID per line.
- An action, either 'reverse' or 'revcomp'
Output:
Output will be multi-FASTA data printed to STDOUT or a file if you
pass the -o option.
Those sequences with entries within the ID file will be processed, the rest
will just be printed back out.
'''
import argparse
import sys
from biocode import utils
def main():
parser = argparse.ArgumentParser( description='Reverse or reverse-complement selected sequences within a multi-FASTA')
    ## command-line options
parser.add_argument('-f', '--fasta_file', type=str, required=True, help='Path to an input FASTA file' )
parser.add_argument('-i', '--id_file', type=str, required=True, help='Path to file with IDs to process' )
parser.add_argument('-a', '--action', type=str, required=True, choices=['reverse', 'revcomp'], help='What should be done to the sequences in the ID file' )
parser.add_argument('-o', '--output_file', type=str, required=False, default=None, help='Optional Path to an output file to be created' )
args = parser.parse_args()
## output will either be a file or STDOUT
fout = sys.stdout
if args.output_file is not None:
fout = open(args.output_file, 'wt')
seqs = utils.fasta_dict_from_file(args.fasta_file)
ids = list()
for line in open(args.id_file):
line = line.rstrip()
ids.append(line)
for seq_id in seqs:
seq = seqs[seq_id]
if seq_id in ids:
if args.action == 'reverse':
seq['s'] = seq['s'][::-1]
elif args.action == 'revcomp':
seq['s'] = utils.reverse_complement(seq['s'])
## write this sequence, 60bp per line
fout.write(">{0}\n".format(seq_id))
for i in range(0, len(seq['s']), 60):
fout.write(seq['s'][i : i + 60] + "\n")
if __name__ == '__main__':
main()
|
docs/code_snippets/hello_world_infix.py | Ying1123/pysmt | 435 | 43445 | from pysmt.shortcuts import Symbol
from pysmt.typing import INT
h = Symbol("H", INT)
domain = (1 <= h) & (10 >= h)
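# For reference, the infix expression above is roughly equivalent to the explicit
# shortcut form And(GE(h, Int(1)), LE(h, Int(10))) from pysmt.shortcuts, assuming
# infix notation is enabled on the pysmt environment (as in these doc snippets).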
|
third_party/angle/src/libANGLE/renderer/vulkan/gen_vk_format_table.py | zipated/src | 2,151 | 43450 | <filename>third_party/angle/src/libANGLE/renderer/vulkan/gen_vk_format_table.py<gh_stars>1000+
#!/usr/bin/python
# Copyright 2016 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# gen_vk_format_table.py:
# Code generation for vk format map. See vk_format_map.json for data source.
from datetime import date
import json
import math
import pprint
import os
import re
import sys
sys.path.append('..')
import angle_format
template_table_autogen_cpp = """// GENERATED FILE - DO NOT EDIT.
// Generated by {script_name} using data from {input_file_name}
//
// Copyright {copyright_year} The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// {out_file_name}:
// Queries for full Vulkan format information based on GL format.
#include "libANGLE/renderer/vulkan/vk_format_utils.h"
#include "image_util/copyimage.h"
#include "image_util/generatemip.h"
#include "image_util/loadimage.h"
using namespace angle;
namespace rx
{{
namespace vk
{{
void Format::initialize(VkPhysicalDevice physicalDevice, const angle::Format &angleFormat)
{{
switch (angleFormat.id)
{{
{format_case_data}
default:
UNREACHABLE();
break;
}}
}}
}} // namespace vk
}} // namespace rx
"""
empty_format_entry_template = """{space}case angle::Format::ID::{format_id}:
{space} // This format is not implemented in Vulkan.
{space} break;
"""
format_entry_template = """{space}case angle::Format::ID::{format_id}:
{space}{{
{space} internalFormat = {internal_format};
{space} textureFormatID = angle::Format::ID::{texture};
{space} vkTextureFormat = {vk_texture_format};
{space} bufferFormatID = angle::Format::ID::{buffer};
{space} vkBufferFormat = {vk_buffer_format};
{space} dataInitializerFunction = {initializer};
{space} break;
{space}}}
"""
# This currently only handles texture fallback formats.
fallback_format_entry_template = """{space}case angle::Format::ID::{format_id}:
{space}{{
{space} internalFormat = {internal_format};
{space} if (!HasFullFormatSupport(physicalDevice, {vk_texture_format}))
{space} {{
{space} textureFormatID = angle::Format::ID::{fallback_texture};
{space} vkTextureFormat = {fallback_vk_texture_format};
{space} dataInitializerFunction = {fallback_initializer};
{space} ASSERT(HasFullFormatSupport(physicalDevice, {fallback_vk_texture_format}));
{space} }}
{space} else
{space} {{
{space} textureFormatID = angle::Format::ID::{texture};
{space} vkTextureFormat = {vk_texture_format};
{space} dataInitializerFunction = {initializer};
{space} }}
{space} bufferFormatID = angle::Format::ID::{buffer};
{space} vkBufferFormat = {vk_buffer_format};
{space} break;
{space}}}
"""
def gen_format_case(angle, internal_format, vk_json_data):
vk_map = vk_json_data["map"]
vk_overrides = vk_json_data["overrides"]
vk_fallbacks = vk_json_data["fallbacks"]
args = {
"space": " ",
"format_id": angle,
"internal_format": internal_format
}
if ((angle not in vk_map) and (angle not in vk_overrides) and
(angle not in vk_fallbacks)) or angle == 'NONE':
return empty_format_entry_template.format(**args)
template = format_entry_template
if angle in vk_map:
args["buffer"] = angle
args["texture"] = angle
if angle in vk_overrides:
args.update(vk_overrides[angle])
if angle in vk_fallbacks:
template = fallback_format_entry_template
fallback = vk_fallbacks[angle]
assert not "buffer" in fallback, "Buffer fallbacks not yet supported"
assert "texture" in fallback, "Fallback must have a texture fallback"
args["fallback_texture"] = fallback["texture"]
args["fallback_vk_texture_format"] = vk_map[fallback["texture"]]
args["fallback_initializer"] = angle_format.get_internal_format_initializer(
internal_format, fallback["texture"])
assert "buffer" in args, "Missing buffer format for " + angle
assert "texture" in args, "Missing texture format for " + angle
args["vk_buffer_format"] = vk_map[args["buffer"]]
args["vk_texture_format"] = vk_map[args["texture"]]
args["initializer"] = angle_format.get_internal_format_initializer(
internal_format, args["texture"])
return template.format(**args)
input_file_name = 'vk_format_map.json'
out_file_name = 'vk_format_table'
angle_to_gl = angle_format.load_inverse_table(os.path.join('..', 'angle_format_map.json'))
vk_json_data = angle_format.load_json(input_file_name)
vk_cases = [gen_format_case(angle, gl, vk_json_data)
for angle, gl in sorted(angle_to_gl.iteritems())]
output_cpp = template_table_autogen_cpp.format(
copyright_year = date.today().year,
format_case_data = "\n".join(vk_cases),
script_name = __file__,
out_file_name = out_file_name,
input_file_name = input_file_name)
with open(out_file_name + '_autogen.cpp', 'wt') as out_file:
out_file.write(output_cpp)
out_file.close()
|
src/azure-cli/azure/cli/command_modules/synapse/manual/operations/accesscontrol.py | YuanyuanNi/azure-cli | 3,287 | 43474 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.util import CLIError
from azure.cli.core.azclierror import InvalidArgumentValueError, ArgumentUsageError
from azure.cli.core.util import is_guid
from azure.graphrbac.models import GraphErrorException
from msrestazure.azure_exceptions import CloudError
from .._client_factory import cf_synapse_role_assignments, cf_synapse_role_definitions, cf_graph_client_factory
from ..constant import ITEM_NAME_MAPPING
# List Synapse Role Assignment
def list_role_assignments(cmd, workspace_name, role=None, assignee=None, assignee_object_id=None,
scope=None, item=None, item_type=None):
if bool(assignee) and bool(assignee_object_id):
raise ArgumentUsageError('usage error: --assignee STRING | --assignee-object-id GUID')
if bool(item) != bool(item_type):
raise ArgumentUsageError('usage error: --item-type STRING --item STRING')
return _list_role_assignments(cmd, workspace_name, role, assignee or assignee_object_id,
scope, resolve_assignee=(not assignee_object_id), item=item, item_type=item_type)
def _list_role_assignments(cmd, workspace_name, role=None, assignee=None, scope=None,
resolve_assignee=True, item=None, item_type=None):
"""Prepare scope, role ID and resolve object ID from Graph API."""
if any([scope, item, item_type]):
scope = _build_role_scope(workspace_name, scope, item, item_type)
role_id = _resolve_role_id(cmd, role, workspace_name)
object_id = _resolve_object_id(cmd, assignee, fallback_to_object_id=True) if resolve_assignee else assignee
client = cf_synapse_role_assignments(cmd.cli_ctx, workspace_name)
role_assignments = client.list_role_assignments(role_id, object_id, scope).value
return role_assignments
# Show Synapse Role Assignment By Id
def get_role_assignment_by_id(cmd, workspace_name, role_assignment_id):
client = cf_synapse_role_assignments(cmd.cli_ctx, workspace_name)
return client.get_role_assignment_by_id(role_assignment_id)
# Delete Synapse Role Assignment
def delete_role_assignment(cmd, workspace_name, ids=None, assignee=None, assignee_object_id=None, role=None,
scope=None, item=None, item_type=None):
client = cf_synapse_role_assignments(cmd.cli_ctx, workspace_name)
if not any([ids, assignee, assignee_object_id, role, scope, item, item_type]):
        raise ArgumentUsageError('usage error: No arguments were provided. --assignee STRING | --ids GUID')
if ids:
if any([assignee, assignee_object_id, role, scope, item, item_type]):
            raise ArgumentUsageError('You should not provide --role or --assignee or --assignee-object-id '
'or --scope or --principal-type when --ids is provided.')
role_assignments = list_role_assignments(cmd, workspace_name, None, None, None, None, None, None)
assignment_id_list = [x.id for x in role_assignments]
# check role assignment id
for assignment_id in ids:
if assignment_id not in assignment_id_list:
raise ArgumentUsageError("role assignment id:'{}' doesn't exist.".format(assignment_id))
# delete when all ids check pass
for assignment_id in ids:
client.delete_role_assignment_by_id(assignment_id)
return
role_assignments = list_role_assignments(cmd, workspace_name, role, assignee, assignee_object_id,
scope, item, item_type)
if any([scope, item, item_type]):
scope = _build_role_scope(workspace_name, scope, item, item_type)
role_assignments = [x for x in role_assignments if x.scope == scope]
if role_assignments:
for assignment in role_assignments:
client.delete_role_assignment_by_id(assignment.id)
else:
        raise CLIError('No matched assignments were found to delete. Please provide a correct --role or --assignee. '
                       'Use `az synapse role assignment list` to get role assignments.')
def create_role_assignment(cmd, workspace_name, role, assignee=None, assignee_object_id=None,
scope=None, assignee_principal_type=None, item_type=None, item=None, assignment_id=None):
"""Check parameters are provided correctly, then call _create_role_assignment."""
if assignment_id and not is_guid(assignment_id):
raise InvalidArgumentValueError('usage error: --id GUID')
if bool(assignee) == bool(assignee_object_id):
raise ArgumentUsageError('usage error: --assignee STRING | --assignee-object-id GUID')
if assignee_principal_type and not assignee_object_id:
raise ArgumentUsageError('usage error: --assignee-object-id GUID [--assignee-principal-type]')
if bool(item) != bool(item_type):
raise ArgumentUsageError('usage error: --item-type STRING --item STRING')
try:
return _create_role_assignment(cmd, workspace_name, role, assignee or assignee_object_id, scope, item,
item_type, resolve_assignee=(not assignee_object_id),
assignee_principal_type=assignee_principal_type, assignment_id=assignment_id)
except Exception as ex: # pylint: disable=broad-except
if _error_caused_by_role_assignment_exists(ex): # for idempotent
return list_role_assignments(cmd, workspace_name, role=role,
assignee=assignee, assignee_object_id=assignee_object_id,
scope=scope, item=item, item_type=item_type)
raise
def _resolve_object_id(cmd, assignee, fallback_to_object_id=False):
if assignee is None:
return None
client = cf_graph_client_factory(cmd.cli_ctx)
result = None
try:
result = list(client.users.list(filter="userPrincipalName eq '{0}' or mail eq '{0}' or displayName eq '{0}'"
.format(assignee)))
if not result:
result = list(client.service_principals.list(filter="displayName eq '{}'".format(assignee)))
if not result:
result = list(client.groups.list(filter="mail eq '{}'".format(assignee)))
if not result and is_guid(assignee): # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
        # reject both 'no match' and ambiguous (2+) matches below
if not result:
raise CLIError("Cannot find user or group or service principal in graph database for '{assignee}'. "
"If the assignee is a principal id, make sure the corresponding principal is created "
"with 'az ad sp create --id {assignee}'.".format(assignee=assignee))
if len(result) > 1:
raise CLIError("Find more than one user or group or service principal in graph database for '{assignee}'. "
"Please using --assignee-object-id GUID to specify assignee accurately"
.format(assignee=assignee))
return result[0].object_id
except (CloudError, GraphErrorException):
if fallback_to_object_id and is_guid(assignee):
return assignee
raise
def _get_object_stubs(graph_client, assignees):
from azure.graphrbac.models import GetObjectsParameters
result = []
assignees = list(assignees) # callers could pass in a set
for i in range(0, len(assignees), 1000):
params = GetObjectsParameters(include_directory_object_references=True, object_ids=assignees[i:i + 1000])
result += list(graph_client.objects.get_objects_by_object_ids(params))
return result
def _error_caused_by_role_assignment_exists(ex):
return getattr(ex, 'status_code', None) == 409 and 'role assignment already exists' in ex.message
def _create_role_assignment(cmd, workspace_name, role, assignee, scope=None, item=None, item_type=None,
resolve_assignee=True, assignee_principal_type=None, assignment_id=None):
"""Prepare scope, role ID and resolve object ID from Graph API."""
scope = _build_role_scope(workspace_name, scope, item, item_type)
role_id = _resolve_role_id(cmd, role, workspace_name)
object_id = _resolve_object_id(cmd, assignee, fallback_to_object_id=True) if resolve_assignee else assignee
assignment_client = cf_synapse_role_assignments(cmd.cli_ctx, workspace_name)
return assignment_client.create_role_assignment(assignment_id if assignment_id is not None else _gen_guid(),
role_id, object_id, scope, assignee_principal_type)
def _build_role_scope(workspace_name, scope, item, item_type):
if scope:
return scope
if item and item_type:
# workspaces/{workspaceName}/bigDataPools/{bigDataPoolName}
scope = "workspaces/" + workspace_name + "/" + item_type + "/" + item
else:
scope = "workspaces/" + workspace_name
return scope
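# Illustrative results (hypothetical names): with item_type="bigDataPools" and
# item="pool1" the scope becomes "workspaces/ws1/bigDataPools/pool1"; without an
# item the scope is simply "workspaces/ws1".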
def _resolve_role_id(cmd, role, workspace_name):
role_id = None
if not role:
return role_id
if is_guid(role):
role_id = role
else:
role_definition_client = cf_synapse_role_definitions(cmd.cli_ctx, workspace_name)
role_definition = role_definition_client.list_role_definitions()
role_dict = {x.name.lower(): x.id for x in role_definition if x.name}
if role.lower() not in role_dict:
raise CLIError("Role '{}' doesn't exist.".format(role))
role_id = role_dict[role.lower()]
return role_id
def _gen_guid():
import uuid
return uuid.uuid4()
# List Synapse Role Definitions Scope
def list_scopes(cmd, workspace_name):
client = cf_synapse_role_definitions(cmd.cli_ctx, workspace_name)
return client.list_scopes()
# List Synapse Role Definitions
def list_role_definitions(cmd, workspace_name, is_built_in=None):
client = cf_synapse_role_definitions(cmd.cli_ctx, workspace_name)
role_definitions = client.list_role_definitions(is_built_in)
return role_definitions
def _build_role_scope_format(scope, item_type):
if scope:
return scope
if item_type:
scope = "workspaces/{workspaceName}/" + item_type + "/" + ITEM_NAME_MAPPING[item_type]
else:
scope = "workspaces/{workspaceName}"
return scope
# Get Synapse Role Definition
def get_role_definition(cmd, workspace_name, role):
role_id = _resolve_role_id(cmd, role, workspace_name)
client = cf_synapse_role_definitions(cmd.cli_ctx, workspace_name)
return client.get_role_definition_by_id(role_id)
|
ml_logger/ml_logger_tests/test_cloud/test_s3.py | mcx/ml_logger | 107 | 43482 | <filename>ml_logger/ml_logger_tests/test_cloud/test_s3.py
"""
# AWS S3 Tests
"""
from os.path import join as pathJoin
import pytest
from ml_logger import logger
@pytest.fixture(scope='session')
def log_dir(request):
return request.config.getoption('--logdir')
@pytest.fixture(scope="session")
def setup(log_dir):
logger.configure('main_test_script', root=log_dir)
print(f"logging to {pathJoin(logger.root, logger.prefix)}")
def test_s3_upload(setup):
import os, pathlib
profile = os.environ.get('ML_LOGGER_TEST_AWS_PROFILE', None)
if profile:
os.environ['AWS_PROFILE'] = profile
s3_bucket = os.environ['ML_LOGGER_TEST_S3_BUCKET']
target = "s3://" + s3_bucket + "/test_dir.tar"
logger.upload_dir(pathlib.Path(__file__).absolute().parent, target)
def test_s3_download(setup):
import os, glob
profile = os.environ.get('ML_LOGGER_TEST_AWS_PROFILE', None)
if profile:
os.environ['AWS_PROFILE'] = profile
s3_bucket = os.environ['ML_LOGGER_TEST_S3_BUCKET']
source = "s3://" + s3_bucket + "/test_dir.tar"
local_prefix = '/tmp/test_dir_download'
logger.download_dir(source, to=local_prefix)
assert local_prefix + '/test_s3.py' in glob.glob(local_prefix + "/*")
logger.remove("test_dir_download")
def test_s3_glob(setup):
import os
profile = os.environ.get('ML_LOGGER_TEST_AWS_PROFILE', None)
if profile:
os.environ['AWS_PROFILE'] = profile
s3_bucket = os.environ['ML_LOGGER_TEST_S3_BUCKET']
target = "s3://" + s3_bucket + "/test_dir.tar"
logger.upload_dir('.', target)
files = logger.glob_s3(s3_bucket)
assert 'test_dir.tar' in files
files = logger.glob_s3(wd=s3_bucket)
assert 'test_dir.tar' in files
files = logger.glob_s3(s3_bucket + "/test_dir.tar")
assert 'test_dir.tar' in files
files = logger.glob_s3(s3_bucket + "/this_does_not_exist")
assert not files
def test_s3_glob_prefix(setup):
import os
profile = os.environ.get('ML_LOGGER_TEST_AWS_PROFILE', None)
if profile:
os.environ['AWS_PROFILE'] = profile
s3_bucket = os.environ['ML_LOGGER_TEST_S3_BUCKET']
target = "s3://" + s3_bucket + "/prefix/prefix-2/test_dir.tar"
logger.upload_dir(".", target)
files = logger.glob_s3(wd=s3_bucket + "/prefix/prefix-2")
assert 'test_dir.tar' in files
def test_s3_remove(setup):
import os
example_data = {'a': 1, 'b': 2}
s3_bucket = os.environ.get('ML_LOGGER_TEST_S3_BUCKET', None)
target = "s3://" + s3_bucket + "/prefix/prefix-2/example_data.pt"
logger.save_torch(example_data, target)
file, = logger.glob_s3(target[5:])
logger.remove_s3(s3_bucket, file)
assert not logger.glob_s3(target[5:])
def test_s3_upload_download_torch(setup):
import os
example_data = {'a': 1, 'b': 2}
s3_bucket = os.environ.get('ML_LOGGER_TEST_S3_BUCKET', None)
file = "prefix/prefix-2/example_data.pt"
target = "s3://" + s3_bucket + "/" + file
logger.remove_s3(s3_bucket, file)
logger.save_torch(example_data, target)
downloaded_data = logger.load_torch(target)
assert downloaded_data['a'] == 1
assert downloaded_data['b'] == 2
|
examples/paint-nd.py | MaksHess/napari | 1,345 | 43507 | <gh_stars>1000+
"""
Display a 4D labels layer and paint only in 3D.
This is useful e.g. when proofreading segmentations within a time series.
"""
import numpy as np
from skimage import data
import napari
blobs = np.stack(
[
data.binary_blobs(
length=128, blob_size_fraction=0.05, n_dim=3, volume_fraction=f
)
for f in np.linspace(0.05, 0.5, 10)
],
axis=0,
)
viewer = napari.view_image(blobs.astype(float), rendering='attenuated_mip')
labels = viewer.add_labels(np.zeros_like(blobs, dtype=np.int32))
labels.n_edit_dimensions = 3
labels.brush_size = 15
labels.mode = 'paint'
labels.n_dimensional = True
napari.run()
|
deepconcolic/recviz.py | nberth/DeepConcolic | 102 | 43514 | <gh_stars>100-1000
#!/usr/bin/env python3
from utils_io import *
from utils_args import *
import yaml
def check_record (r):
def chk (k, t):
if k not in r or not isinstance (r[k], t):
raise ValueError (f'missing or wrong `{k}\' entry in record')
chk ('adversarials', list)
chk ('passed_tests', list)
chk ('norm', str)
def read_yaml_record_file (f):
with open (f, 'r') as f:
r = yaml.safe_load (f)
try:
check_record (r)
return r
except ValueError as e:
raise ValueError (f'Malformed record file: {e}')
class Record:
def __init__(self, **kwds):
super().__init__(**kwds)
self.roots = ()
self.nodes = {}
def reset_from_yaml_record (self, record, dir = None):
self.roots = ()
self.nodes = {}
lr = [None] * (len (record['passed_tests']) +
len (record['adversarials']))
for t in record['passed_tests']:
lr[t['index']] = t
t['status'] = 'pass' if 'gen_test_id' in t else 'raw'
for t in record['adversarials']:
lr[t['index']] = t
t['status'] = 'adversarial'
def tnode_idx (t):
return -t['index'] - 1 if 'gen_test_id' not in t else t['gen_test_id']
# Note some elements in [lr] may still be [None] if there are
# duplicated inputs in the initial test suite. We can safely
# ignore those indexes as no generated test case may derive from
# them (i.e. those point to the index of the unique duplicated
# input that is in [lr]).
for t in lr:
if t is None: continue
id = tnode_idx (t)
t['id'] = id
t['childs'] = ()
self.nodes[id] = t
if 'origin_index' in t:
origin = self.nodes[tnode_idx (lr[t['origin_index']])]
t['origin'] = origin['id']
origin['childs'] += (id,)
else:
t['origin'] = None
self.roots += (id,)
def set_image (t, f):
if os.path.exists (os.path.join (dir, f) if dir else f):
t['image'] = f
for t in lr:
if t is None: continue
if t['status'] == 'raw':
tchilds = t["childs"]
if tchilds != ():
set_image (t, f'{self.nodes[tchilds[0]]["id"]}-original-{t["label"]}.png')
elif t['status'] == 'pass':
set_image (t, f'{t["id"]}-ok-{t["label"]}.png')
elif t['status'] == 'adversarial':
set_image (t, f'{t["id"]}-adv-{t["label"]}.png')
@classmethod
def from_yaml_record (cls, yr, **_):
self = cls.__new__(cls)
self.reset_from_yaml_record (yr, **_)
return self
def traverse (self, exclude_dangling_roots = True):
"""Parents first; yields pairs of dictionaries `(node, parent)`, with
`parent = None` for roots."""
def node (t):
parent = self.nodes[t['origin']] if t['origin'] is not None else None
yield (t, parent)
for c in t['childs']:
yield from node (self.nodes[c])
for ridx in self.roots:
r = self.nodes[ridx]
if not exclude_dangling_roots or r['childs'] != ():
for x in node (r): yield x
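# Usage sketch (hypothetical record file name): count adversarial nodes in a record.
#   record = Record.from_yaml_record (read_yaml_record_file ('record.yml'))
#   n_adv = sum (1 for t, _ in record.traverse () if t['status'] == 'adversarial')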
# ---
try:
from pyvis.network import Network
_has_pyvis = True
_pyvis_all_images = ('raw', 'pass', 'adversarial')
def record_to_pyvis (record,
show_images = _pyvis_all_images,
level_scaling = True,
**_):
n = Network (directed = True, **_)
mean_dist = 1.
if level_scaling:
mean_dist = 0.0
for i, (t, _) in enumerate (record.traverse (exclude_dangling_roots = True)):
if 'origin_dist' in t:
mean_dist += t['origin_dist'] / i
mean_dist /= 16.
for t, parent in record.traverse (exclude_dangling_roots = True):
tid = t['id']
label = t['label']
props = dict ()
if t['status'] == 'raw':
props['label'] = 'raw'
props['title'] = f'Label: {label}'
elif t['status'] == 'pass':
props['color'] = 'green'
props['title'] = f'Predicted label: {label}'
elif t['status'] == 'adversarial':
props['color'] = 'red'
props['title'] = f'Predicted label: {label}'
if 'image' in t and t['status'] in show_images:
props['image'] = t['image']
props['shape'] = 'image'
props['size'] = 40
props['shadow'] = True
if 'root_dist' in t:
dist = t['root_dist']
props['title'] += f'<br/>Distance to root: {dist:.4g}'
props['level'] = dist if dist <= 0 or dist >= 1 else dist / mean_dist
else:
props['level'] = 0
n.add_node (tid, **props)
if parent is not None:
dist = t['origin_dist']
n.add_edge (parent['id'], tid,
label = f'{dist:.3}',
color = props['color'],
width = 3)
return n
except:
_has_pyvis = False
# ---
ap = argparse.ArgumentParser \
(description = 'Interactive visualisation of DeepConcolic testing record',
formatter_class = argparse.ArgumentDefaultsHelpFormatter)
add_named_n_pos_args (ap, 'record', ('--record', '-r'), metavar = 'YAML',
help = 'record file')
if _has_pyvis:
_pyvis_all_buttons = ('nodes', 'edges', 'layout', 'interaction',
'manipulation', 'physics', 'selection', 'renderer')
_pyvis_default_buttons = ('layout', 'physics')
try:
import json # for dict load
ap.add_argument ('--pyvis-open', '-pvopen', action = 'store_true',
help = 'open the saved HTML network')
ap.add_argument ('--pyvis-non-hierarchical', '-pvnh', action = 'store_true',
help = 'do not use a hierarchical layout to render '
'the pyvis network')
ap.add_argument ('--pyvis-network-options', '-pvo', type = json.loads,
default = '{"width": "98vw", "height": "98vh"}',
help = 'dictionary of options for pyvis network creation')
ap.add_argument ('--pyvis-show-images', '-pvimgs', nargs = '+',
choices = _pyvis_all_images, default = _pyvis_all_images,
help = 'images to show in the pyvis network visualisation')
ap.add_argument ('--pyvis-show-buttons', '-pvbuttons', nargs = '*',
choices = _pyvis_all_buttons,
default = _pyvis_default_buttons,
help = 'buttons to show in the resulting HTML page')
except: pass
def get_args (args = None, parser = ap):
return parser.parse_args () if args is None else args
def main (args = None, parser = ap):
try:
args = get_args (args, parser = parser)
destdir = os.path.dirname (args.record)
record = Record.from_yaml_record (read_yaml_record_file (args.record),
dir = destdir)
if _has_pyvis:
layout = 'hierarchical' if not args.pyvis_non_hierarchical else None
n = record_to_pyvis (record, layout = layout,
show_images = args.pyvis_show_images,
**args.pyvis_network_options)
if not args.pyvis_non_hierarchical:
# A tad on the brittle side:
n.options.layout.hierarchical.direction = "LR"
if len (args.pyvis_show_buttons) != 0:
n.show_buttons (filter_ = args.pyvis_show_buttons)
htmldest = f'{os.path.splitext (args.record)[0]}.html'
xmsg, save = ((', and opening', n.show) if args.pyvis_open else \
('', n.save_graph))
p1 (f'Saving pyvis record graph into `{htmldest}\'{xmsg}')
save (htmldest)
else:
sys.exit ('No backend available')
except ValueError as e:
sys.exit (f'Error: {e}')
except FileNotFoundError as e:
sys.exit (f'Error: {e}')
except KeyboardInterrupt:
sys.exit ('Interrupted.')
# ---
if __name__=="__main__":
main ()
|
examples/numerical/unsupervised_num.py | stjordanis/QMLT | 117 | 43518 | <reponame>stjordanis/QMLT
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. currentmodule:: qmlt.examples.numerical
.. code-author:: <NAME> <<EMAIL>>
Example of a simple unsupervised learning task with the numerical circuit learner.
This example fails to learn the structure of the data, namely to have zero photons in the first mode,
irrespective of the second mode.
"""
import strawberryfields as sf
from strawberryfields.ops import *
import numpy as np
from qmlt.numerical import CircuitLearner
from qmlt.numerical.helpers import make_param
from qmlt.numerical.regularizers import l2
from qmlt.helpers import sample_from_distribution
# Create some parameters. Mark some of them to be regularized.
my_params = [
make_param(name='phi', stdev=0.2, regularize=False),
make_param(name='theta', stdev=0.2, regularize=False),
make_param(name='a', stdev=0.2, regularize=True),
make_param(name='rtheta', stdev=0.2, regularize=False),
make_param(name='r', stdev=0.2, regularize=True),
make_param(name='kappa', stdev=0.2, regularize=True)
]
# Define the variational circuit and its output
def circuit(params):
eng, q = sf.Engine(2)
with eng:
BSgate(params[0], params[1]) | (q[0], q[1])
Dgate(params[2]) | q[0]
Rgate(params[3]) | q[0]
Sgate(params[4]) | q[0]
Kgate(params[5]) | q[0]
state = eng.run('fock', cutoff_dim=7)
circuit_output = state.all_fock_probs()
return circuit_output
# Define a loss function that maximises the probabilities of the states we want to learn
def myloss(circuit_output, X):
probs = [circuit_output[x[0], x[1]] for x in X]
prob_total = sum(np.reshape(probs, -1))
return -prob_total
def myregularizer(regularized_params):
return l2(regularized_params)
# Generate some training data.
# The goal is to learn that the first mode contains no photons.
X_train = np.array([[0, 1],
[0, 2],
[0, 3],
[0, 4]])
# Set the hyperparameters of the model and the training algorithm
hyperparams = {'circuit': circuit,
'init_circuit_params': my_params,
'task': 'unsupervised',
'optimizer': 'Nelder-Mead',
'loss': myloss,
'regularizer': myregularizer,
'regularization_strength': 0.1,
'print_log': True,
'log_every': 100
}
# Create the learner
learner = CircuitLearner(hyperparams=hyperparams)
# Train the learner
learner.train_circuit(X=X_train, steps=500)
# Get the final distribution, which is the circuit output
outcomes = learner.run_circuit()
final_distribution = outcomes['outputs']
# Use a helper function to sample fock states from this state.
# They should show a similar distribution to the training data
for i in range(10):
sample = sample_from_distribution(distribution=final_distribution)
print("Fock state sample {}:{}".format(i, sample))
|
torchrs/datasets/whu_rs19.py | isaaccorley/torchrs | 146 | 43526 | import torchvision.transforms as T
from torchvision.datasets import ImageFolder
class WHURS19(ImageFolder):
""" WHU-RS19 dataset from'Structural High-resolution Satellite Image Indexing', Xia at al. (2010)
https://hal.archives-ouvertes.fr/file/index/docid/458685/filename/structural_satellite_indexing_XYDG.pdf
"""
def __init__(
self,
root: str = ".data/WHU-RS19",
transform: T.Compose = T.Compose([T.ToTensor()])
):
super().__init__(
root=root,
transform=transform
)
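# Minimal usage sketch (assumes the WHU-RS19 images have been extracted under `root`):
#   dataset = WHURS19(root=".data/WHU-RS19")
#   image, label = dataset[0]  # ImageFolder returns (transformed image, class index)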
|
tempest/tests/lib/services/identity/v3/test_protocols_client.py | cityofships/tempest | 254 | 43564 | <filename>tempest/tests/lib/services/identity/v3/test_protocols_client.py<gh_stars>100-1000
# Copyright 2020 Samsung Electronics Co., Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
from tempest.lib.services.identity.v3 import protocols_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestProtocolsClient(base.BaseServiceTest):
FAKE_PROTOCOLS_INFO = {
"links": {
"next": None,
"previous": None,
"self": "http://example.com/identity/v3/OS-FEDERATION/" +
"identity_providers/FAKE_ID/protocols"
},
"protocols": [
{
"id": "fake_id1",
"links": {
"identity_provider": "http://example.com/identity/v3/" +
"OS-FEDERATION/identity_providers/" +
"FAKE_ID",
"self": "http://example.com/identity/v3/OS-FEDERATION/"
"identity_providers/FAKE_ID/protocols/fake_id1"
},
"mapping_id": "fake123"
}
]
}
FAKE_PROTOCOL_INFO = {
"protocol": {
"id": "fake_id1",
"links": {
"identity_provider": "http://example.com/identity/v3/OS-" +
"FEDERATION/identity_providers/FAKE_ID",
"self": "http://example.com/identity/v3/OS-FEDERATION/" +
"identity_providers/FAKE_ID/protocols/fake_id1"
},
"mapping_id": "fake123"
}
}
def setUp(self):
super(TestProtocolsClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = protocols_client.ProtocolsClient(
fake_auth, 'identity', 'regionOne')
def _test_add_protocol_to_identity_provider(self, bytes_body=False):
self.check_service_client_function(
self.client.add_protocol_to_identity_provider,
'tempest.lib.common.rest_client.RestClient.put',
self.FAKE_PROTOCOL_INFO,
bytes_body,
idp_id="FAKE_ID",
protocol_id="fake_id1",
status=201)
def _test_list_protocols_of_identity_provider(self, bytes_body=False):
self.check_service_client_function(
self.client.list_protocols_of_identity_provider,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_PROTOCOLS_INFO,
bytes_body,
idp_id="FAKE_ID",
status=200)
def _test_get_protocol_for_identity_provider(self, bytes_body=False):
self.check_service_client_function(
self.client.get_protocol_for_identity_provider,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_PROTOCOL_INFO,
bytes_body,
idp_id="FAKE_ID",
protocol_id="fake_id1",
status=200)
def _test_update_mapping_for_identity_provider(self, bytes_body=False):
self.check_service_client_function(
self.client.update_mapping_for_identity_provider,
'tempest.lib.common.rest_client.RestClient.patch',
self.FAKE_PROTOCOL_INFO,
bytes_body,
idp_id="FAKE_ID",
protocol_id="fake_id1",
status=200)
def _test_delete_protocol_from_identity_provider(self, bytes_body=False):
self.check_service_client_function(
self.client.delete_protocol_from_identity_provider,
'tempest.lib.common.rest_client.RestClient.delete',
{},
bytes_body,
idp_id="FAKE_ID",
protocol_id="fake_id1",
status=204)
def test_add_protocol_to_identity_provider_with_str_body(self):
self._test_add_protocol_to_identity_provider()
def test_add_protocol_to_identity_provider_with_bytes_body(self):
self._test_add_protocol_to_identity_provider(bytes_body=True)
def test_list_protocols_of_identity_provider_with_str_body(self):
self._test_list_protocols_of_identity_provider()
def test_list_protocols_of_identity_provider_with_bytes_body(self):
self._test_list_protocols_of_identity_provider(bytes_body=True)
def test_get_protocol_for_identity_provider_with_str_body(self):
self._test_get_protocol_for_identity_provider()
def test_get_protocol_for_identity_provider_with_bytes_body(self):
self._test_get_protocol_for_identity_provider(bytes_body=True)
def test_update_mapping_for_identity_provider_with_str_body(self):
self._test_update_mapping_for_identity_provider()
def test_update_mapping_for_identity_provider_with_bytes_body(self):
self._test_update_mapping_for_identity_provider(bytes_body=True)
def test_delete_protocol_from_identity_provider_with_str_body(self):
self._test_delete_protocol_from_identity_provider()
def test_delete_protocol_from_identity_provider_with_bytes_body(self):
self._test_delete_protocol_from_identity_provider(bytes_body=False)
|
tools/compare_rep.py | Woffee/deformer | 114 | 43574 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import os
from collections import defaultdict
import numpy as np
from scipy.spatial import distance
from tqdm import tqdm
np.set_printoptions(threshold=np.inf, suppress=True)
def main(args):
num_batches = args.num_batches
bert_data = defaultdict(list)
s_or_e_bert_data = defaultdict(list)
print('loading data...')
for para_idx in range(num_batches):
bert_filename = os.path.join(args.in_dir, 'bert_b{}.npz'.format(para_idx + 1))
bert_outputs = np.load(bert_filename)
for k, v in bert_outputs.items():
bert_data[k].append(v)
sbert_filename = os.path.join(args.in_dir, '{}_b{}.npz'.format(args.model, para_idx + 1))
sbert_outputs = np.load(sbert_filename)
for k, v in sbert_outputs.items():
s_or_e_bert_data[k].append(v)
print('stacking all examples of both bert and {}...'.format(args.model))
for k, v in s_or_e_bert_data.items():
s_or_e_bert_data[k] = np.concatenate(v) # stack along batch dim
for k, v in bert_data.items():
bert_data[k] = np.concatenate(v) # stack along batch dim
print('begin computing...')
all_para_distances = [[] for _ in range(12)]
all_q_distances = [[] for _ in range(12)]
    # iterate over the first 500 example paragraphs
for para_idx in tqdm(range(500)):
in_ids = bert_data['input_ids'][para_idx]
seg_ids = bert_data['segment_ids'][para_idx]
feature_ids = bert_data['feature_id'][para_idx]
q_ids = s_or_e_bert_data["question_ids"][para_idx]
c_ids = s_or_e_bert_data["context_ids"][para_idx]
q_length = np.sum(q_ids.astype(np.bool))
c_length = np.sum(c_ids.astype(np.bool))
sequence_length = np.sum(in_ids.astype(np.bool))
second_length = np.sum(seg_ids.astype(np.bool))
first_length = sequence_length - second_length
if not (c_length == second_length):
print('shifted paragraphs:', feature_ids, c_length, second_length)
continue
if not (q_length == first_length):
print('shifted questions:', feature_ids, q_length, first_length)
continue
for l in range(12):
b_layer_vectors = bert_data['layer{}'.format(l)][para_idx]
s_layer_vectors = s_or_e_bert_data['layer{}'.format(l)][para_idx]
            # b_pvs holds this layer's paragraph token vectors for bert
b_pvs = b_layer_vectors[first_length:second_length]
s_pvs = s_layer_vectors[len(q_ids):len(q_ids) + c_length]
            # mean cosine distance between corresponding bert and s/e-bert paragraph vectors
p_dist = np.mean([distance.cosine(b_p, s_p) for b_p, s_p in zip(b_pvs, s_pvs)])
all_para_distances[l].append(p_dist)
            # b_qvs holds this layer's question token vectors for bert
b_qvs = b_layer_vectors[:first_length]
s_qvs = s_layer_vectors[:q_length]
q_dist = np.mean([distance.cosine(b_q, s_q) for b_q, s_q in zip(b_qvs, s_qvs)])
all_q_distances[l].append(q_dist)
    # all_para_distances has 12 lists (one per layer), each holding per-example mean distances
all_para_mean_variances = [np.mean(v) for v in all_para_distances]
all_q_mean_variances = [np.mean(v) for v in all_q_distances]
print(all_para_mean_variances)
print(all_q_mean_variances)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('in_dir', type=str, default=None)
parser.add_argument('-n', '--num_batches', type=int, default=20)
parser.add_argument('-m', '--model', type=str, default='sbert', choices=('ebert', 'sbert'),
                        help='choose which model to compare against bert')
main(parser.parse_args())
|
src/api/log/__init__.py | fekblom/critic | 216 | 43584 | import rebase
import partition
|
angr/procedures/posix/accept.py | Kyle-Kyle/angr | 6,132 | 43614 | import angr
######################################
# accept (but not really)
######################################
class accept(angr.SimProcedure):
#pylint:disable=arguments-differ
def run(self, sockfd, addr, addrlen):
conc_addrlen = self.state.mem[addrlen].int.concrete
addr_data = self.state.solver.BVS('accept_addr', conc_addrlen*8, key=('api', 'accept', 'addr'))
self.state.memory.store(addr, addr_data)
ident = 'unknown'
if not sockfd.symbolic:
sockfd = self.state.solver.eval(sockfd)
if sockfd in self.state.posix.fd:
simsockfd = self.state.posix.fd[sockfd]
for potential_ident in self.state.posix.sockets:
if self.state.posix.sockets[potential_ident][0] is simsockfd.read_storage and \
self.state.posix.sockets[potential_ident][1] is simsockfd.write_storage:
ident = potential_ident
break
ident_counters = dict(self.state.globals.get('accept_idents', {}))
ident_counters[ident] = ident_counters.get(ident, 0) + 1
self.state.globals['accept_idents'] = ident_counters
fd = self.state.posix.open_socket(('accept', ident, ident_counters[ident]))
return fd
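# Hooking sketch: angr normally picks this SimProcedure up from its procedure
# catalog, but a manual hook on a loaded project (hypothetical `proj`) would be:
#   proj.hook_symbol('accept', accept())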
|
scripts/sample_data/discourse_1369.py | gmatteo/awesome-panel | 179 | 43627 | import panel as pn
def test_alert():
my_alert = pn.pane.Alert("foo", alert_type="primary")
my_button = pn.widgets.Button(name="Toggle")
def toggle(event):
if my_alert.alert_type == "primary":
my_alert.alert_type == "success"
else:
my_alert.alert_type = "primary"
my_alert.object = my_alert.alert_type
my_button.on_click(toggle)
pn.Row(my_alert, my_button).show()
test_alert()
|
tests/conftest.py | chadwhitacre/confidant | 1,820 | 43650 | <filename>tests/conftest.py<gh_stars>1000+
import pytest
@pytest.fixture(autouse=True)
def encrypted_settings_mock(mocker):
mocker.patch('confidant.settings.encrypted_settings.secret_string', {})
mocker.patch(
'confidant.settings.encrypted_settings.decrypted_secrets',
{'SESSION_SECRET': 'TEST_KEY'},
)
|
vega/algorithms/nas/modnas/optim/model_optim/base.py | This-50m/vega | 724 | 43651 | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Score model optimum finder."""
import random
from collections import OrderedDict
from typing import Set
class ModelOptim():
"""Score model optimum finder class."""
def __init__(self, space):
self.space = space
def get_random_index(self, excludes: Set[int]) -> int:
"""Return random categorical index from search space."""
index = random.randint(0, self.space.categorical_size() - 1)
while index in excludes:
index = random.randint(0, self.space.categorical_size() - 1)
return index
def get_random_params(self, excludes: Set[int]) -> OrderedDict:
"""Return random categorical parameters from search space."""
return self.space.get_categorical_params(self.get_random_index(excludes))
def get_optimums(self, model, size, excludes):
"""Return optimums in score model."""
raise NotImplementedError
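# Subclass sketch (illustrative only, not part of the library): a random-search
# optimum finder reusing the helpers above.
#   class RandomModelOptim(ModelOptim):
#       def get_optimums(self, model, size, excludes):
#           return [self.get_random_index(excludes) for _ in range(size)]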
|
tests/test_adjacency.py | jpmaterial/trimesh | 1,882 | 43700 | try:
from . import generic as g
except BaseException:
import generic as g
class AdjacencyTest(g.unittest.TestCase):
def test_radius(self):
for radius in [0.1, 1.0, 3.1459, 29.20]:
m = g.trimesh.creation.cylinder(
radius=radius, height=radius * 10)
# remove the cylinder cap
signs = (g.np.sign(m.vertices[:, 2]) < 0)[m.faces]
not_cap = ~g.np.logical_or(
signs.all(axis=1), ~signs.any(axis=1))
m.update_faces(not_cap)
# compare the calculated radius
radii = m.face_adjacency_radius
radii = radii[g.np.isfinite(radii)]
assert g.np.allclose(radii, radius, atol=radius / 100)
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
|
clastic/tests/test_obj_browser.py | mahmoud/clastic | 140 | 43708 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
import pytest
from clastic.contrib.obj_browser import create_app
_IS_PYPY = '__pypy__' in sys.builtin_module_names
@pytest.mark.skipif(_IS_PYPY, reason='pypy gc cannot support obj browsing')
def test_flaw_basic():
app = create_app()
cl = app.get_local_client()
resp = cl.get('/')
assert resp.status_code == 302 # take me to the default
resp = cl.get('/', follow_redirects=True)
assert resp.status_code == 200 # default should be sys
assert 'modules' in resp.get_data(True)
|
tests/rororo/test_openapi.py | fajfer/rororo | 105 | 43711 | import datetime
import io
import json
import zipfile
from pathlib import Path
import pyrsistent
import pytest
import yaml
from aiohttp import web
from openapi_core.shortcuts import create_spec
from yarl import URL
from rororo import (
BaseSettings,
get_openapi_context,
get_openapi_schema,
get_openapi_spec,
openapi_context,
OperationTableDef,
setup_openapi,
setup_settings_from_environ,
)
from rororo.annotations import DictStrAny
from rororo.openapi import get_validated_data
from rororo.openapi.exceptions import (
ConfigurationError,
OperationError,
validation_error_context,
ValidationError,
)
ROOT_PATH = Path(__file__).parent
INVALID_OPENAPI_JSON_PATH = ROOT_PATH / "invalid-openapi.json"
INVALID_OPENAPI_YAML_PATH = ROOT_PATH / "invalid-openapi.yaml"
OPENAPI_JSON_PATH = ROOT_PATH / "openapi.json"
OPENAPI_YAML_PATH = ROOT_PATH / "openapi.yaml"
TEST_NESTED_OBJECT = {
"uid": "6fccda1b-0873-4c8a-bceb-a2acfe5851da",
"type": "nested-object",
"data": {
"data_item": {"key": "value1", "any_data": {}},
"data_items": [
{"key": "value2", "any_data": {"two": 2}},
{"key": "value3", "any_data": {"three": 3}},
],
"str_items": ["1", "2", "3"],
},
"any_data": {"key1": "value1", "key2": "value2", "list": [1, 2, 3]},
}
operations = OperationTableDef()
invalid_operations = OperationTableDef()
def custom_json_loader(content: bytes) -> DictStrAny:
return json.load(io.BytesIO(content))
def custom_yaml_loader(content: bytes) -> DictStrAny:
return yaml.load(content, Loader=yaml.SafeLoader)
@invalid_operations.register("does-not-exist")
async def does_not_exist(request: web.Request) -> web.Response:
return web.Response(text="Hello, world!")
@operations.register("create-post")
async def create_post(request: web.Request) -> web.Response:
data = get_validated_data(request)
published_at: datetime.datetime = data["published_at"]
with validation_error_context("body", "published_at"):
if published_at.tzinfo is None:
raise ValidationError(message="Invalid value")
return web.json_response(
{**data, "id": 1, "published_at": data["published_at"].isoformat()},
status=201,
)
@operations.register
async def hello_world(request: web.Request) -> web.Response:
with openapi_context(request) as context:
name = context.parameters.query.get("name") or "world"
email = context.parameters.query.get("email") or "<EMAIL>"
return web.json_response(
{"message": f"Hello, {name}!", "email": email}
)
@operations.register
async def retrieve_any_object_from_request_body(
request: web.Request,
) -> web.Response:
return web.json_response(pyrsistent.thaw(get_validated_data(request)))
@operations.register
async def retrieve_array_from_request_body(
request: web.Request,
) -> web.Response:
with openapi_context(request) as context:
return web.json_response(pyrsistent.thaw(context.data))
@operations.register
async def retrieve_empty(request: web.Request) -> web.Response:
context = get_openapi_context(request)
return web.Response(
status=204, headers={"X-API-Key": context.security.get("apiKey") or ""}
)
@operations.register
async def retrieve_invalid_response(request: web.Request) -> web.Response:
return web.json_response({})
@operations.register
async def retrieve_post(request: web.Request) -> web.Response:
context = get_openapi_context(request)
return web.json_response(
{"id": context.parameters.path["post_id"], "title": "The Post"}
)
@operations.register
async def retrieve_nested_object_from_request_body(
request: web.Request,
) -> web.Response:
with openapi_context(request) as context:
data = pyrsistent.thaw(context.data)
data["uid"] = str(data["uid"])
return web.json_response(
data,
headers={
"X-Data-Type": str(type(context.data)),
"X-Data-Data-Data-Items-Type": str(
type(context.data["data"]["data_items"])
),
"X-Data-Data-Str-Items-Type": str(
type(context.data["data"]["str_items"])
),
"X-Data-UID-Type": str(type(context.data["uid"])),
},
)
@operations.register
async def retrieve_zip(request: web.Request) -> web.Response:
output = io.BytesIO()
with zipfile.ZipFile(output, "w") as handler:
handler.writestr("hello.txt", "Hello, world!")
output.seek(0)
return web.Response(
body=output,
content_type="application/zip",
headers={"Content-Disposition": "attachment; filename=hello.zip"},
)
@operations.register
async def upload_image(request: web.Request) -> web.Response:
return web.Response(
body=get_openapi_context(request).data,
content_type=request.content_type,
status=201,
)
@operations.register
async def upload_text(request: web.Request) -> web.Response:
return web.Response(
text=get_openapi_context(request).data,
content_type=request.content_type,
status=201,
)
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_any_object_request_body(aiohttp_client, schema_path):
app = setup_openapi(
web.Application(), schema_path, operations, server_url=URL("/api/")
)
client = await aiohttp_client(app)
response = await client.post("/api/any-object", json=TEST_NESTED_OBJECT)
assert response.status == 200
assert await response.json() == TEST_NESTED_OBJECT
@pytest.mark.parametrize(
"data, expected_status, expected_response",
(
(
{},
422,
{"detail": [{"loc": ["body"], "message": "[] is too short"}]},
),
(
[],
422,
{"detail": [{"loc": ["body"], "message": "[] is too short"}]},
),
(
[""],
422,
{"detail": [{"loc": ["body", 0], "message": "'' is too short"}]},
),
(["Hello", "world!"], 200, ["Hello", "world!"]),
),
)
async def test_array_request_body(
aiohttp_client, data, expected_status, expected_response
):
app = setup_openapi(
web.Application(),
OPENAPI_YAML_PATH,
operations,
server_url=URL("/api"),
)
client = await aiohttp_client(app)
response = await client.post("/api/array", json=data)
assert response.status == expected_status
assert await response.json() == expected_response
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_create_post_201(aiohttp_client, schema_path):
app = setup_openapi(
web.Application(), schema_path, operations, server_url="/api/"
)
published_at = "2020-04-01T12:00:00+02:00"
client = await aiohttp_client(app)
response = await client.post(
"/api/create-post",
json={
"title": "Post",
"slug": "post",
"content": "Post Content",
"published_at": published_at,
},
)
assert response.status == 201
assert await response.json() == {
"id": 1,
"title": "Post",
"slug": "post",
"content": "Post Content",
"published_at": published_at,
}
@pytest.mark.parametrize(
"schema_path, invalid_data, expected_detail",
(
(
OPENAPI_JSON_PATH,
{},
[
{"loc": ["body", "title"], "message": "Field required"},
{"loc": ["body", "slug"], "message": "Field required"},
{"loc": ["body", "content"], "message": "Field required"},
{"loc": ["body", "published_at"], "message": "Field required"},
],
),
(
OPENAPI_YAML_PATH,
{"title": "Title"},
[
{"loc": ["body", "slug"], "message": "Field required"},
{"loc": ["body", "content"], "message": "Field required"},
{"loc": ["body", "published_at"], "message": "Field required"},
],
),
(
OPENAPI_JSON_PATH,
{"title": "Title", "slug": "slug"},
[
{"loc": ["body", "content"], "message": "Field required"},
{"loc": ["body", "published_at"], "message": "Field required"},
],
),
(
OPENAPI_YAML_PATH,
{"title": "Title", "slug": "slug", "content": "Content"},
[{"loc": ["body", "published_at"], "message": "Field required"}],
),
),
)
async def test_create_post_422(
aiohttp_client, schema_path, invalid_data, expected_detail
):
app = setup_openapi(
web.Application(),
schema_path,
operations,
server_url=URL("/dev-api"),
)
client = await aiohttp_client(app)
response = await client.post("/dev-api/create-post", json=invalid_data)
assert response.status == 422
assert (await response.json())["detail"] == expected_detail
@pytest.mark.parametrize(
"schema_path, schema_loader",
(
(OPENAPI_JSON_PATH, custom_json_loader),
(OPENAPI_YAML_PATH, custom_yaml_loader),
),
)
def test_custom_schema_loader(schema_path, schema_loader):
app = setup_openapi(
web.Application(),
schema_path,
operations,
server_url="/api/",
schema_loader=schema_loader,
)
assert isinstance(get_openapi_schema(app), dict)
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_email_format(aiohttp_client, schema_path):
app = setup_openapi(
web.Application(), schema_path, operations, server_url="/api/"
)
client = await aiohttp_client(app)
response = await client.get(
"/api/hello", params={"email": "<EMAIL>"}
)
assert response.status == 200
assert (await response.json())["email"] == "<EMAIL>"
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_invalid_parameter_format(aiohttp_client, schema_path):
app = setup_openapi(
web.Application(), schema_path, operations, server_url="/api/"
)
client = await aiohttp_client(app)
response = await client.get("/api/posts/not-an-integer")
assert response.status == 422
assert await response.json() == {
"detail": [
{
"loc": ["parameters", "post_id"],
"message": "'not-an-integer' is not a type of 'integer'",
}
]
}
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_invalid_parameter_value(aiohttp_client, schema_path):
app = setup_openapi(
web.Application(), schema_path, operations, server_url="/api/"
)
client = await aiohttp_client(app)
response = await client.get("/api/posts/0")
assert response.status == 422
assert await response.json() == {
"detail": [
{
"loc": ["parameters", "post_id"],
"message": "0 is less than the minimum of 1",
}
]
}
def test_get_openapi_schema_no_schema():
with pytest.raises(ConfigurationError):
get_openapi_schema(web.Application())
def test_get_openapi_spec_no_spec():
with pytest.raises(ConfigurationError):
get_openapi_spec(web.Application())
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_multiple_request_errors(aiohttp_client, schema_path):
app = setup_openapi(
web.Application(), schema_path, operations, server_url="/api/"
)
client = await aiohttp_client(app)
response = await client.get("/api/hello?name=&email=")
assert response.status == 422
assert await response.json() == {
"detail": [
{
"loc": ["parameters", "name"],
"message": "Empty parameter value",
},
{
"loc": ["parameters", "email"],
"message": "Empty parameter value",
},
]
}
@pytest.mark.parametrize(
"schema_path, query_string, expected_message",
(
(OPENAPI_JSON_PATH, None, "Hello, world!"),
(OPENAPI_JSON_PATH, "?name=Name", "Hello, Name!"),
(str(OPENAPI_JSON_PATH), None, "Hello, world!"),
(str(OPENAPI_JSON_PATH), "?name=Name", "Hello, Name!"),
(OPENAPI_YAML_PATH, None, "Hello, world!"),
(OPENAPI_YAML_PATH, "?name=Name", "Hello, Name!"),
(str(OPENAPI_YAML_PATH), None, "Hello, world!"),
(str(OPENAPI_YAML_PATH), "?name=Name", "Hello, Name!"),
),
)
async def test_openapi(
aiohttp_client, schema_path, query_string, expected_message
):
app = setup_openapi(
web.Application(), schema_path, operations, server_url="/api"
)
client = await aiohttp_client(app)
url = "/api/hello"
response = await client.get(
f"{url}{query_string}" if query_string is not None else url
)
assert response.status == 200
assert (await response.json())["message"] == expected_message
@pytest.mark.parametrize("is_enabled", (False, True))
async def test_openapi_validate_response(aiohttp_client, is_enabled):
app = web.Application()
setup_openapi(
app,
OPENAPI_YAML_PATH,
operations,
server_url="/api",
is_validate_response=is_enabled,
)
client = await aiohttp_client(app)
response = await client.get("/api/hello")
assert response.status == 200
assert await response.json() == {
"message": "Hello, world!",
"email": "<EMAIL>",
}
@pytest.mark.parametrize(
"has_openapi_schema_handler, url, expected_status",
(
(True, "/api/openapi.json", 200),
(False, "/api/openapi.yaml", 404),
(True, "/api/openapi.yaml", 200),
(False, "/api/openapi.yaml", 404),
(True, "/api/openapi.txt", 500),
(False, "/api/openapi.txt", 404),
),
)
async def test_openapi_schema_handler(
aiohttp_client, has_openapi_schema_handler, url, expected_status
):
app = web.Application()
setup_openapi(
app,
OPENAPI_YAML_PATH,
operations,
server_url=URL("/api"),
has_openapi_schema_handler=has_openapi_schema_handler,
)
client = await aiohttp_client(app)
response = await client.get(url)
assert response.status == expected_status
@pytest.mark.parametrize(
"schema_path, headers, expected",
(
(OPENAPI_JSON_PATH, {}, ""),
(OPENAPI_JSON_PATH, {"X-API-Key": "apiKey"}, "apiKey"),
(OPENAPI_YAML_PATH, {}, ""),
(OPENAPI_YAML_PATH, {"X-API-Key": "apiKey"}, "apiKey"),
),
)
async def test_optional_security_scheme(
aiohttp_client, schema_path, headers, expected
):
app = setup_openapi(
web.Application(), schema_path, operations, server_url="/api/"
)
client = await aiohttp_client(app)
response = await client.get("/api/empty", headers=headers)
assert response.status == 204
assert response.headers["X-API-Key"] == expected
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_request_body_nested_object(aiohttp_client, schema_path):
app = setup_openapi(
web.Application(), schema_path, operations, server_url="/api/"
)
client = await aiohttp_client(app)
response = await client.post("/api/nested-object", json=TEST_NESTED_OBJECT)
assert response.status == 200
assert response.headers["X-Data-Type"] == "<class 'pyrsistent._pmap.PMap'>"
assert (
response.headers["X-Data-Data-Data-Items-Type"]
== "<class 'pvectorc.PVector'>"
)
assert (
response.headers["X-Data-Data-Str-Items-Type"]
== "<class 'pvectorc.PVector'>"
)
assert response.headers["X-Data-UID-Type"] == "<class 'uuid.UUID'>"
assert await response.json() == TEST_NESTED_OBJECT
@pytest.mark.parametrize(
"schema_path, loader",
(
(OPENAPI_JSON_PATH, custom_json_loader),
(OPENAPI_YAML_PATH, custom_yaml_loader),
),
)
async def test_setup_openapi_schema_and_spec(
aiohttp_client, schema_path, loader
):
schema = loader(schema_path.read_bytes())
spec = create_spec(schema)
app = setup_openapi(
web.Application(),
operations,
schema=schema,
spec=spec,
server_url="/api/",
)
client = await aiohttp_client(app)
response = await client.get("/api/hello")
assert response.status == 200
assert await response.json() == {
"message": "Hello, world!",
"email": "<EMAIL>",
}
@pytest.mark.parametrize(
"schema_path, loader",
(
(OPENAPI_JSON_PATH, custom_json_loader),
(OPENAPI_YAML_PATH, custom_yaml_loader),
),
)
async def test_setup_openapi_schema_and_path_ignore_invalid_schema_path(
aiohttp_client, schema_path, loader
):
schema = loader(schema_path.read_bytes())
spec = create_spec(schema)
setup_openapi(
web.Application(),
INVALID_OPENAPI_JSON_PATH,
operations,
schema=schema,
spec=spec,
server_url="/api/",
)
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
def test_setup_openapi_invalid_operation(schema_path):
with pytest.raises(OperationError):
setup_openapi(
web.Application(),
schema_path,
invalid_operations,
server_url="/api",
)
def test_setup_openapi_invalid_path():
with pytest.raises(ConfigurationError):
setup_openapi(
web.Application(), ROOT_PATH / "does-not-exist.yaml", operations
)
def test_setup_openapi_invalid_file():
with pytest.raises(ConfigurationError):
setup_openapi(web.Application(), ROOT_PATH / "settings.py", operations)
@pytest.mark.parametrize(
"schema_path", (INVALID_OPENAPI_JSON_PATH, INVALID_OPENAPI_YAML_PATH)
)
def test_setup_openapi_invalid_spec(schema_path):
with pytest.raises(ConfigurationError):
setup_openapi(web.Application(), schema_path, operations)
@pytest.mark.parametrize(
"schema_path, level, url, expected_status",
(
(OPENAPI_JSON_PATH, "test", "/api/hello", 200),
(OPENAPI_JSON_PATH, "test", "/dev-api/hello", 404),
(OPENAPI_YAML_PATH, "test", "/api/hello", 200),
(OPENAPI_YAML_PATH, "test", "/dev-api/hello", 404),
(OPENAPI_JSON_PATH, "dev", "/api/hello", 404),
(OPENAPI_JSON_PATH, "dev", "/dev-api/hello", 200),
(OPENAPI_YAML_PATH, "dev", "/api/hello", 404),
(OPENAPI_YAML_PATH, "dev", "/dev-api/hello", 200),
),
)
async def test_setup_openapi_server_url_from_settings(
monkeypatch, aiohttp_client, schema_path, level, url, expected_status
):
monkeypatch.setenv("LEVEL", level)
app = setup_openapi(
setup_settings_from_environ(web.Application(), BaseSettings),
schema_path,
operations,
)
client = await aiohttp_client(app)
response = await client.get(url)
assert response.status == expected_status
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
def test_setup_openapi_server_url_invalid_level(monkeypatch, schema_path):
monkeypatch.setenv("LEVEL", "prod")
with pytest.raises(ConfigurationError):
setup_openapi(
setup_settings_from_environ(web.Application(), BaseSettings),
schema_path,
operations,
)
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
def test_setup_openapi_server_url_does_not_set(schema_path):
with pytest.raises(ConfigurationError):
setup_openapi(web.Application(), schema_path, operations)
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_upload_image(aiohttp_client, schema_path):
blank_png = (Path(__file__).parent / "data" / "blank.png").read_bytes()
app = setup_openapi(
web.Application(), schema_path, operations, server_url="/api"
)
client = await aiohttp_client(app)
response = await client.post(
"/api/upload-image",
data=blank_png,
headers={"Content-Type": "image/png"},
)
assert response.status == 201
assert await response.read() == blank_png
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_upload_text(aiohttp_client, schema_path):
text = "Hello, world! And other things..."
app = setup_openapi(
web.Application(), schema_path, operations, server_url="/api"
)
client = await aiohttp_client(app)
response = await client.post(
"/api/upload-text",
data=text.encode("utf-8"),
headers={"Content-Type": "text/plain"},
)
assert response.status == 201
assert await response.text() == text
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_validate_binary_response(aiohttp_client, schema_path):
app = setup_openapi(
web.Application(),
schema_path,
operations,
server_url="/api",
is_validate_response=True,
)
client = await aiohttp_client(app)
response = await client.get("/api/download.zip")
assert response.status == 200
assert response.content_type == "application/zip"
content = io.BytesIO(await response.read())
with zipfile.ZipFile(content) as handler:
with handler.open("hello.txt") as item:
assert item.read() == b"Hello, world!"
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_validate_empty_response(aiohttp_client, schema_path):
app = setup_openapi(
web.Application(),
schema_path,
operations,
server_url="/api",
is_validate_response=True,
)
client = await aiohttp_client(app)
response = await client.get("/api/empty")
assert response.status == 204
@pytest.mark.parametrize(
"schema_path, is_validate_response, expected_status",
(
(OPENAPI_JSON_PATH, False, 200),
(OPENAPI_JSON_PATH, True, 422),
(OPENAPI_YAML_PATH, False, 200),
        (OPENAPI_YAML_PATH, True, 422),
),
)
async def test_validate_response(
aiohttp_client, schema_path, is_validate_response, expected_status
):
app = setup_openapi(
web.Application(),
schema_path,
operations,
server_url="/api",
is_validate_response=is_validate_response,
)
client = await aiohttp_client(app)
response = await client.get("/api/invalid-response")
assert response.status == expected_status
@pytest.mark.parametrize("schema_path", (OPENAPI_JSON_PATH, OPENAPI_YAML_PATH))
async def test_validate_response_error(aiohttp_client, schema_path):
app = setup_openapi(
web.Application(),
schema_path,
operations,
server_url="/api",
is_validate_response=True,
)
client = await aiohttp_client(app)
response = await client.get("/api/invalid-response")
assert response.status == 422
assert await response.json() == {
"detail": [
{"loc": ["response", "uid"], "message": "Field required"},
{"loc": ["response", "type"], "message": "Field required"},
{"loc": ["response", "data"], "message": "Field required"},
{"loc": ["response", "any_data"], "message": "Field required"},
]
}
|
armi/materials/inconel600.py | celikten/armi | 162 | 43725 | # Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Inconel600
"""
import numpy
from armi.utils.units import getTc
from armi.materials.material import Material
class Inconel600(Material):
name = "Inconel600"
references = {
"mass fractions": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
"density": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
"thermalConductivity": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
"specific heat": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
"linear expansion percent": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
"linear expansion": "http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf",
}
def __init__(self):
Material.__init__(self)
self.p.refTempK = 294.15
self.p.refDens = 8.47 # g/cc
# Only density measurement presented in the reference.
# Presumed to be performed at 21C since this was the reference temperature for linear expansion measurements.
def setDefaultMassFracs(self):
massFracs = {
"NI": 0.7541,
"CR": 0.1550,
"FE": 0.0800,
"C": 0.0008,
"MN55": 0.0050,
"S": 0.0001,
"SI": 0.0025,
"CU": 0.0025,
}
for element, massFrac in massFracs.items():
self.setMassFrac(element, massFrac)
def polyfitThermalConductivity(self, power=2):
r"""
Calculates the coefficients of a polynomial fit for thermalConductivity.
Based on data from http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf
Fits a polynomial to the data set and returns the coefficients.
Parameters
----------
power : int, optional
power of the polynomial fit equation
Returns
-------
        list of length 'power' + 1 containing the polynomial fit coefficients for thermal conductivity, highest order first.
"""
Tc = [20.0, 100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0]
k = [14.9, 15.9, 17.3, 19.0, 20.5, 22.1, 23.9, 25.7, 27.5]
return numpy.polyfit(numpy.array(Tc), numpy.array(k), power).tolist()
def thermalConductivity(self, Tk=None, Tc=None):
r"""
Returns the thermal conductivity of Inconel600.
Parameters
----------
Tk : float, optional
temperature in (K)
Tc : float, optional
Temperature in (C)
Returns
-------
thermalCond : float
thermal conductivity in W/m/C
"""
Tc = getTc(Tc, Tk)
self.checkTempRange(20.0, 800.0, Tc, "thermal conductivity")
thermalCond = 3.4938e-6 * Tc ** 2 + 1.3403e-2 * Tc + 14.572
return thermalCond # W/m-C
def polyfitHeatCapacity(self, power=2):
r"""
Calculates the coefficients of a polynomial fit for heatCapacity.
Based on data from http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf
Fits a polynomial to the data set and returns the coefficients.
Parameters
----------
power : int, optional
power of the polynomial fit equation
Returns
-------
        list of length 'power' + 1 containing the polynomial fit coefficients for heat capacity, highest order first.
"""
Tc = [20.0, 100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0]
cp = [444.0, 465.0, 486.0, 502.0, 519.0, 536.0, 578.0, 595.0, 611.0, 628.0]
return numpy.polyfit(numpy.array(Tc), numpy.array(cp), power).tolist()
def heatCapacity(self, Tk=None, Tc=None):
r"""
Returns the specific heat capacity of Inconel600.
Parameters
----------
Tk : float, optional
Temperature in Kelvin.
Tc : float, optional
Temperature in degrees Celsius.
Returns
-------
heatCapacity : float
heat capacity in J/kg/C
"""
Tc = getTc(Tc, Tk)
self.checkTempRange(20, 900, Tc, "heat capacity")
heatCapacity = 7.4021e-6 * Tc ** 2 + 0.20573 * Tc + 441.3
return heatCapacity # J/kg-C
def polyfitLinearExpansionPercent(self, power=2):
r"""
Calculates the coefficients of a polynomial fit for linearExpansionPercent.
Based on data from http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf
Uses mean CTE values to find percent thermal strain values. Fits a polynomial
to the data set and returns the coefficients.
Parameters
----------
power : int, optional
power of the polynomial fit equation
Returns
-------
        list of length 'power' + 1 containing the polynomial fit coefficients for linearExpansionPercent, highest order first.
"""
refTempC = getTc(None, Tk=self.p.refTempK)
Tc = [100.0, 200.0, 300.0, 400.0, 500.0, 600.0, 700.0, 800.0, 900.0]
alpha_mean = [
1.33e-05,
1.38e-05,
1.42e-05,
1.45e-05,
1.49e-05,
1.53e-05,
1.58e-05,
1.61e-05,
1.64e-05,
]
linExpPercent = [0.0]
for i, alpha in enumerate(alpha_mean):
linExpPercentVal = 100.0 * alpha * (Tc[i] - refTempC)
linExpPercent.append(linExpPercentVal)
Tc.insert(0, refTempC)
return numpy.polyfit(
numpy.array(Tc), numpy.array(linExpPercent), power
).tolist()
def linearExpansionPercent(self, Tk=None, Tc=None):
r"""
Returns percent linear expansion of Inconel600.
Parameters
----------
Tk : float
temperature in (K)
Tc : float
Temperature in (C)
Returns
-------
linExpPercent in %-m/m/C
"""
Tc = getTc(Tc, Tk)
self.checkTempRange(21.0, 900.0, Tc, "linear expansion percent")
linExpPercent = 3.722e-7 * Tc ** 2 + 1.303e-3 * Tc - 2.863e-2
return linExpPercent
def linearExpansion(self, Tk=None, Tc=None):
r"""
From http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf
Using the correlation for linearExpansionPercent, the 2nd order polynomial is divided by 100 to convert
from percent strain to strain, then differentiated with respect to temperature to find the correlation
for instantaneous linear expansion.
        i.e. for a linearExpansionPercent correlation of a*Tc**2 + b*Tc + c, the linearExpansion
        correlation is 2*a/100*Tc + b/100, which for this material evaluates to
        2*(3.722e-7/100.0)*Tc + 1.303e-3/100.0.
Parameters
----------
Tk : float
temperature in (K)
Tc : float
Temperature in (C)
Returns
-------
linExp in m/m/C
"""
Tc = getTc(Tc, Tk)
self.checkTempRange(21.0, 900.0, Tc, "linear expansion")
linExp = 7.444e-9 * Tc + 1.303e-5
return linExp
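# Hedged verification sketch (added for illustration, not part of the upstream module):
# assuming Inconel600 can be instantiated outside a full ARMI case, a refit of the
# tabulated conductivity data should stay close to the hard-coded correlation above.
#
#     inconel = Inconel600()
#     coeffs = inconel.polyfitThermalConductivity(power=2)   # highest-order coefficient first
#     refit_k = numpy.polyval(numpy.array(coeffs), 400.0)    # ~20.5 W/m-C from the source table
#     assert abs(refit_k - inconel.thermalConductivity(Tc=400.0)) < 0.5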
|
hubspot/crm/products/api/__init__.py | fakepop/hubspot-api-python | 117 | 43727 | <reponame>fakepop/hubspot-api-python
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from hubspot.crm.products.api.associations_api import AssociationsApi
from hubspot.crm.products.api.basic_api import BasicApi
from hubspot.crm.products.api.batch_api import BatchApi
from hubspot.crm.products.api.search_api import SearchApi
|
apps/base/urls/product_category_uom.py | youssriaboelseod/pyerp | 115 | 43757 | <reponame>youssriaboelseod/pyerp
"""The store routes
"""
# Django Library
from django.urls import path
# Localfolder Library
from ..views.product_category_uom import (
ProductCategoryUOMCreateView, ProductCategoryUOMDeleteView,
ProductCategoryUOMDetailView, ProductCategoryUOMListView,
ProductCategoryUOMUpdateView)
app_name = 'PyProductCategoryUOM'
urlpatterns = [
path('', ProductCategoryUOMListView.as_view(), name='list'),
path('add/', ProductCategoryUOMCreateView.as_view(), name='add'),
    path('<int:pk>/', ProductCategoryUOMDetailView.as_view(), name='product-category-uom-detail'),
path('<int:pk>/update', ProductCategoryUOMUpdateView.as_view(), name='update'),
path('<int:pk>/delete/', ProductCategoryUOMDeleteView.as_view(), name='delete'),
]
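# Illustrative wiring sketch (added, not part of the upstream file): these routes are
# normally mounted from the project-level URLconf; the URL prefix is an assumption and
# the dotted module path simply mirrors this file's location.
#
#     from django.urls import include, path
#     urlpatterns += [
#         path('product-category-uom/', include('apps.base.urls.product_category_uom')),
#     ]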
|
muddery/server/statements/statement_func_set.py | dongwudanci/muddery | 127 | 43772 | <gh_stars>100-1000
"""
A statement function set holds a set of statement functions that can be used in statements.
"""
class BaseStatementFuncSet(object):
"""
A statement function set holds a set of statement functions that can be used in statements.
"""
def __init__(self):
self.funcs = {}
self.at_creation()
def at_creation(self):
"""
Load statement functions here.
"""
pass
def add(self, func_cls):
"""
Add a statement function's class.
Args:
func_cls: statement function's class
Returns:
None
"""
# save an instance of the function class
self.funcs[func_cls.key] = func_cls
def get_func_class(self, key):
"""
Get statement function's class.
Args:
key: statement function's key.
Returns:
function's class
"""
if key in self.funcs:
return self.funcs[key]
else:
return None
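# Illustrative sketch (added, not part of the Muddery source): a hypothetical subclass
# showing how at_creation() is meant to register function classes through add(); the
# ExampleFunc class and its "example" key are invented for demonstration only.
class ExampleFunc(object):
    key = "example"


class ExampleFuncSet(BaseStatementFuncSet):
    def at_creation(self):
        # register statement function classes; get_func_class("example") returns ExampleFunc
        self.add(ExampleFunc)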
|
tagger/data/__init__.py | XMUNLP/Tagger | 335 | 43871 | <reponame>XMUNLP/Tagger<filename>tagger/data/__init__.py<gh_stars>100-1000
from tagger.data.dataset import get_dataset
from tagger.data.vocab import load_vocabulary, lookup
from tagger.data.embedding import load_glove_embedding
|
tools/parse_log.py | KyleHai/DeepSpeech2 | 158 | 43910 | import fileinput as fin
# funcs:
def findValWithFormat(line):
lines.append(line)
taken = line.split(" ")
raw_val = taken[-1]
val = raw_val.split("/")[-1]
val = val[0:-2]
if 'us' in val:
val = float(val[0:val.find('us')])
val = val/1000
else:
val = float(val[0:val.find('ms')])
return val
def getCellNum(line):
cell_num = line[line.find(rnn_cell_string):line.find(rnn_cell_string) + len(rnn_cell_string) + 1]
return cell_num
def profRNNCell(line, rnncell_prof):
cell_num = getCellNum(line)
val = findValWithFormat(line)
rnncell_prof[cell_num] += val
# variables:
lines = []
module_rnncell = "CustomRNNCell2"
module_grad = 'gradients'
num_rnn_layer = 7
rnn_cell_string = "cell_"
module_rnn = 'rnn'
module_conv1 = 'conv1'
module_conv2 = 'conv2'
module_softmax = 'softmax_linear'
module_ctc = ['ctc_loss', 'CTCLoss']
module_bn = 'bn2'
rnn_cells = [rnn_cell_string+str(i) for i in range(num_rnn_layer)]
rnncell_f_prof = dict.fromkeys(rnn_cells)
rnncell_b_prof = dict.fromkeys(rnn_cells)
# prf estimator:
for el in rnncell_f_prof:
rnncell_f_prof[el] = 0.0
for el in rnncell_b_prof:
rnncell_b_prof[el] = 0.0
overall_cost = 0.0
profs ={\
'rnn_trans_f_prof': 0.0, \
'rnn_trans_b_prof': 0.0, \
'rnn_reshape_f_prof': 0.0, \
'rnn_reshape_b_prof': 0.0, \
'rnn_ReverseSequence_f_prof': 0.0, \
'rnn_ReverseSequence_b_prof': 0.0, \
'conv1_f_prof': 0.0, \
'conv1_b_prof': 0.0, \
'bn1_f_prof': 0.0, \
'bn1_b_prof': 0.0, \
'relu1_f_prof': 0.0, \
'relu1_b_prof': 0.0, \
'conv2_f_prof': 0.0, \
'conv2_b_prof': 0.0, \
'bn2_f_prof': 0.0, \
'bn2_b_prof': 0.0, \
'relu2_f_prof': 0.0, \
'relu2_b_prof': 0.0, \
'softmax_f_prof': 0.0, \
'softmax_b_prof': 0.0, \
'ctc_f_prof': 0.0, \
'ctc_b_prof': 0.0 \
}
with open('timing_memory.log', 'r') as f:
for line in f:
if len(line) > 3:
if ((line[3] != ' ') or 'Adam/update_' in line) and ('flops' not in line):
# flops is not considered
# conv1
if (module_grad not in line) and (module_conv1 in line) and ('Minimum' not in line) and ('Relu' not in line) and (module_bn not in line):
val = findValWithFormat(line)
profs['conv1_f_prof'] += val
if (module_grad in line) and (module_conv1 in line) and ('Minimum' not in line) and ('Relu' not in line) and (module_bn not in line):
val = findValWithFormat(line)
profs['conv1_b_prof'] += val
# BN1
if (module_grad not in line) and (module_conv1 in line) and ('Minimum' not in line) and ('Relu' not in line) and (module_bn in line):
val = findValWithFormat(line)
profs['bn1_f_prof'] += val
if (module_grad in line) and (module_conv1 in line) and ('Minimum' not in line) and ('Relu' not in line) and (module_bn in line):
val = findValWithFormat(line)
profs['bn1_b_prof'] += val
# Relu1
if (module_grad not in line) and (module_conv1 in line) and ('Minimum' in line or 'Relu' in line) and (module_bn not in line):
val = findValWithFormat(line)
profs['relu1_f_prof'] += val
if (module_grad in line) and (module_conv1 in line) and ('Minimum' in line or 'Relu' in line) and (module_bn not in line):
val = findValWithFormat(line)
profs['relu1_b_prof'] += val
# conv2
if (module_grad not in line) and (module_conv2 in line) and ('Minimum' not in line) and ('Relu' not in line) and (module_bn not in line):
val = findValWithFormat(line)
profs['conv2_f_prof'] += val
if (module_grad in line) and (module_conv2 in line) and ('Minimum' not in line) and ('Relu' not in line) and (module_bn not in line):
val = findValWithFormat(line)
profs['conv2_b_prof'] += val
# BN2
if (module_grad not in line) and (module_conv2 in line) and ('Minimum' not in line) and ('Relu' not in line) and (module_bn in line):
val = findValWithFormat(line)
profs['bn2_f_prof'] += val
if (module_grad in line) and (module_conv2 in line) and ('Minimum' not in line) and ('Relu' not in line) and (module_bn in line):
val = findValWithFormat(line)
profs['bn2_b_prof'] += val
# Relu2
if (module_grad not in line) and (module_conv2 in line) and ('Minimum' in line or 'Relu' in line) and (module_bn not in line):
val = findValWithFormat(line)
profs['relu2_f_prof'] += val
if (module_grad in line) and (module_conv2 in line) and ('Minimum' in line or 'Relu' in line) and (module_bn not in line):
val = findValWithFormat(line)
profs['relu2_b_prof'] += val
#rnn transpose
if (module_grad not in line) and (module_rnn in line) and ('transpose' in line) and (module_rnncell not in line):
val = findValWithFormat(line)
profs['rnn_trans_f_prof'] += val
if (module_grad in line) and (module_rnn in line) and ('transpose' in line) and (module_rnncell not in line):
val = findValWithFormat(line)
profs['rnn_trans_b_prof'] += val
#rnn reshape
if (module_grad not in line) and (module_rnn in line) and ('rnn/Reshape' in line) and (module_rnncell not in line):
val = findValWithFormat(line)
profs['rnn_reshape_f_prof'] += val
if (module_grad in line) and (module_rnn in line) and ('rnn/Reshape' in line) and (module_rnncell not in line):
val = findValWithFormat(line)
profs['rnn_reshape_b_prof'] += val
#rnn reshape
if (module_grad not in line) and (module_rnn in line) and ('ReverseSequence' in line):
val = findValWithFormat(line)
profs['rnn_ReverseSequence_f_prof'] += val
if (module_grad in line) and (module_rnn in line) and ('ReverseSequence' in line):
val = findValWithFormat(line)
profs['rnn_ReverseSequence_b_prof'] += val
# rnn forward profiling by cell
if (module_grad not in line) and (module_rnncell in line):
profRNNCell(line, rnncell_f_prof)
# rnn backward profiling by cell
if (module_grad in line) and (module_rnncell in line):
profRNNCell(line, rnncell_b_prof)
# softmax
if (module_grad not in line) and (module_softmax in line):
val = findValWithFormat(line)
profs['softmax_f_prof'] += val
if (module_grad in line) and (module_softmax in line):
val = findValWithFormat(line)
profs['softmax_b_prof'] += val
# ctc
for c in module_ctc:
if (c in line) and (module_grad not in line):
val = findValWithFormat(line)
profs['ctc_f_prof'] += val
if (c in line) and (module_grad in line):
val = findValWithFormat(line)
profs['ctc_b_prof'] +=val
for key, val in rnncell_f_prof.iteritems():
    overall_cost += val
    print "(RNN forward by cell) " + str(key) + ": " + str(val) + "ms"
for key, val in rnncell_b_prof.iteritems():
    overall_cost += val
    print "(RNN backward by cell) " + str(key) + ": " + str(val) + "ms"
# Profiling result
for k in profs:
    overall_cost += profs[k]
    print k + ": " + str(profs[k]) + "ms"
print "overall: " + str(overall_cost) + "ms"
prf_file1 = open('prf1.txt', 'w')
for k in profs:
    prf_file1.write("%s:%f\n" % (k, profs[k]))
prf_file1.close()
# write including modules
prf_file2 = open('prf2.txt', 'w')
for el in lines:
prf_file2.write("%s\n" % el)
prf_file2.close()
|
maskrcnn_benchmark/modeling/roi_heads/ke_head/inference.py | happog/Box_Discretization_Network | 285 | 43949 | import torch
from torch import nn
import pdb, os
from shapely.geometry import *
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist
import time
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import argrelextrema
import random
import string
all_types = [[1,2,3,4],[1,2,4,3],[1,3,2,4],[1,3,4,2],[1,4,2,3],[1,4,3,2],\
[2,1,3,4],[2,1,4,3],[2,3,1,4],[2,3,4,1],[2,4,1,3],[2,4,3,1],\
[3,1,2,4],[3,1,4,2],[3,2,1,4],[3,2,4,1],[3,4,1,2],[3,4,2,1],\
[4,1,2,3],[4,1,3,2],[4,2,1,3],[4,2,3,1],[4,3,1,2],[4,3,2,1]]
class kePostProcessor(nn.Module):
def __init__(self, keer=None, cfg=None):
super(kePostProcessor, self).__init__()
self.keer = keer
self.cfg = cfg
def forward(self, ft_x, ft_y, mty, boxes):
ke_prob_x = ft_x
ke_prob_y = ft_y
mty_prob = mty
boxes_per_image = [box.bbox.size(0) for box in boxes]
ke_prob_x = ke_prob_x.split(boxes_per_image, dim=0)
ke_prob_y = ke_prob_y.split(boxes_per_image, dim=0)
mty_prob = mty_prob.split(boxes_per_image, dim=0)
results = []
for prob_x, prob_y, prob_mty, box in zip(ke_prob_x, ke_prob_y, mty_prob, boxes):
bbox = BoxList(box.bbox, box.size, mode='xyxy')
for field in box.fields():
bbox.add_field(field, box.get_field(field))
if self.keer:
prob_x, rescores_x = self.keer(prob_x, box)
prob_y, rescores_y = self.keer(prob_y, box)
rescores = (rescores_x+rescores_y)*0.5
if self.cfg.MODEL.ROI_KE_HEAD.RESCORING:
bbox.add_field('scores', rescores)
prob = torch.cat((prob_x,prob_y), dim = -2)
prob = prob[..., :1]
prob = textKES(prob, box.size)
bbox.add_field('ke', prob)
bbox.add_field('mty', prob_mty)
results.append(bbox)
return results
# TODO remove and use only the keer
import numpy as np
import cv2
def scores_to_probs(scores):
"""Transforms CxHxW of scores to probabilities spatially."""
channels = scores.shape[0]
for c in range(channels):
temp = scores[c, :, :]
max_score = temp.max()
temp = np.exp(temp - max_score) / np.sum(np.exp(temp - max_score))
scores[c, :, :] = temp
return scores
def kes_decode(kes):
# BDN decode
for ix, i in enumerate(kes):
mnd = i[0, 0]
nkes = i.shape[1]-2
kes[ix][0, 1:5] = kes[ix][0, 1:5]*2 - mnd
return kes
def heatmaps_to_kes(maps, rois, scores, cfg):
"""Extract predicted ke locations from heatmaps. Output has shape
(#rois, 4, #kes) with the 4 rows corresponding to (x, y, logit, prob)
for each ke.
"""
# This function converts a discrete image coordinate in a HEATMAP_SIZE x
# HEATMAP_SIZE image to a continuous ke coordinate. We maintain
# consistency with kes_to_heatmap_labels by using the conversion from
# Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a
# continuous coordinate.
offset_x = rois[:, 0]
offset_y = rois[:, 1]
widths = rois[:, 2] - rois[:, 0]
heights = rois[:, 3] - rois[:, 1]
widths = np.maximum(widths, 1)
heights = np.maximum(heights, 1)
widths_ceil = np.ceil(widths)
heights_ceil = np.ceil(heights)
resol = cfg.MODEL.ROI_KE_HEAD.RESOLUTION # cfg.mo... 56
if maps.shape[-2:] == (1, resol):
xory_mode = 0 # x mode
elif maps.shape[-2:] == (resol, 1):
xory_mode = 1 # y mode
else:
assert(0), 'invalid mode.'
# print("maps", maps.shape, maps[0,0], maps[0,1])
# NCHW to NHWC for use with OpenCV
maps = np.transpose(maps, [0, 2, 3, 1])
min_size = 0 # cfg
num_kes = int(cfg.MODEL.ROI_KE_HEAD.NUM_KES/2)+2
d_preds = np.zeros(
(len(rois), 2, num_kes), dtype=np.float32)
d_scores = np.zeros(scores.shape, dtype=np.float32)
assert(len(rois) == maps.shape[0]), 'shape mismatch {}, {}, {}, {}'.format(str(len(rois)), \
str(rois.shape), \
str(maps.shape[0]), \
str(maps.shape))
normal = 0
innormal = 0
for i in range(len(rois)):
if min_size > 0:
roi_map_width = int(np.maximum(widths_ceil[i], min_size))
roi_map_height = int(np.maximum(heights_ceil[i], min_size))
else:
roi_map_width = widths_ceil[i]
roi_map_height = heights_ceil[i]
width_correction = widths[i] / roi_map_width
height_correction = heights[i] / roi_map_height
np.set_printoptions(suppress=True)
# print(i, "stop", maps.shape, np.around(maps[i][0, :, :], decimals=2))
if not xory_mode:
roi_map = cv2.resize(
maps[i], (roi_map_width, 1), interpolation=cv2.INTER_CUBIC)
else:
roi_map = cv2.resize(
maps[i], (1, roi_map_height), interpolation=cv2.INTER_CUBIC)
# print(roi_map.shape, np.around(roi_map[0, :, :], decimals=2))
# Bring back to CHW
roi_map = np.transpose(roi_map, [2, 0, 1])
roi_map_probs = scores_to_probs(roi_map.copy())
# kescore visulize.
map_vis = np.transpose(maps[i], [2, 0, 1])
map_vis = scores_to_probs(map_vis.copy())
sum_score = []
if cfg.MODEL.ROI_KE_HEAD.RESCORING:
for k in range(num_kes):
if map_vis[k].shape[0] == 1:
x = np.arange(0, len(map_vis[k][0]), 1)
y = map_vis[k][0]
else:
x = np.arange(0, len(map_vis[k][:, 0]), 1)
y = map_vis[k][:, 0]
top = y.max()
atop = y.argmax()
# lf2&1
lf2 = max(atop-2, 0)
lf1 = max(atop-1, 0)
rt2 = min(atop+2, 55)
rt1 = min(atop+1, 55)
sum_score.append(top+y[lf2]+y[lf1]+y[rt1]+y[rt2])
kes_score_mean = sum(sum_score)*1.0/len(sum_score)
gama = cfg.MODEL.ROI_KE_HEAD.RESCORING_GAMA
final_score = (scores[i]*(2.0-gama)+gama*kes_score_mean)*0.5
# rescore
d_scores[i] = final_score
else:
d_scores[i] = scores[i]
w = roi_map.shape[2]
for k in range(num_kes):
pos = roi_map[k, :, :].argmax()
x_int = pos % w
y_int = (pos - x_int) // w
assert (roi_map_probs[k, y_int, x_int] ==
roi_map_probs[k, :, :].max())
x = (x_int + 0.5) * width_correction
y = (y_int + 0.5) * height_correction
if not xory_mode:
d_preds[i, 0, k] = x + offset_x[i]
d_preds[i, 1, k] = roi_map_probs[k, y_int, x_int]
else:
d_preds[i, 0, k] = y + offset_y[i]
d_preds[i, 1, k] = roi_map_probs[k, y_int, x_int]
out_kes_d = kes_decode(d_preds)
return np.transpose(out_kes_d, [0, 2, 1]), d_scores
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.ke import textKES
class KEer(object):
"""
Projects a set of masks in an image on the locations
specified by the bounding boxes
"""
def __init__(self, padding=0, cfg =None):
self.padding = padding
self.cfg =cfg
def compute_flow_field_cpu(self, boxes):
im_w, im_h = boxes.size
boxes_data = boxes.bbox
num_boxes = len(boxes_data)
device = boxes_data.device
TO_REMOVE = 1
boxes_data = boxes_data.int()
box_widths = boxes_data[:, 2] - boxes_data[:, 0] + TO_REMOVE
box_heights = boxes_data[:, 3] - boxes_data[:, 1] + TO_REMOVE
box_widths.clamp_(min=1)
box_heights.clamp_(min=1)
boxes_data = boxes_data.tolist()
box_widths = box_widths.tolist()
box_heights = box_heights.tolist()
flow_field = torch.full((num_boxes, im_h, im_w, 2), -2)
# TODO maybe optimize to make it GPU-friendly with advanced indexing
# or dedicated kernel
for i in range(num_boxes):
w = box_widths[i]
h = box_heights[i]
if w < 2 or h < 2:
continue
x = torch.linspace(-1, 1, w)
y = torch.linspace(-1, 1, h)
            # meshgrid
x = x[None, :].expand(h, w)
y = y[:, None].expand(h, w)
b = boxes_data[i]
x_0 = max(b[0], 0)
x_1 = min(b[2] + 0, im_w)
y_0 = max(b[1], 0)
y_1 = min(b[3] + 0, im_h)
flow_field[i, y_0:y_1, x_0:x_1, 0] = x[(y_0 - b[1]):(y_1 - b[1]),(x_0 - b[0]):(x_1 - b[0])]
flow_field[i, y_0:y_1, x_0:x_1, 1] = y[(y_0 - b[1]):(y_1 - b[1]),(x_0 - b[0]):(x_1 - b[0])]
return flow_field.to(device)
def compute_flow_field(self, boxes):
return self.compute_flow_field_cpu(boxes)
# TODO make it work better for batches
def forward_single_image(self, masks, boxes):
boxes = boxes.convert('xyxy')
if self.padding:
boxes = BoxList(boxes.bbox.clone(), boxes.size, boxes.mode)
masks, scale = expand_masks(masks, self.padding)
boxes.bbox = expand_boxes(boxes.bbox, scale)
flow_field = self.compute_flow_field(boxes)
result = torch.nn.functional.grid_sample(masks, flow_field)
return result
def to_points(self, masks):
height, width = masks.shape[-2:]
m = masks.view(masks.shape[:2] + (-1,))
scores, pos = m.max(-1)
x_int = pos % width
y_int = (pos - x_int) // width
result = torch.stack([x_int.float(), y_int.float(), torch.ones_like(x_int, dtype=torch.float32)], dim=2)
return result
def __call__(self, masks, boxes):
# TODO do this properly
if isinstance(boxes, BoxList):
boxes = [boxes]
if isinstance(masks, list):
masks = torch.stack(masks, dim=0)
assert(len(masks.size()) == 4)
scores = boxes[0].get_field("scores")
result, rescores = heatmaps_to_kes(masks.detach().cpu().numpy(), boxes[0].bbox.cpu().numpy(), scores.cpu().numpy(), self.cfg)
return torch.from_numpy(result).to(masks.device), torch.from_numpy(rescores).to(masks.device)
def make_roi_ke_post_processor(cfg):
if cfg.MODEL.ROI_KE_HEAD.POSTPROCESS_KES:
keer = KEer(padding=0, cfg=cfg)
else:
keer = None
ke_post_processor = kePostProcessor(keer,cfg)
return ke_post_processor
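# Hedged usage note (added for illustration): within the ROI KE head the factory above is
# expected to be used roughly as below; the names of the logit tensors and proposals are
# assumptions, only the forward() signature comes from kePostProcessor itself.
#
#     post_processor = make_roi_ke_post_processor(cfg)
#     boxes_with_ke = post_processor(ke_logits_x, ke_logits_y, mty_logits, proposals)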
|
submission/util.py | pwqbot/eoj3 | 107 | 43952 | <reponame>pwqbot/eoj3<filename>submission/util.py
class SubmissionStatus(object):
SUBMITTED = -4
WAITING = -3
JUDGING = -2
WRONG_ANSWER = -1
ACCEPTED = 0
TIME_LIMIT_EXCEEDED = 1
IDLENESS_LIMIT_EXCEEDED = 2
MEMORY_LIMIT_EXCEEDED = 3
RUNTIME_ERROR = 4
SYSTEM_ERROR = 5
COMPILE_ERROR = 6
SCORED = 7
REJECTED = 10
JUDGE_ERROR = 11
PRETEST_PASSED = 12
@staticmethod
def is_judged(status):
return status >= SubmissionStatus.WRONG_ANSWER
@staticmethod
def is_penalty(status):
return SubmissionStatus.is_judged(status) and status != SubmissionStatus.COMPILE_ERROR
@staticmethod
def is_accepted(status):
return status == SubmissionStatus.ACCEPTED or status == SubmissionStatus.PRETEST_PASSED
@staticmethod
def is_scored(status):
return status == SubmissionStatus.SCORED
STATUS_CHOICE = (
(-4, 'Submitted'),
(-3, 'In queue'),
(-2, 'Running'),
(-1, 'Wrong answer'),
(0, 'Accepted'),
(1, 'Time limit exceeded'),
(2, 'Idleness limit exceeded'),
(3, 'Memory limit exceeded'),
(4, 'Runtime error'),
(5, 'Denial of judgement'),
(6, 'Compilation error'),
(7, 'Partial score'),
(10, 'Rejected'),
(11, 'Checker error'),
(12, 'Pretest passed'),
)
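# Illustrative sketch (added, not part of the upstream module): STATUS_CHOICE doubles as a
# lookup table for display labels, while the class methods classify raw status codes.
#
#     STATUS_LABELS = dict(STATUS_CHOICE)
#     assert STATUS_LABELS[SubmissionStatus.ACCEPTED] == 'Accepted'
#     assert SubmissionStatus.is_accepted(SubmissionStatus.PRETEST_PASSED)
#     assert not SubmissionStatus.is_penalty(SubmissionStatus.COMPILE_ERROR)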
|
repoxplorer/tests/test_yamlbackend.py | Priya-100/repoxplorer | 107 | 43965 | <reponame>Priya-100/repoxplorer
# Copyright 2017, Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
from unittest import TestCase
from repoxplorer.index.yamlbackend import YAMLBackend
class TestYAMLBackend(TestCase):
def setUp(self):
pass
def tearDown(self):
if os.path.isdir(self.db):
shutil.rmtree(self.db)
def create_db(self, files):
self.db = tempfile.mkdtemp()
for filename, content in files.items():
open(os.path.join(self.db, filename), 'w+').write(content)
def test_yamlbackend_load(self):
f1 = """
---
key: value
"""
f2 = """
---
key2: value2
"""
files = {'f1.yaml': f1, 'f2.yaml': f2}
self.create_db(files)
backend = YAMLBackend(db_path=self.db)
backend.load_db()
default_data, data = backend.get_data()
self.assertEqual(default_data, None)
self.assertEqual(len(data), 2)
def test_yamlbackend_load_with_default(self):
f1 = """
---
key: value
"""
f2 = """
---
key2: value2
"""
files = {'default.yaml': f1, 'f2.yaml': f2}
self.create_db(files)
backend = YAMLBackend(
db_path=self.db,
db_default_file=os.path.join(self.db, 'default.yaml'))
backend.load_db()
default_data, data = backend.get_data()
self.assertDictEqual(default_data, {'key': 'value'})
self.assertEqual(len(data), 1)
self.assertDictEqual(data[0], {'key2': 'value2'})
|
pdns-admin-base-ngoduykhanh/run.py | xoxefdp/docker-pdns | 153 | 43991 | #!/usr/bin/env python3
from powerdnsadmin import create_app
app = create_app()
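# Hedged note (added): create_app() follows the application-factory pattern, so the
# module-level "app" above can be handed to any WSGI server; the exact invocation used by
# the Docker image is an assumption.
#
#     gunicorn 'run:app' --bind 0.0.0.0:80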
|
tkinter/scrolled-frame-canvas/scrolledframe.py | whitmans-max/python-examples | 140 | 44005 | import tkinter as tk
class ScrolledFrame(tk.Frame):
def __init__(self, parent, vertical=True, horizontal=False):
super().__init__(parent)
# canvas for inner frame
self._canvas = tk.Canvas(self)
self._canvas.grid(row=0, column=0, sticky='news') # changed
# create right scrollbar and connect to canvas Y
self._vertical_bar = tk.Scrollbar(self, orient='vertical', command=self._canvas.yview)
if vertical:
self._vertical_bar.grid(row=0, column=1, sticky='ns')
self._canvas.configure(yscrollcommand=self._vertical_bar.set)
# create bottom scrollbar and connect to canvas X
self._horizontal_bar = tk.Scrollbar(self, orient='horizontal', command=self._canvas.xview)
if horizontal:
self._horizontal_bar.grid(row=1, column=0, sticky='we')
self._canvas.configure(xscrollcommand=self._horizontal_bar.set)
# inner frame for widgets
self.inner = tk.Frame(self._canvas)
self._window = self._canvas.create_window((0, 0), window=self.inner, anchor='nw')
# autoresize inner frame
self.columnconfigure(0, weight=1) # changed
self.rowconfigure(0, weight=1) # changed
# resize when configure changed
self.inner.bind('<Configure>', self.resize)
# resize inner frame to canvas size
self.resize_width = False
self.resize_height = False
self._canvas.bind('<Configure>', self.inner_resize)
def resize(self, event=None):
self._canvas.configure(scrollregion=self._canvas.bbox('all'))
def inner_resize(self, event):
# resize inner frame to canvas size
if self.resize_width:
self._canvas.itemconfig(self._window, width=event.width)
if self.resize_height:
self._canvas.itemconfig(self._window, height=event.height)
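# A minimal usage sketch (added for illustration): widgets are packed into the exposed
# "inner" frame, while the ScrolledFrame itself is placed like any ordinary tk.Frame.
if __name__ == '__main__':
    root = tk.Tk()
    sf = ScrolledFrame(root, vertical=True)
    sf.pack(fill='both', expand=True)
    for i in range(50):
        tk.Label(sf.inner, text='row %d' % i).pack(anchor='w')
    root.mainloop()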
|
openbook_circles/validators.py | TamaraAbells/okuna-api | 164 | 44044 | <filename>openbook_circles/validators.py
from rest_framework.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from openbook_circles.models import Circle
def circle_id_exists(circle_id):
count = Circle.objects.filter(id=circle_id).count()
if count == 0:
raise ValidationError(
_('The circle does not exist.'),
)
|
dtreeviz/__init__.py | alitrack/dtreeviz | 1,905 | 44057 | <gh_stars>1000+
from .version import __version__
from dtreeviz.classifiers import clfviz
|
DN/ioUtils.py | dendisuhubdy/dwt | 206 | 44087 | <filename>DN/ioUtils.py
import numpy as np
import skimage
import skimage.io
import scipy.io as sio
import skimage.transform
import sys
np.random.seed(0)
VGG_MEAN = [103.939, 116.779, 123.68]
def read_mat(path):
return np.load(path)
def write_mat(path, m):
np.save(path, m)
def read_ids(path):
return [line.rstrip('\n') for line in open(path)]
class Batch_Feeder:
def __init__(self, dataset, indices, train, batchSize, padWidth=None, padHeight=None, flip=False, keepEmpty=True):
self._epochs_completed = 0
self._index_in_epoch = 0
self._dataset = dataset
self._indices = indices
self._train = train
self._batchSize = batchSize
self._padWidth = padWidth
self._padHeight = padHeight
self._flip = flip
self._keepEmpty = keepEmpty
def set_paths(self, idList=None, imageDir=None, gtDir=None, ssDir=None):
self._paths = []
if self._train:
for id in idList:
self._paths.append([id, imageDir + '/' + id + '_leftImg8bit.png',
gtDir + '/' + id + '_unified_GT.mat',
ssDir + '/' + id + '_unified_ss.mat'])
self.shuffle()
else:
for id in idList:
self._paths.append([id, imageDir + '/' + id + '_leftImg8bit.png',
ssDir + '/' + id + '_unified_ss.mat'])
self._numData = len(self._paths)
if self._numData < self._batchSize:
self._batchSize = self._numData
def shuffle(self):
np.random.shuffle(self._paths)
def next_batch(self):
idBatch = []
imageBatch = []
gtBatch = []
ssBatch = []
ssMaskBatch = []
weightBatch = []
if self._train:
while(len(idBatch) < self._batchSize):
ss = (sio.loadmat(self._paths[self._index_in_epoch][3])['mask']).astype(float)
ssMask = ss
ss = np.sum(ss[:,:,self._indices], 2)
background = np.zeros(ssMask.shape[0:2] + (1,))
ssMask = np.concatenate((ssMask[:,:,[1,2,3,4]], background, ssMask[:,:,[0,5,6,7]]), axis=-1)
ssMask = np.argmax(ssMask, axis=-1)
ssMask = ssMask.astype(float)
ssMask = (ssMask - 4) * 32 # centered at 0, with 0 being background, spaced 32 apart for classes
if ss.sum() > 0 or self._keepEmpty:
idBatch.append(self._paths[self._index_in_epoch][0])
image = (self.image_scaling(skimage.io.imread(self._paths[self._index_in_epoch][1]))).astype(float)
gt = (sio.loadmat(self._paths[self._index_in_epoch][2])['dir_map']).astype(float)
weight = (sio.loadmat(self._paths[self._index_in_epoch][2])['weight_map']).astype(float)
imageBatch.append(self.pad(image))
gtBatch.append(self.pad(gt))
weightBatch.append(self.pad(weight))
ssBatch.append(self.pad(ss))
ssMaskBatch.append(self.pad(ssMask))
else:
pass
# raw_input("skipping " + self._paths[self._index_in_epoch][0])
self._index_in_epoch += 1
if self._index_in_epoch == self._numData:
self._index_in_epoch = 0
self.shuffle()
imageBatch = np.array(imageBatch)
gtBatch = np.array(gtBatch)
ssBatch = np.array(ssBatch)
ssMaskBatch = np.array(ssMaskBatch)
weightBatch = np.array(weightBatch)
if self._flip and np.random.uniform() > 0.5:
for i in range(len(imageBatch)):
for j in range(3):
imageBatch[i,:,:,j] = np.fliplr(imageBatch[i,:,:,j])
weightBatch[i] = np.fliplr(weightBatch[i])
ssBatch[i] = np.fliplr(ssBatch[i])
ssMaskBatch[i] = np.fliplr(ssMaskBatch[i])
for j in range(2):
gtBatch[i,:,:,j] = np.fliplr(gtBatch[i,:,:,j])
gtBatch[i,:,:,0] = -1 * gtBatch[i,:,:,0]
return imageBatch, gtBatch, weightBatch, ssBatch, ssMaskBatch, idBatch
else:
for example in self._paths[self._index_in_epoch:min(self._index_in_epoch+self._batchSize, self._numData)]:
imageBatch.append(self.pad((self.image_scaling(skimage.io.imread(example[1]))).astype(float)))
idBatch.append(example[0])
ss = (sio.loadmat(example[2])['mask']).astype(float)
ssMask = ss
ss = np.sum(ss[:, :, self._indices], 2)
background = np.zeros(ssMask.shape[0:2] + (1,))
ssMask = np.concatenate((ssMask[:,:,[1,2,3,4]], background, ssMask[:,:,[0,5,6,7]]), axis=-1)
ssMask = np.argmax(ssMask, axis=-1)
ssMask = ssMask.astype(float)
ssMask = (ssMask - 4) * 32 # centered at 0, with 0 being background, spaced 32 apart for classes
ssBatch.append(self.pad(ss))
ssMaskBatch.append(self.pad(ssMask))
imageBatch = np.array(imageBatch)
ssBatch = np.array(ssBatch)
ssMaskBatch = np.array(ssMaskBatch)
self._index_in_epoch += self._batchSize
return imageBatch, ssBatch, ssMaskBatch, idBatch
def total_samples(self):
return self._numData
def image_scaling(self, rgb_in):
if rgb_in.dtype == np.float32:
rgb_in = rgb_in*255
elif rgb_in.dtype == np.uint8:
rgb_in = rgb_in.astype(np.float32)
# VGG16 was trained using opencv which reads images as BGR, but skimage reads images as RGB
rgb_out = np.zeros(rgb_in.shape).astype(np.float32)
rgb_out[:,:,0] = rgb_in[:,:,2] - VGG_MEAN[2]
rgb_out[:,:,1] = rgb_in[:,:,1] - VGG_MEAN[1]
rgb_out[:,:,2] = rgb_in[:,:,0] - VGG_MEAN[0]
return rgb_out
def pad(self, data):
if self._padHeight and self._padWidth:
if data.ndim == 3:
npad = ((0,self._padHeight-data.shape[0]),(0,self._padWidth-data.shape[1]),(0,0))
elif data.ndim == 2:
npad = ((0, self._padHeight - data.shape[0]), (0, self._padWidth - data.shape[1]))
padData = np.pad(data, npad, mode='constant', constant_values=0)
else:
padData = data
return padData
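# Hedged usage sketch (added, not part of the upstream module): the directory layout and id
# list file are hypothetical; it only shows the intended call order for the training feeder.
#
#     feeder = Batch_Feeder(dataset='cityscapes', indices=[0, 5, 6, 7], train=True,
#                           batchSize=4, padWidth=1024, padHeight=512, flip=True)
#     feeder.set_paths(idList=read_ids('train_ids.txt'),
#                      imageDir='images', gtDir='gt', ssDir='ss')
#     images, gts, weights, ss, ss_mask, ids = feeder.next_batch()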
|