filename (stringlengths 13-19) | text (stringlengths 134-1.04M)
---|---|
the-stack_0_24005 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from .. import models
class NetworkInterfaceLoadBalancersOperations(object):
"""NetworkInterfaceLoadBalancersOperations operations.
You should not instantiate this class directly; instead, create a Client instance that will create it for you and attach it as an attribute.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Client API version. Constant value: "2017-06-01".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2017-06-01"
self.config = config
def list(
self, resource_group_name, network_interface_name, custom_headers=None, raw=False, **operation_config):
"""Get all load balancers in a network interface.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_interface_name: The name of the network interface.
:type network_interface_name: str
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: An iterator like instance of LoadBalancer
:rtype:
~azure.mgmt.network.v2017_06_01.models.LoadBalancerPaged[~azure.mgmt.network.v2017_06_01.models.LoadBalancer]
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
def prepare_request(next_link=None):
if not next_link:
# Construct URL
url = self.list.metadata['url']
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkInterfaceName': self._serialize.url("network_interface_name", network_interface_name, 'str'),
'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
else:
url = next_link
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Accept'] = 'application/json'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct and send request
request = self._client.get(url, query_parameters, header_parameters)
return request
def internal_paging(next_link=None):
request = prepare_request(next_link)
response = self._client.send(request, stream=False, **operation_config)
if response.status_code not in [200]:
exp = CloudError(response)
exp.request_id = response.headers.get('x-ms-request-id')
raise exp
return response
# Deserialize response
header_dict = None
if raw:
header_dict = {}
deserialized = models.LoadBalancerPaged(internal_paging, self._deserialize.dependencies, header_dict)
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/loadBalancers'}
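# Hedged usage sketch (added; not part of the generated file): this operations class is
# normally reached through a NetworkManagementClient rather than instantiated directly.
# The attribute and call below follow the 2017-06-01 SDK layout and are an assumption.
# from azure.mgmt.network import NetworkManagementClient
# client = NetworkManagementClient(credentials, subscription_id)
# for lb in client.network_interface_load_balancers.list('my-rg', 'my-nic'):
#     print(lb.name)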
|
the-stack_0_24006 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import subprocess
from contextlib import contextmanager
from textwrap import dedent
from pants.backend.jvm.subsystems.junit import JUnit
from pants.backend.jvm.subsystems.jvm_platform import JvmPlatform
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.junit_tests import JUnitTests
from pants.backend.jvm.tasks.coverage.cobertura import Cobertura
from pants.backend.jvm.tasks.coverage.engine import NoCoverage
from pants.backend.jvm.tasks.coverage.jacoco import Jacoco
from pants.backend.jvm.tasks.coverage.manager import CodeCoverage
from pants.backend.jvm.tasks.junit_run import JUnitRun
from pants.backend.python.targets.python_tests import PythonTests
from pants.base.exceptions import TargetDefinitionException, TaskError
from pants.build_graph.build_file_aliases import BuildFileAliases
from pants.build_graph.files import Files
from pants.build_graph.resources import Resources
from pants.ivy.bootstrapper import Bootstrapper
from pants.ivy.ivy_subsystem import IvySubsystem
from pants.java.distribution.distribution import DistributionLocator
from pants.java.executor import SubprocessExecutor
from pants.util.contextutil import environment_as, temporary_dir
from pants.util.dirutil import touch
from pants_test.jvm.jvm_tool_task_test_base import JvmToolTaskTestBase
from pants_test.subsystem.subsystem_util import global_subsystem_instance, init_subsystem
from pants_test.task_test_base import ensure_cached
class JUnitRunnerTest(JvmToolTaskTestBase):
@classmethod
def task_type(cls):
return JUnitRun
@classmethod
def alias_groups(cls):
return super().alias_groups().merge(BuildFileAliases(
targets={
'files': Files,
'junit_tests': JUnitTests,
'python_tests': PythonTests,
},
))
def setUp(self):
super().setUp()
init_subsystem(JUnit)
@ensure_cached(JUnitRun, expected_num_artifacts=1)
def test_junit_runner_success(self):
self._execute_junit_runner(
[('FooTest.java', dedent("""
import org.junit.Test;
import static org.junit.Assert.assertTrue;
public class FooTest {
@Test
public void testFoo() {
assertTrue(5 > 3);
}
}
"""))]
)
@ensure_cached(JUnitRun, expected_num_artifacts=0)
def test_junit_runner_failure(self):
with self.assertRaises(TaskError) as cm:
self._execute_junit_runner(
[('FooTest.java', dedent("""
import org.junit.Test;
import static org.junit.Assert.assertTrue;
public class FooTest {
@Test
public void testFoo() {
assertTrue(5 < 3);
}
}
"""))]
)
self.assertEqual([t.name for t in cm.exception.failed_targets], ['foo_test'])
@ensure_cached(JUnitRun, expected_num_artifacts=0)
def test_junit_runner_error(self):
with self.assertRaises(TaskError) as cm:
self._execute_junit_runner(
[('FooTest.java', dedent("""
import org.junit.Test;
public class FooTest {
@Test
public void testFoo() {
throw new RuntimeException("test error");
}
}
"""))]
)
self.assertEqual([t.name for t in cm.exception.failed_targets], ['foo_test'])
def _execute_junit_runner(self, list_of_filename_content_tuples, create_some_resources=True,
target_name=None):
# Create the temporary base test directory
test_rel_path = 'tests/java/org/pantsbuild/foo'
test_abs_path = self.create_dir(test_rel_path)
# Create the temporary classes directory under work dir
test_classes_abs_path = self.create_workdir_dir(test_rel_path)
test_java_file_abs_paths = []
# Generate the temporary java test source code.
for filename, content in list_of_filename_content_tuples:
test_java_file_rel_path = os.path.join(test_rel_path, filename)
test_java_file_abs_path = self.create_file(test_java_file_rel_path, content)
test_java_file_abs_paths.append(test_java_file_abs_path)
# Invoke ivy to resolve classpath for junit.
classpath_file_abs_path = os.path.join(test_abs_path, 'junit.classpath')
ivy_subsystem = global_subsystem_instance(IvySubsystem)
distribution = DistributionLocator.cached(jdk=True)
ivy = Bootstrapper(ivy_subsystem=ivy_subsystem).ivy()
ivy.execute(args=['-cachepath', classpath_file_abs_path,
'-dependency', 'junit', 'junit-dep', '4.10'],
executor=SubprocessExecutor(distribution=distribution))
with open(classpath_file_abs_path, 'r') as fp:
classpath = fp.read()
# Now directly invoke javac to compile the test java code into classfiles that we can later
# inject into a product mapping for JUnitRun to execute against.
javac = distribution.binary('javac')
subprocess.check_call(
[javac, '-d', test_classes_abs_path, '-cp', classpath] + test_java_file_abs_paths)
# If a target_name is specified create a target with it, otherwise create a junit_tests target.
if target_name:
target = self.target(target_name)
else:
target = self.create_library(test_rel_path, 'junit_tests', 'foo_test', ['FooTest.java'])
target_roots = []
if create_some_resources:
# Create a synthetic resource target.
target_roots.append(self.make_target('some_resources', Resources))
target_roots.append(target)
# Set the context with the two targets, one junit_tests target and
# one synthetic resources target.
# The synthetic resources target is to make sure we won't regress
# in the future with bug like https://github.com/pantsbuild/pants/issues/508. Note
# in that bug, the resources target must be the first one in the list.
context = self.context(target_roots=target_roots)
# Before we run the task, we need to inject the "runtime_classpath" with
# the compiled test java classes that JUnitRun will know which test
# classes to execute. In a normal run, this "runtime_classpath" will be
# populated by java compilation step.
self.populate_runtime_classpath(context=context, classpath=[test_classes_abs_path])
# Finally execute the task.
self.execute(context)
@ensure_cached(JUnitRun, expected_num_artifacts=0)
def test_junit_runner_raises_no_error_on_non_junit_target(self):
"""Run pants against a `python_tests` target, but set an option for the `test.junit` task. This
should execute without error.
"""
self.add_to_build_file('foo', dedent("""
python_tests(
name='hello',
sources=['some_file.py'],
)
"""
))
self.set_options(test='#abc')
self.execute(self.context(target_roots=[self.target('foo:hello')]))
@ensure_cached(JUnitRun, expected_num_artifacts=0)
def test_empty_sources(self):
self.add_to_build_file('foo', dedent("""
junit_tests(
name='empty',
sources=[],
)
"""
))
task = self.prepare_execute(self.context(target_roots=[self.target('foo:empty')]))
with self.assertRaisesRegexp(TargetDefinitionException,
r'must include a non-empty set of sources'):
task.execute()
@ensure_cached(JUnitRun, expected_num_artifacts=1)
def test_allow_empty_sources(self):
self.add_to_build_file('foo', dedent("""
junit_tests(
name='empty',
sources=[],
)
"""
))
self.set_options(allow_empty_sources=True)
context = self.context(target_roots=[self.target('foo:empty')])
self.populate_runtime_classpath(context=context)
self.execute(context)
@ensure_cached(JUnitRun, expected_num_artifacts=1)
def test_junit_runner_extra_jvm_options(self):
self.make_target(
spec='tests/java/org/pantsbuild/foo:foo_test',
target_type=JUnitTests,
sources=['FooTest.java'],
extra_jvm_options=['-Dexample.property=1'],
)
self._execute_junit_runner([('FooTest.java', dedent("""
package org.pantsbuild.foo;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
public class FooTest {
@Test
public void testFoo() {
String exampleProperty = System.getProperty("example.property");
assertTrue(exampleProperty != null && exampleProperty.equals("1"));
}
}
"""))], target_name='tests/java/org/pantsbuild/foo:foo_test')
@ensure_cached(JUnitRun, expected_num_artifacts=1)
def test_junit_runner_platform_args(self):
self.make_target(
spec='tests/java/org/pantsbuild/foo:foo_test',
target_type=JUnitTests,
sources=['FooTest.java'],
test_platform='java8-extra',
#extra_jvm_options=['-Dexample.property=1'],
)
self.set_options_for_scope(JvmPlatform.options_scope,
platforms={
'java8-extra': {
'source': '8',
'target': '8',
'args': ['-Dexample.property=1'] },})
self._execute_junit_runner([('FooTest.java', dedent("""
package org.pantsbuild.foo;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
public class FooTest {
@Test
public void testFoo() {
String exampleProperty = System.getProperty("example.property");
assertTrue(exampleProperty != null && exampleProperty.equals("1"));
}
}
"""))], target_name='tests/java/org/pantsbuild/foo:foo_test')
@ensure_cached(JUnitRun, expected_num_artifacts=1)
def test_junit_runner_multiple_extra_jvm_options(self):
self.make_target(
spec='tests/java/org/pantsbuild/foo:foo_test',
target_type=JUnitTests,
sources=['FooTest.java'],
extra_jvm_options=['-Dexample.property1=1','-Dexample.property2=2'],
)
self._execute_junit_runner([('FooTest.java', dedent("""
package org.pantsbuild.foo;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
public class FooTest {
@Test
public void testFoo() {
String exampleProperty1 = System.getProperty("example.property1");
assertTrue(exampleProperty1 != null && exampleProperty1.equals("1"));
String exampleProperty2 = System.getProperty("example.property2");
assertTrue(exampleProperty2 != null && exampleProperty2.equals("2"));
String exampleProperty3 = System.getProperty("example.property3");
assertTrue(exampleProperty3 == null);
}
}
"""))], target_name='tests/java/org/pantsbuild/foo:foo_test')
# 2 runs with different targets (unique configurations), should cache twice.
@ensure_cached(JUnitRun, expected_num_artifacts=2)
def test_junit_runner_extra_env_vars(self):
self.make_target(
spec='tests/java/org/pantsbuild/foo:foo_test',
target_type=JUnitTests,
sources=['FooTest.java'],
extra_env_vars={
'HELLO': 27,
'THERE': 32,
},
)
self.make_target(
spec='tests/java/org/pantsbuild/foo:bar_test',
target_type=JUnitTests,
sources=['FooTest.java'],
extra_env_vars={
'THE_ANSWER': 42,
'HELLO': 12,
},
)
self._execute_junit_runner(
[
('FooTest.java', dedent("""
package org.pantsbuild.foo;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class FooTest {
@Test
public void testFoo() {
assertEquals("27", System.getenv().get("HELLO"));
assertEquals("32", System.getenv().get("THERE"));
}
}
"""))
], target_name='tests/java/org/pantsbuild/foo:foo_test')
# Execute twice in a row to make sure the environment changes aren't sticky.
self._execute_junit_runner([('FooTest.java', dedent("""
package org.pantsbuild.foo;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
public class FooTest {
@Test
public void testFoo() {
assertEquals("12", System.getenv().get("HELLO"));
assertEquals("42", System.getenv().get("THE_ANSWER"));
assertFalse(System.getenv().containsKey("THERE"));
}
}
"""))], target_name='tests/java/org/pantsbuild/foo:bar_test', create_some_resources=False)
@ensure_cached(JUnitRun, expected_num_artifacts=1)
def test_junit_runner_extra_env_vars_none(self):
with environment_as(THIS_VARIABLE="12", THAT_VARIABLE="This is a variable."):
self.make_target(
spec='tests/java/org/pantsbuild/foo:foo_test',
target_type=JUnitTests,
sources=['FooTest.java'],
extra_env_vars={
'HELLO': None,
'THERE': False,
'THIS_VARIABLE': None
},
)
self._execute_junit_runner([('FooTest.java', dedent("""
package org.pantsbuild.foo;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
public class FooTest {
@Test
public void testFoo() {
assertEquals("False", System.getenv().get("THERE"));
assertEquals("This is a variable.", System.getenv().get("THAT_VARIABLE"));
assertFalse(System.getenv().containsKey("HELLO"));
assertFalse(System.getenv().containsKey("THIS_VARIABLE"));
}
}
"""))], target_name='tests/java/org/pantsbuild/foo:foo_test')
@ensure_cached(JUnitRun, expected_num_artifacts=1)
def test_junit_run_with_too_many_args(self):
max_subprocess_args = 2
num_of_classes = 5
list_of_filename_content_tuples = []
for n in range(num_of_classes):
filename = 'FooTest{}.java'.format(n)
content = dedent("""
package org.pantsbuild.foo;
import org.junit.Test;
import static org.junit.Assert.assertTrue;
public class FooTest{}{{
@Test
public void testFoo() {{
int x = 5;
}}
}}""".format(n))
list_of_filename_content_tuples.append((filename, content))
self.make_target(
spec='tests/java/org/pantsbuild/foo:foo_test',
target_type=JUnitTests,
sources=[name for name, _ in list_of_filename_content_tuples],
)
self.set_options(max_subprocess_args=max_subprocess_args)
self._execute_junit_runner(list_of_filename_content_tuples,
target_name='tests/java/org/pantsbuild/foo:foo_test')
@ensure_cached(JUnitRun, expected_num_artifacts=1)
def test_junit_run_chroot(self):
self.create_files('config/org/pantsbuild/foo', ['sentinel', 'another'])
files = self.make_target(
spec='config/org/pantsbuild/foo:sentinel',
target_type=Files,
sources=['sentinel']
)
self.make_target(
spec='tests/java/org/pantsbuild/foo:foo_test',
target_type=JUnitTests,
sources=['FooTest.java'],
dependencies=[files]
)
content = dedent("""
package org.pantsbuild.foo;
import java.io.File;
import org.junit.Test;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class FooTest {
@Test
public void testFoo() {
assertTrue(new File("config/org/pantsbuild/foo/sentinel").exists());
assertFalse(new File("config/org/pantsbuild/foo/another").exists());
}
}
""")
self.set_options(chroot=True)
self._execute_junit_runner([('FooTest.java', content)],
target_name='tests/java/org/pantsbuild/foo:foo_test')
@ensure_cached(JUnitRun, expected_num_artifacts=0)
def test_junit_run_chroot_cwd_mutex(self):
with temporary_dir() as chroot:
self.set_options(chroot=True, cwd=chroot)
with self.assertRaises(JUnitRun.OptionError):
self.execute(self.context())
@ensure_cached(JUnitRun, expected_num_artifacts=1)
def test_junit_run_target_cwd_trumps_chroot(self):
with temporary_dir() as target_cwd:
self.create_files('config/org/pantsbuild/foo', ['files_dep_sentinel'])
files = self.make_target(
spec='config/org/pantsbuild/foo:sentinel',
target_type=Files,
sources=['files_dep_sentinel']
)
self.make_target(
spec='tests/java/org/pantsbuild/foo:foo_test',
target_type=JUnitTests,
sources=['FooTest.java'],
dependencies=[files],
cwd=target_cwd
)
content = dedent("""
package org.pantsbuild.foo;
import java.io.File;
import org.junit.Test;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class FooTest {{
@Test
public void testFoo() {{
assertTrue(new File("target_cwd_sentinel").exists());
// We declare a Files dependency on this file, but since we run in a CWD not in a
// chroot and not in the build root, we can't find it at the expected relative path.
assertFalse(new File("config/org/pantsbuild/foo/files_dep_sentinel").exists());
// As a sanity check, it is at the expected absolute path though.
File buildRoot = new File("{}");
assertTrue(new File(buildRoot,
"config/org/pantsbuild/foo/files_dep_sentinel").exists());
}}
}}
""".format(self.build_root))
touch(os.path.join(target_cwd, 'target_cwd_sentinel'))
self.set_options(chroot=True)
self._execute_junit_runner([('FooTest.java', content)],
target_name='tests/java/org/pantsbuild/foo:foo_test')
@ensure_cached(JUnitRun, expected_num_artifacts=1)
def test_junit_run_target_cwd_trumps_cwd_option(self):
with temporary_dir() as target_cwd:
self.make_target(
spec='tests/java/org/pantsbuild/foo:foo_test',
target_type=JUnitTests,
sources=['FooTest.java'],
cwd=target_cwd
)
content = dedent("""
package org.pantsbuild.foo;
import java.io.File;
import org.junit.Test;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
public class FooTest {
@Test
public void testFoo() {
assertTrue(new File("target_cwd_sentinel").exists());
assertFalse(new File("option_cwd_sentinel").exists());
}
}
""")
touch(os.path.join(target_cwd, 'target_cwd_sentinel'))
with temporary_dir() as option_cwd:
touch(os.path.join(option_cwd, 'option_cwd_sentinel'))
self.set_options(cwd=option_cwd)
self._execute_junit_runner([('FooTest.java', content)],
target_name='tests/java/org/pantsbuild/foo:foo_test')
def test_junit_run_with_coverage_caching(self):
source_under_test_content = dedent("""
package org.pantsbuild.foo;
class Foo {
static String foo() {
return "foo";
}
static String bar() {
return "bar";
}
}
""")
source_under_test = self.make_target(spec='tests/java/org/pantsbuild/foo',
target_type=JavaLibrary,
sources=['Foo.java'])
test_content = dedent("""
package org.pantsbuild.foo;
import org.pantsbuild.foo.Foo;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class FooTest {
@Test
public void testFoo() {
assertEquals("foo", Foo.foo());
}
}
""")
self.make_target(spec='tests/java/org/pantsbuild/foo:foo_test',
target_type=JUnitTests,
sources=['FooTest.java'],
dependencies=[source_under_test])
self.set_options(coverage=True)
with self.cache_check(expected_num_artifacts=1):
self._execute_junit_runner([('Foo.java', source_under_test_content),
('FooTest.java', test_content)],
target_name='tests/java/org/pantsbuild/foo:foo_test')
# Now re-execute with a partial invalidation of the input targets. Since coverage is enabled,
# that input set is {tests/java/org/pantsbuild/foo, tests/java/org/pantsbuild/foo:bar_test}
# with only tests/java/org/pantsbuild/foo:bar_test invalidated. Even though the invalidation is
# partial over all input targets, it is total over all the test targets in the input and so the
# successful result run is eligible for caching.
test_content_edited = dedent("""
package org.pantsbuild.foo;
import org.pantsbuild.foo.Foo;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class FooTest {
@Test
public void testFoo() {
assertEquals("bar", Foo.bar());
}
}
""")
self.make_target(spec='tests/java/org/pantsbuild/foo:bar_test',
target_type=JUnitTests,
sources=['FooTest.java'],
dependencies=[source_under_test])
with self.cache_check(expected_num_artifacts=1):
self._execute_junit_runner([('Foo.java', source_under_test_content),
('FooTest.java', test_content_edited)],
target_name='tests/java/org/pantsbuild/foo:bar_test',
create_some_resources=False)
@contextmanager
def _coverage_engine(self):
junit_run = self.prepare_execute(self.context())
with temporary_dir() as output_dir:
code_coverage = CodeCoverage.global_instance()
source_under_test = self.make_target(spec='tests/java/org/pantsbuild/foo',
target_type=JavaLibrary,
sources=['Foo.java'])
yield code_coverage.get_coverage_engine(task=junit_run,
output_dir=output_dir,
all_targets=[source_under_test],
execute_java=junit_run.execute_java_for_coverage)
def _assert_coverage_engine(self, expected_engine_type):
with self._coverage_engine() as engine:
self.assertIsInstance(engine, expected_engine_type)
def test_coverage_default_off(self):
self._assert_coverage_engine(NoCoverage)
def test_coverage_explicit_on(self):
self.set_options(coverage=True)
self._assert_coverage_engine(Cobertura)
def test_coverage_open_implicit_on(self):
self.set_options(coverage_open=True)
self._assert_coverage_engine(Cobertura)
def test_coverage_processor_implicit_on(self):
self.set_options(coverage_processor='jacoco')
self._assert_coverage_engine(Jacoco)
def test_coverage_processor_invalid(self):
self.set_options(coverage_processor='bob')
with self.assertRaises(CodeCoverage.InvalidCoverageEngine):
with self._coverage_engine():
self.fail("We should never get here.")
|
the-stack_0_24007 | """Initialization routines for ODE filters.
You may use the following (rough) guidelines to choose a suitable strategy
for low(ish) dimensional ODEs.
* ``num_derivatives = 1``: :class:`Stack`
* ``num_derivatives = 2``: :class:`StackWithJacobian` if a Jacobian is available,
or :class:`NonProbabilisticFit` if not.
If JAX is available, compute the Jacobian and use :class:`StackWithJacobian`
(or choose :class:`ForwardModeJVP` altogether).
* ``num_derivatives = 3,4,5``: :class:`NonProbabilisticFitWithJacobian`
if the Jacobian of the ODE vector field is available,
or :class:`NonProbabilisticFit` if not.
* ``num_derivatives > 5``: :class:`TaylorMode`. For orders 6 and 7,
:class:`ForwardModeJVP` might work well too. :class:`TaylorMode` shines for
``num_derivatives >> 5``.
Initialization routines for high-dimensional ODEs are not implemented efficiently yet.
It may also be worth noting:
* Only automatic-differentiation-based routines yield the exact initialization.
This becomes more desirable, the larger the number of modelled derivatives is.
* :class:`ForwardModeJVP` is generally more efficient than :class:`ForwardMode`. The
jury is still out on the efficiency of :class:`ReverseMode`.
* :class:`Stack` and :class:`StackWithJacobian` are the only routines that come
essentially for free.
The other routines rely on either inference or automatic differentiation.
* For stiff ODEs, prefer :class:`NonProbabilisticFitWithJacobian` with ``BDF`` or
``Radau`` over :class:`NonProbabilisticFit` (or use one of the
automatic-differentiation-based routines).
* Initialization routines can be chained together. For example, build a
``prior_process`` with an ``initrv`` that is generated by :class:`StackWithJacobian`,
and initialize the ODE filter with :class:`NonProbabilisticFitWithJacobian`.
"""
from ._autodiff import ForwardMode, ForwardModeJVP, ReverseMode, TaylorMode
from ._interface import InitializationRoutine
from ._non_probabilistic_fit import NonProbabilisticFit, NonProbabilisticFitWithJacobian
from ._stack import Stack, StackWithJacobian
__all__ = [
"InitializationRoutine",
"Stack",
"StackWithJacobian",
"NonProbabilisticFit",
"NonProbabilisticFitWithJacobian",
"ForwardMode",
"ForwardModeJVP",
"ReverseMode",
"TaylorMode",
]
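# Hedged usage sketch (added; not part of the original module): a helper that encodes the
# guidelines from the module docstring. Constructor signatures are an assumption here, so
# the sketch is left commented out.
# def choose_init_routine(num_derivatives, jacobian_available=False):
#     if num_derivatives == 1:
#         return Stack()
#     if num_derivatives == 2:
#         return StackWithJacobian() if jacobian_available else NonProbabilisticFit()
#     if num_derivatives <= 5:
#         return NonProbabilisticFitWithJacobian() if jacobian_available else NonProbabilisticFit()
#     return TaylorMode()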
|
the-stack_0_24011 | import _plotly_utils.basevalidators
class MinexponentValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self,
plotly_name="minexponent",
parent_name="scattergeo.marker.colorbar",
**kwargs,
):
super(MinexponentValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
min=kwargs.pop("min", 0),
**kwargs,
)
|
the-stack_0_24013 | import sys
import logging
import unittest
from nose2 import util
from nose2 import events
log = logging.getLogger(__name__)
__unittest = True
#
# Layer suite class
#
class LayerSuite(unittest.BaseTestSuite):
def __init__(self, session, tests=(), layer=None):
super(LayerSuite, self).__init__(tests)
self.layer = layer
self.wasSetup = False
self.session = session
def run(self, result):
self.handle_previous_test_teardown(result)
if not self._safeMethodCall(self.setUp, result):
return
try:
for test in self:
if result.shouldStop:
break
self._safeMethodCall(self.setUpTest, result, test)
try:
test(result)
finally:
self._safeMethodCall(self.tearDownTest, result, test)
finally:
if self.wasSetup:
self._safeMethodCall(self.tearDown, result)
def handle_previous_test_teardown(self, result):
prev = getattr(result, '_previousTestClass', None)
if prev is None:
return
layer_attr = getattr(prev, 'layer', None)
if isinstance(layer_attr, LayerSuite):
return
try:
suite_obj = unittest.suite.TestSuite()
suite_obj._tearDownPreviousClass(None, result)
suite_obj._handleModuleTearDown(result)
finally:
result._previousTestClass = None
def setUp(self):
if self.layer is None:
return
setup = self._getBoundClassmethod(self.layer, 'setUp')
event = events.StartLayerSetupEvent(self.layer)
self.session.hooks.startLayerSetup(event)
if setup:
setup()
self.wasSetup = True
event = events.StopLayerSetupEvent(self.layer)
self.session.hooks.stopLayerSetup(event)
def setUpTest(self, test):
if self.layer is None:
return
# skip suites, to ensure test setup only runs once around each test
# even for sub-layer suites inside this suite.
try:
iter(test)
except TypeError:
# ok, not a suite
pass
else:
# suite-like enough for skipping
return
if getattr(test, '_layer_wasSetUp', False):
return
event = events.StartLayerSetupTestEvent(self.layer, test)
self.session.hooks.startLayerSetupTest(event)
self._allLayers(test, 'testSetUp')
test._layer_wasSetUp = True
event = events.StopLayerSetupTestEvent(self.layer, test)
self.session.hooks.stopLayerSetupTest(event)
def tearDownTest(self, test):
if self.layer is None:
return
if not getattr(test, '_layer_wasSetUp', None):
return
event = events.StartLayerTeardownTestEvent(self.layer, test)
self.session.hooks.startLayerTeardownTest(event)
self._allLayers(test, 'testTearDown', reverse=True)
event = events.StopLayerTeardownTestEvent(self.layer, test)
self.session.hooks.stopLayerTeardownTest(event)
delattr(test, '_layer_wasSetUp')
def tearDown(self):
if self.layer is None:
return
teardown = self._getBoundClassmethod(self.layer, 'tearDown')
event = events.StartLayerTeardownEvent(self.layer)
self.session.hooks.startLayerTeardown(event)
if teardown:
teardown()
event = events.StopLayerTeardownEvent(self.layer)
self.session.hooks.stopLayerTeardown(event)
def _safeMethodCall(self, method, result, *args):
try:
method(*args)
return True
except KeyboardInterrupt:
raise
except:
result.addError(self, sys.exc_info())
return False
def _allLayers(self, test, method, reverse=False):
done = set()
all_lys = util.ancestry(self.layer)
if reverse:
all_lys = [reversed(lys) for lys in reversed(all_lys)]
for lys in all_lys:
for layer in lys:
if layer in done:
continue
self._inLayer(layer, test, method)
done.add(layer)
def _inLayer(self, layer, test, method):
meth = self._getBoundClassmethod(layer, method)
if meth:
if util.num_expected_args(meth) > 1:
meth(test)
else:
meth()
def _getBoundClassmethod(self, cls, method):
"""
Use instead of :func:`getattr` to get only classmethods explicitly
defined on ``cls`` (not methods inherited from ancestors)
"""
descriptor = cls.__dict__.get(method, None)
if descriptor:
if not isinstance(descriptor, classmethod):
raise TypeError(
'The %s method on a layer must be a classmethod.' % method)
bound_method = descriptor.__get__(None, cls)
return bound_method
else:
return None
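# Illustration (added; not part of the original module): a layer is a plain class whose
# fixture hooks are classmethods, which is exactly what _getBoundClassmethod enforces.
# class DatabaseLayer(object):
#     @classmethod
#     def setUp(cls):
#         cls.resource = object()   # e.g. open a shared fixture once per layer
#     @classmethod
#     def tearDown(cls):
#         cls.resource = None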
|
the-stack_0_24018 | from data_extraction import splitTrainTest, splitTrainTestStratified
import pandas as pd
import pandas_profiling as pdp
from IPython.display import display
import io
import preprocess_vis as prv
class Nekoma:
def __init__(self, method=None, **kwargs):
if method is None:
self.train_set = pd.DataFrame()
self.test_set = pd.DataFrame()
self.target = pd.DataFrame()
self.back_up = self.train_set.copy()
self.transformations = list()
self.counter = 0
elif method == "df":
df = kwargs.get("df")
target = kwargs.get("target")
test_ratio = kwargs.get("test_ratio", 0.2)
seed = kwargs.get("seed", 1)
strat = kwargs.get("strat", False)
if strat is False:
self.train_set, self.test_set = splitTrainTest(
df, test_ratio, seed)
else:
self.train_set, self.test_set = splitTrainTestStratified(
df, target, test_ratio, seed)
self.target = target
self.back_up = self.train_set.copy()
self.transformations = []
self.counter = 0
elif method == "file":
file_path = kwargs.get("file_path")
target = kwargs.get("target")
test_ratio = kwargs.get("test_ratio", 0.2)
seed = kwargs.get("seed", 1)
strat = kwargs.get("strat", False)
# Read the dataframe from the given file path (csv or json), then split it the
# same way as in the "df" branch above.
extension = file_path.split(".")[-1]
if extension == "csv":
df = pd.read_csv(file_path)
elif extension == "json":
df = pd.read_json(file_path)
else:
raise ValueError("Unsupported file extension: {}".format(extension))
if strat is False:
self.train_set, self.test_set = splitTrainTest(
df, test_ratio, seed)
else:
self.train_set, self.test_set = splitTrainTestStratified(
df, target, test_ratio, seed)
self.target = target
self.back_up = self.train_set.copy()
self.transformations = []
self.counter = 0
def reset(self):
self.train_set = self.back_up.copy()
self.counter = 0
self.transformations = []
def setTrainSet(self, train_set):
self.train_set = train_set
self.back_up = self.train_set.copy()
def setTestSet(self, test_set):
self.test_set = test_set
def setTarget(self, target):
self.target = target
def info(self):
prv.info(self.train_set)
def desc(self):
prv.desc(self.train_set)
def report(self):
prv.report(self.train_set)
def apply(self, transformation_f, parameters):
self.counter += 1
self.transformations.append((str(self.counter) +
". " + transformation_f.__name__,
transformation_f))
transformation_f(self.train_set, **parameters)
def use_pipeline(self, df, name, update=True):
"""
Use a pipeline by giving the associated name on a dataframe.
Parameters
----------
df
type : Dataframe
name
type : string
"""
for pipeline in self.tr_transformation_pipelines_collection:
if pipeline[0] == name:
pipeline[1].fit_transform(df)
if update:
for transformation in pipeline[1].steps:
self.add_transformation(transformation[1])
break
|
the-stack_0_24019 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import math
from collections import OrderedDict
from ..utils import _to_triple, _triple_same, _pair_same
from .base_acsconv import _ACSConv
class SoftACSConv(_ACSConv):
"""
Decorator class for soft ACS Convolution
Args:
mean: *bool*, optional, the default value is False. If True, it changes to a mean ACS Convolution.
Other arguments are the same as torch.nn.Conv3d.
Examples:
>>> import SoftACSConv
>>> x = torch.rand(batch_size, 3, D, H, W)
>>> # soft ACS Convolution
>>> conv = SoftACSConv(3, 10, 1)
>>> out = conv(x)
>>> # mean ACS Convolution
>>> conv = SoftACSConv(3, 10, 1, mean=True)
>>> out = conv(x)
"""
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, mean=False,
bias=True, padding_mode='zeros'):
super().__init__(
in_channels, out_channels, kernel_size, stride, padding, dilation,
False, 0, groups, bias, padding_mode)
if not mean:
self.soft_w_core = nn.Parameter(torch.rand(out_channels,3)) #TODO: init
self.mean = mean
def conv3D_output_shape_f(self,i, input_shape):
"""
Calculate the original output size assuming the convolution is nn.Conv3d based on
input size, kernel size, dilation, padding and stride.
"""
return math.floor((input_shape[i]-self.kernel_size[i]-(self.dilation[i]-1)*
(self.kernel_size[i]-1)+2*self.padding[i])
/self.stride[i])+1
def forward(self, x):
"""
Convolution forward function
Conduct convolution on three directions seperately and then
aggregate the three parts of feature maps by *soft* or *mean* way.
Bias is added at last.
"""
B, C_in, *input_shape = x.shape
conv3D_output_shape = (self.conv3D_output_shape_f(0, input_shape),
self.conv3D_output_shape_f(1, input_shape),
self.conv3D_output_shape_f(2, input_shape))
f_a = F.conv3d(x if conv3D_output_shape[0]==input_shape[0] or 2*conv3D_output_shape[0]==input_shape[0] else F.pad(x, (0,0,0,0,self.padding[0],self.padding[0]),'constant',0)[:,:,
self.kernel_size[0]//2:self.kernel_size[0]//2+(conv3D_output_shape[0]-1)*self.stride[0]+1,
:,:],
weight=self.weight.unsqueeze(2), bias=None,
stride=self.stride,
padding=(0,self.padding[1],self.padding[2]),
dilation=self.dilation,
groups=self.groups)
f_c = F.conv3d(x if conv3D_output_shape[1]==input_shape[1] or 2*conv3D_output_shape[1]==input_shape[1] else F.pad(x, (0,0,self.padding[1],self.padding[1]),'constant',0)[:,:,:,
self.kernel_size[1]//2:self.kernel_size[1]//2+self.stride[1]*(conv3D_output_shape[1]-1)+1,
:],
weight=self.weight.unsqueeze(3), bias=None,
stride=self.stride,
padding=(self.padding[0],0,self.padding[2]),
dilation=self.dilation,
groups=self.groups)
f_s = F.conv3d(x if conv3D_output_shape[2]==input_shape[2] or 2*conv3D_output_shape[2]==input_shape[2] else F.pad(x, (self.padding[2],self.padding[2]),'constant',0)[:,:,:,:,
self.kernel_size[2]//2:self.kernel_size[2]//2+self.stride[2]*(conv3D_output_shape[2]-1)+1
],
weight=self.weight.unsqueeze(4), bias=None,
stride=self.stride,
padding=(self.padding[0],self.padding[1],0),
dilation=self.dilation,
groups=self.groups)
if self.mean:
f = (f_a + f_c + f_s) / 3
else:
soft_w = self.soft_w_core.softmax(-1)
f = f_a*soft_w[:,0].view(1,self.out_channels,1,1,1)+\
f_c*soft_w[:,1].view(1,self.out_channels,1,1,1)+\
f_s*soft_w[:,2].view(1,self.out_channels,1,1,1)
if self.bias is not None:
f += self.bias.view(1,self.out_channels,1,1,1)
return f
def extra_repr(self):
s = super().extra_repr() + ', mean={mean}'
return s.format(**self.__dict__) |
the-stack_0_24020 | # -*- coding: utf-8 -*-
"""Command line interface."""
import logging
import os
import time
import click
from bio2bel_chebi import Manager as ChebiManager
from bio2bel_hgnc import Manager as HgncManager
from pybel import from_pickle
from tqdm import tqdm
from pathme.constants import DATA_DIR, DEFAULT_CACHE_CONNECTION
from pathme.constants import RDF_REACTOME, REACTOME_BEL, REACTOME_FILES, REACTOME_FILE_LIST, REACTOME_SPECIES_TO_ID
from pathme.export_utils import get_paths_in_folder
from pathme.reactome.rdf_sparql import get_reactome_statistics, reactome_to_bel
from pathme.reactome.utils import untar_file
from pathme.utils import make_downloader, statistics_to_df, summarize_helper
from pathme.wikipathways.utils import get_file_name_from_url
__all__ = [
'main',
]
logger = logging.getLogger(__name__)
@click.group()
def main():
"""Manage Reactome."""
@main.command(help='Downloads Reactome RDF files')
def download():
"""Download Reactome RDF."""
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(name)s - %(message)s")
logger.setLevel(logging.INFO)
logger.info('Downloading Reactome RDF file')
cached_file = os.path.join(REACTOME_FILES, get_file_name_from_url(RDF_REACTOME))
make_downloader(RDF_REACTOME, cached_file, REACTOME_FILES, untar_file)
logger.info('Reactome was downloaded')
@main.command()
@click.option('-v', '--verbose', is_flag=True)
@click.option('-s', '--species', default=None)
def bel(verbose, species):
"""Convert Reactome to BEL."""
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(name)s - %(message)s")
if verbose:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
files = []
if species:
species = species.replace(" ", "").split(",")
for species_id in species:
species_name = [k for k, v in REACTOME_SPECIES_TO_ID.items() if v == int(species_id)][0]
files.append(species_name + ".owl")
logger.info('Initiating HGNC Manager')
hgnc_manager = HgncManager()
chebi_manager = ChebiManager()
if not hgnc_manager.is_populated():
click.echo('bio2bel_hgnc was not populated. Populating now.')
hgnc_manager.populate()
for reactome_file in files or REACTOME_FILE_LIST:
t = time.time()
resource_file = os.path.join(REACTOME_FILES, reactome_file)
reactome_to_bel(resource_file, hgnc_manager, chebi_manager)
logger.info(f'Reactome exported file {reactome_file} in {(time.time() - t):.2f} seconds')
@main.command()
@click.option('-e', '--export-folder', default=REACTOME_BEL, show_default=True)
def summarize(export_folder):
"""Summarize the Reactome export."""
click.echo('loading Reactome graphs')
graphs = [
from_pickle(os.path.join(export_folder, fname))
for fname in tqdm(get_paths_in_folder(export_folder))
]
if graphs:
summarize_helper(graphs)
else:
click.echo("Please export Reactome to BEL first. Run 'python3 -m pathme reactome bel' ")
@main.command()
@click.option('-c', '--connection', help="Defaults to {}".format(DEFAULT_CACHE_CONNECTION))
@click.option('-v', '--verbose', is_flag=True)
@click.option('-x', '--only-canonical', default=True, help='Parse only canonical pathways')
@click.option('-e', '--export', default=False, help='Export to datasheet csv and xls')
def statistics(connection, verbose, only_canonical, export):
"""Generate statistics for a database."""
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(name)s - %(message)s")
if verbose:
logger.setLevel(logging.DEBUG)
logger.info('Initiating HGNC Manager')
hgnc_manager = HgncManager()
chebi_manager = ChebiManager()
resource_file = os.path.join(REACTOME_FILES, 'Homo_sapiens.owl')
global_statistics, all_pathways_statistics = get_reactome_statistics(resource_file, hgnc_manager, chebi_manager)
if export:
df = statistics_to_df(all_pathways_statistics)
df.to_excel(os.path.join(DATA_DIR, 'reactome_statistics.xlsx'))
df.to_csv(os.path.join(DATA_DIR, 'reactome_statistics.csv'))
if __name__ == '__main__':
main()
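# Typical invocation order of the commands defined above (added illustration; the
# `python3 -m pathme reactome` entry point is taken from the hint printed by `summarize`):
# $ python3 -m pathme reactome download
# $ python3 -m pathme reactome bel --species 9606
# $ python3 -m pathme reactome summarize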
|
the-stack_0_24022 | from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn.metrics import f1_score
import pickle
import numpy as np
from time import time
pic_filename = "data/photo_dataset.pkl"
label_filename = "data/label_dataset.pkl"
classifier_filename = "data/classifier.pkl"
pca_filename = "data/pca.pkl"
def train():
pic_file_handler = open(pic_filename,"rb")
pics = pickle.load(pic_file_handler)
pic_file_handler.close()
label_file_handler = open(label_filename,"rb")
labels = pickle.load(label_file_handler)
label_file_handler.close()
n_samples = pics.shape[0]
n_features = pics.shape[2]
n_classes = max(labels)+1
pics = pics.reshape((n_samples,n_features))
print("Total dataset size")
print ("n_classes: %d" % n_classes)
print ("n_samples: %d" % n_samples)
print ("n_features: %d" % n_features)
###############################################################################
# Split into a training and testing set
x_train, x_test, y_train, y_test = train_test_split(pics, labels, test_size=0.3, random_state=42)
###############################################################################
# Compute a PCA (eigenfaces) on the face dataset (treated as unlabeled
# dataset): unsupervised feature extraction / dimensionality reduction
n_components = 100
print ("Extracting the top %d eigenfaces from %d faces" % (n_components, x_train.shape[0]))
t0 = time()
pca = PCA(n_components=n_components, whiten=True).fit(x_train)
print ("done in %0.3fs" % (time() - t0))
#eigenfaces = pca.components_.reshape((n_components, 240, 360))
print ("Projecting the input data on the eigenfaces orthonormal basis")
t0 = time()
x_train_pca = pca.transform(x_train)
x_test_pca = pca.transform(x_test)
print ("done in %0.3fs" % (time() - t0))
###############################################################################
# Train a SVM classification model
print ("Fitting the classifier to the training set")
t0 = time()
param_grid = {
'C': [1e3, 5e3, 1e4, 5e4, 1e5],
'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1],
}
# for sklearn version 0.16 or prior, the class_weight parameter value is 'auto'
clf = GridSearchCV(SVC(kernel='rbf', class_weight='balanced'), param_grid)
clf = clf.fit(x_train_pca, y_train)
print ("done in %0.3fs" % (time() - t0))
print ("Best estimator found by grid search:")
print (clf.best_estimator_)
###############################################################################
# Quantitative evaluation of the model quality on the test set
print ("Predicting the people names on the testing set")
t0 = time()
y_pred = clf.predict(x_test_pca)
print ("done in %0.3fs" % (time() - t0))
print ("f1 score is: ", f1_score(y_test, y_pred, average = None))
clf_file_handler = open(classifier_filename,"wb")
pickle.dump(clf, clf_file_handler)
clf_file_handler.close()
pca_file_handler = open(pca_filename,"wb")
pickle.dump(pca, pca_file_handler)
pca_file_handler.close()
input("Press any key to continue")
|
the-stack_0_24023 | from django.conf.urls import url, include
from . import views
app_name = 'data_manager'
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^input/', views.input, name='input'),
url(r'^format_information/', views.field_info, name='field_info'),
url(r'^data_to_database/', views.data_to_database, name='data_to_database'),
url(r'^save_fishplot/', views.save_fishplot, name='save_fishplot')
] |
the-stack_0_24024 | # Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE
"""Make database for record."""
class Data(object):
"""
Make base class of data structure to store train/test information, for analysis relative performance.
local database will using sqlite.
local file will work with numpy & csv
"""
VERSION = 0.1
def __init__(self):
self.base_fields = (
"env_name", # rl's environment
"alg_name", # algorithm
"train_index", # the index of model saved, user define
"start_time", # this event start time
"sample_step", # the total sample steps used for training,
"train_loss",
"train_reward",
"eval_reward",
"framework",
"comments", # user others' comments
)
def insert_records(self, to_record):
"""
Insert train record.
Args:
----
to_record: the record (a mapping of the base fields above) to insert.
"""
raise NotImplementedError
def get_version(self):
"""Get database version info."""
return self.VERSION
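# Hedged illustration (added; not part of the original file): a record passed to
# insert_records is expected to carry the base fields listed above. The values and the
# SqliteData subclass named below are made up for the example.
# example_record = {
#     "env_name": "CartPole-v1", "alg_name": "ppo", "train_index": 3,
#     "start_time": "2020-01-01 00:00:00", "sample_step": 10000,
#     "train_loss": 0.12, "train_reward": 180.0, "eval_reward": 195.0,
#     "framework": "tf", "comments": "baseline run",
# }
# SqliteData().insert_records(example_record)  # hypothetical sqlite-backed subclass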
|
the-stack_0_24025 | # coding: utf-8
from nwpc_hpc_model.base.query_category import QueryCategoryList, QueryCategory
from . import record_parser, value_saver
class AixDiskSpaceCategoryList(QueryCategoryList):
def __init__(self):
QueryCategoryList.__init__(self)
@classmethod
def build_from_config(cls, category_list_config):
category_list = AixDiskSpaceCategoryList()
for an_item in category_list_config:
category = QueryCategory(
category_id=an_item['id'],
display_name=an_item['display_name'],
label=an_item['label'],
record_parser_class=getattr(record_parser, an_item['record_parser_class']),
record_parser_arguments=tuple(an_item['record_parser_arguments']),
value_saver_class=getattr(value_saver, an_item['value_saver_class']),
value_saver_arguments=tuple(an_item['value_saver_arguments'])
)
category_list.append(category)
return category_list
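# Hedged illustration (added; not part of the original file): build_from_config expects
# a list of dicts with the keys accessed above. The parser/saver class names below are
# placeholders, not guaranteed members of the record_parser / value_saver modules.
# category_list = AixDiskSpaceCategoryList.build_from_config([
#     {
#         "id": "file_system",
#         "display_name": "File System",
#         "label": "Filesystem",
#         "record_parser_class": "DetailedRecordParser",   # hypothetical name
#         "record_parser_arguments": ["Filesystem"],
#         "value_saver_class": "StringSaver",              # hypothetical name
#         "value_saver_arguments": [],
#     },
# ])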
|
the-stack_0_24026 | import json
import requests
from django.core.cache import cache
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.template.loader import render_to_string
from annoying.decorators import render_to, ajax_request
from main_site.models import DataPoint, DataPointAggregate, Milestone, Sale, DASHBOARD_DATA_KEY
from main_site.tasks import update_dashboard_cache
@login_required
@render_to("main_site/home.html")
def home(request):
data_points = DataPointAggregate.objects.all()
milestones = Milestone.objects.all()
if not cache.get(DASHBOARD_DATA_KEY):
update_dashboard_cache()
data_string = cache.get(DASHBOARD_DATA_KEY)
return locals()
@login_required
@render_to("main_site/sales_cycle.html")
def sales_cycle(request):
sales = Sale.objects.all()
return locals()
@login_required
@ajax_request
def save_sales(request):
try:
data = json.loads(request.body)
for sale in data:
s = Sale.objects.get(pk=sale["pk"])
old_status = "%s" % s.status
s.status = sale["value"]
s.save()
if old_status != "%s" % sale["value"]:
try:
s = Sale.objects.get(pk=sale["pk"])
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
r = requests.post(
"%s/api/sales-update" % settings.WILL_URL,
headers=headers,
data=json.dumps({
"name": s.name,
"status": s.get_status_display()
})
)
assert r.status_code == 200
except:
import traceback; traceback.print_exc();
pass
return {'success': True}
except:
import traceback; traceback.print_exc();
return {'success': False}
@login_required
@render_to("main_site/intercom.html")
def intercom(request):
return locals()
@login_required
@render_to("main_site/will.html")
def will(request):
return locals()
@login_required
@render_to("main_site/admin.html")
def admin(request):
return locals() |
the-stack_0_24028 | import numpy as np
from gym.spaces import Box
from ..core import ReferenceGenerator
from ..utils import instantiate
import gym_electric_motor as gem
class MultipleReferenceGenerator(ReferenceGenerator, gem.RandomComponent):
"""Reference Generator that combines multiple sub reference generators that all have to reference
different state variables.
"""
def __init__(self, sub_generators, sub_args=None, **kwargs):
"""
Args:
sub_generators(list(str/class/object)): List of keys, classes or objects to instantiate the sub_generators
sub_args(dict/list(dict)/None): (Optional) Arguments to pass to the sub_generators. If not passed, all kwargs
will be passed to each sub_generator.
kwargs: All kwargs of the environment. Passed to the sub_generators, if no sub_args are passed.
"""
ReferenceGenerator.__init__(self, **kwargs)
gem.RandomComponent.__init__(self)
self.reference_space = Box(-1, 1, shape=(1,))
if type(sub_args) is dict:
sub_arguments = [sub_args] * len(sub_generators)
elif hasattr(sub_args, '__iter__'):
assert len(sub_args) == len(sub_generators)
sub_arguments = sub_args
else:
sub_arguments = [kwargs] * len(sub_generators)
self._sub_generators = [instantiate(ReferenceGenerator, sub_generator, **sub_arg)
for sub_generator, sub_arg in zip(sub_generators, sub_arguments)]
self._reference_names = []
for sub_gen in self._sub_generators:
self._reference_names += sub_gen.reference_names
def set_modules(self, physical_system):
"""
Args:
physical_system(PhysicalSystem): The physical system of the environment.
"""
super().set_modules(physical_system)
for sub_generator in self._sub_generators:
sub_generator.set_modules(physical_system)
# Ensure that all referenced states are different
assert all(sum([sub_generator.referenced_states.astype(int) for sub_generator in self._sub_generators]) < 2), \
'Some of the passed reference generators share the same reference variable'
ref_space_low = np.concatenate([sub_generator.reference_space.low for sub_generator in self._sub_generators])
ref_space_high = np.concatenate([sub_generator.reference_space.high for sub_generator in self._sub_generators])
self.reference_space = Box(ref_space_low, ref_space_high)
self._referenced_states = np.sum(
[sub_generator.referenced_states for sub_generator in self._sub_generators], dtype=bool, axis=0
)
def reset(self, initial_state=None, initial_reference=None):
# docstring from superclass
refs = np.zeros_like(self._physical_system.state_names, dtype=float)
ref_obs = np.array([])
for sub_generator in self._sub_generators:
ref, ref_observation, _ = sub_generator.reset(initial_state, initial_reference)
refs += ref
ref_obs = np.concatenate((ref_obs, ref_observation))
return refs, ref_obs, None
def get_reference(self, state, **kwargs):
# docstring from superclass
return sum([sub_generator.get_reference(state, **kwargs) for sub_generator in self._sub_generators])
def get_reference_observation(self, state, *_, **kwargs):
# docstring from superclass
return np.concatenate(
[sub_generator.get_reference_observation(state, **kwargs) for sub_generator in self._sub_generators]
)
def seed(self, seed=None):
super().seed(seed)
for sub_generator in self._sub_generators:
if isinstance(sub_generator, gem.RandomComponent):
seed = self._seed_sequence.spawn(1)[0]
sub_generator.seed(seed)
|
the-stack_0_24029 | from keras.models import load_model
from keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_v3 import preprocess_input
from keras.preprocessing import image
from keras.models import Model
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical
import numpy as np
import flask
import io
import config as cf
from word_feature import utils
from googletrans import Translator
# initialize our Flask application and the Keras model
app = flask.Flask(__name__, static_url_path="", static_folder="demo")
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])
app.config['JSON_AS_ASCII'] = False
caption_model = None
vector_model = None
descriptions = utils.read_caption_clean_file('Flickr8k_text/Flickr8k.cleaned.lemma.token.txt')
idxtoword, wordtoidx, vocab_size = utils.map_w2id(descriptions.values())
max_len = utils.calculate_caption_max_len(descriptions.values())
def load_server_model():
# Load caption model and vector
global caption_model
global vector_model
caption_model = load_model('history/train_lemma.64-3.38.hdf5')
model = InceptionV3(weights='imagenet')
vector_model = Model(model.input, model.layers[-2].output)
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def gen_caption(feature):
start = wordtoidx['startseq']
end = wordtoidx['endseq']
current = [start]
length = 0
caption = ''
feature = np.expand_dims(feature, axis=0)
while current[-1]!=end and length <= max_len:
in_seq = pad_sequences([current], maxlen=max_len)[0]
in_seq = np.expand_dims(in_seq, axis=0)
y = caption_model.predict([feature, in_seq])
y = np.argmax(y)
caption = caption + ' ' + idxtoword[y]
current.append(y)
length += 1
return caption.rsplit(' ', 1)[0]
def vectorize_img(file):
img = image.load_img(file, target_size=cf.img_size)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
feature = vector_model.predict(x)
# flatten
feature = np.reshape(feature, cf.vector_len)
return feature
@app.route('/')
def index():
return flask.render_template('index.html', has_result=False)
@app.route("/predict", methods=["POST"])
def predict():
# FIXME: Need front end and handle file not path
data = {"success": False}
error = None
translator = Translator()
# ensure an image was properly uploaded to our endpoint
if flask.request.method == "POST":
if flask.request.form.get("image") != None:
# read the image in PIL format
file = flask.request.form["image"]
print('File: ', file, '\n')
feature = vectorize_img(file)
cap = gen_caption(feature)
data['success'] = True
result = translator.translate(cap, src='en', dest='vi')
data['vi'] = result.text
data['en'] = cap
# return the data dictionary as a JSON response
return flask.jsonify(data)
@app.route('/upload', methods=['POST'])
def upload_file():
data = dict()
#translator = Translator()
if flask.request.method == 'POST':
# check if the post request has the file part
if 'file' not in flask.request.files:
flask.flash('No file part')
return flask.redirect(flask.request.url)
filepath = flask.request.files['file']
# if the user does not select a file, the browser also
# submits an empty part without a filename
if filepath.filename == '':
flask.flash('No selected file')
return flask.redirect(flask.request.url)
if filepath and allowed_file(filepath.filename):
filename = filepath.filename
file = 'demo/'+filename
feature = vectorize_img(file)
cap = gen_caption(feature)
data['src'] = filename
#result = translator.translate(cap, src='en', dest='vi')
#data['vi'] = result.text
data['Caption'] = cap
# return flask.jsonify(data)
return flask.render_template('result.html', result=data)
if __name__ == "__main__":
print(("* Loading Keras model and Flask starting server..."
"please wait until server has fully started"))
load_server_model()
app.run(debug = False, threaded = False)
#################### Test #########################################
#translator = Translator()
#result = translator.translate('A tall man', src='en', dest='vi')
#print(result.text)
####################################################################
|
the-stack_0_24030 | # -*- coding: utf-8 -*-
'''
salt.utils.gzip
~~~~~~~~~~~~~~~
Helper module for handling gzip consistently between 2.7+ and 2.6-
'''
from __future__ import absolute_import
# Import python libs
import gzip
# Import 3rd-party libs
from salt.ext.six import BytesIO
class GzipFile(gzip.GzipFile):
def __init__(self, filename=None, mode=None,
compresslevel=9, fileobj=None):
gzip.GzipFile.__init__(self, filename, mode, compresslevel, fileobj)
### Context manager (stolen from Python 2.7)###
def __enter__(self):
"""Context management protocol. Returns self."""
return self
def __exit__(self, *args):
"""Context management protocol. Calls close()"""
self.close()
def open(filename, mode="rb", compresslevel=9):
if hasattr(gzip.GzipFile, '__enter__'):
return gzip.open(filename, mode, compresslevel)
else:
return GzipFile(filename, mode, compresslevel)
def open_fileobj(fileobj, mode='rb', compresslevel=9):
if hasattr(gzip.GzipFile, '__enter__'):
return gzip.GzipFile(
filename='', mode=mode, fileobj=fileobj,
compresslevel=compresslevel
)
return GzipFile(
filename='', mode=mode, fileobj=fileobj, compresslevel=compresslevel
)
def compress(data, compresslevel=9):
'''
Returns the data compressed at gzip level compression.
'''
buf = BytesIO()
with open_fileobj(buf, 'wb', compresslevel) as ogz:
ogz.write(data)
compressed = buf.getvalue()
return compressed
def uncompress(data):
buf = BytesIO(data)
with open_fileobj(buf, 'rb') as igz:
unc = igz.read()
return unc
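# A minimal round-trip sketch (not part of the original module), runnable as a
# script: compress() and uncompress() should be inverses for byte strings.
if __name__ == '__main__':
    payload = b'pyradio ' * 10
    packed = compress(payload)
    assert uncompress(packed) == payload
    print('round-trip ok, compressed size: {0}'.format(len(packed)))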
|
the-stack_0_24031 | import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="family", parent_name="scattersmith.textfont", **kwargs
):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
no_blank=kwargs.pop("no_blank", True),
strict=kwargs.pop("strict", True),
**kwargs,
)
|
the-stack_0_24032 | # Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
#
#
#
def get_sp_mu2s(sp2nmult,sp_mu2j):
""" Generates list of start indices for atomic orbitals, based on the counting arrays """
sp_mu2s = []
for sp,(nmu,mu2j) in enumerate(zip(sp2nmult,sp_mu2j)):
mu2s = np.zeros((nmu+1), dtype='int64')
for mu in range(nmu):
mu2s[mu+1] = sum(2*mu2j[0:mu+1]+1)
sp_mu2s.append(mu2s)
return sp_mu2s
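# A small illustrative check (not part of the original module), runnable as a
# script: one species with two multiplets of angular momenta j = 0 and j = 1
# spans 1 + 3 orbitals, so the start indices should be [0, 1, 4].
if __name__ == '__main__':
    print(get_sp_mu2s([2], [np.array([0, 1])]))  # -> [array([0, 1, 4])]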
|
the-stack_0_24035 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from math import sqrt
from compas.geometry._core import subtract_vectors
from compas.geometry._core import sum_vectors
from compas.geometry._core import cross_vectors
from compas.geometry._core import dot_vectors
from compas.geometry._core import scale_vector
from compas.geometry._core import normalize_vector
from compas.geometry._core import length_vector
from compas.geometry._core import length_vector_sqrd
__all__ = [
'circle_from_points',
'circle_from_points_xy',
]
def circle_from_points(a, b, c):
"""Construct a circle from three points.
Parameters
----------
a : sequence of float
XYZ coordinates.
b : sequence of float
XYZ coordinates.
c : sequence of float
XYZ coordinates.
Returns
-------
circle : tuple
Center, radius, normal of the circle.
Notes
-----
For more information, see [1]_.
References
----------
.. [1] Wikipedia. *Circumscribed circle*.
Available at: https://en.wikipedia.org/wiki/Circumscribed_circle.
Examples
--------
>>>
"""
ab = subtract_vectors(b, a)
cb = subtract_vectors(b, c)
ba = subtract_vectors(a, b)
ca = subtract_vectors(a, c)
ac = subtract_vectors(c, a)
bc = subtract_vectors(c, b)
normal = normalize_vector(cross_vectors(ab, ac))
d = 2 * length_vector_sqrd(cross_vectors(ba, cb))
A = length_vector_sqrd(cb) * dot_vectors(ba, ca) / d
B = length_vector_sqrd(ca) * dot_vectors(ab, cb) / d
C = length_vector_sqrd(ba) * dot_vectors(ac, bc) / d
Aa = scale_vector(a, A)
Bb = scale_vector(b, B)
Cc = scale_vector(c, C)
center = sum_vectors([Aa, Bb, Cc])
radius = length_vector(subtract_vectors(a, center))
return center, radius, normal
def circle_from_points_xy(a, b, c):
"""Create a circle from three points lying in the XY-plane
Parameters
----------
a : sequence of float
XY(Z) coordinates of a 2D or 3D point (Z will be ignored).
b : sequence of float
XY(Z) coordinates of a 2D or 3D point (Z will be ignored).
c : sequence of float
XY(Z) coordinates of a 2D or 3D point (Z will be ignored).
Returns
-------
tuple
XYZ coordinates of center in the XY-plane (Z = 0.0) and radius of the circle.
Notes
-----
For more info, see [1]_.
References
----------
.. [1] Wikipedia. *Circumscribed circle*.
Available at: https://en.wikipedia.org/wiki/Circumscribed_circle.
Examples
--------
>>>
"""
ax, ay = a[0], a[1]
bx, by = b[0], b[1]
cx, cy = c[0], c[1]
a = bx - ax
b = by - ay
c = cx - ax
d = cy - ay
e = a * (ax + bx) + b * (ay + by)
f = c * (ax + cx) + d * (ay + cy)
g = 2 * (a * (cy - by) - b * (cx - bx))
if g == 0:
return None
centerx = (d * e - b * f) / g
centery = (a * f - c * e) / g
radius = sqrt((ax - centerx) ** 2 + (ay - centery) ** 2)
return [centerx, centery, 0.0], radius, [0, 0, 1]
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
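    # Hedged sanity check (not part of the original module): three points on
    # the unit circle in the XY-plane should give center (0, 0, 0), radius 1.0
    # and normal [0, 0, 1].
    print(circle_from_points_xy([1, 0], [0, 1], [-1, 0]))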
|
the-stack_0_24038 | # -*- coding: utf-8 -*-
import curses
try:
from dns import resolver
except:
pass
from copy import deepcopy
import random
import json
from os import path
import collections
from operator import itemgetter
try:
import requests
except:
pass
import threading
import logging
from .player import info_dict_to_list
from .cjkwrap import cjklen, PY3
from .countries import countries
from .simple_curses_widgets import SimpleCursesLineEdit, SimpleCursesHorizontalPushButtons, SimpleCursesWidgetColumns, SimpleCursesCheckBox, SimpleCursesCounter, SimpleCursesBoolean, DisabledWidget, SimpleCursesString
from .ping import ping
import locale
locale.setlocale(locale.LC_ALL, '') # set your locale
logger = logging.getLogger(__name__)
RADIO_BROWSER_DISPLAY_TERMS = {
'topvote': 0,
'topclick': 1,
'lastclick': 2,
'lastchange': 3,
'changed': -1,
'improvable': -1,
'broken': -1,
}
RADIO_BROWSER_SEARCH_BY_TERMS = {
'byuuid': -1,
'byname': 6,
'bynameexact': 6,
'bycodec': 16,
'bycodecexact': 16,
'bycountry': 8,
'bycountryexact': 8,
'bycountrycodeexact': -1,
'bystate': 14,
'bystateexact': 14,
'bylanguage': 10,
'bylanguageexact': 10,
'bytag': 12,
'bytagexact': 12,
}
RADIO_BROWSER_SEARCH_SORT_TERMS = {
'random': 1,
'name': 2,
'tags': 3,
'country': 4,
'state': 5,
'language': 6,
'votes': 7,
'clickcount': 8,
'bitrate': 9,
'codec': 10,
}
RADIO_BROWSER_SEARCH_TERMS = {
'name': 6,
'nameExact': 5,
'country': 8,
'countryExact': 7,
'countrycode': -1,
'state': 13,
'stateExact': 14,
'language': 10,
'languageExact': 9,
'tag': 12,
'tagList': 12,
'tagExact': 11,
'codec': 16,
'bitrateMin': -1,
'bitrateMax': -1,
'has_geo_info': -1,
'offset': -1,
}
RADIO_BROWSER_EXACT_SEARCH_TERM = {
'name': 'nameExact',
'country': 'countryExact',
'state': 'stateExact',
'language': 'languageExact',
'tag': 'tagExact'
}
def country_from_server(a_server):
if a_server:
country = a_server.split('.')[0]
up = country[:-1].upper()
if up in countries.keys():
return countries[up]
else:
return country
else:
return None
def capitalize_comma_separated_string(a_string):
sp = a_string.split(',')
for i, n in enumerate(sp):
sp[i] = n.strip().capitalize()
return ', '.join(sp)
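# Illustrative examples (hypothetical values, not part of the original module):
#   country_from_server('de1.api.radio-browser.info')    -> 'Germany'
#   capitalize_comma_separated_string('greek, english')   -> 'Greek, English'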
class PyRadioStationsBrowser(object):
''' A base class to get results from online radio directory services.
Actual implementations should be subclasses of this one.
'''
BROWSER_NAME = 'PyRadioStationsBrowser'
BASE_URL = ''
AUTO_SAVE_CONFIG = False
TITLE = ''
_parent = _outer_parent = None
_raw_stations = []
_last_search = None
_internal_header_height = 0
_url_timeout = 3
_search_timeout = 3
_vote_callback = None
_sort = _search_win = None
    # Normally the outer body (holding box, header, internal header) is
    # 2 chars wider than the internal body (holding the stations)
# This property value is half the difference (normally 2 / 2 = 1)
# Used to chgat the columns' separators in internal body
# Check if the cursor is divided as required and adjust
_outer_internal_body_diff = 2
_outer_internal_body_half_diff = 1
def __init__(self,
config,
config_encoding,
session=None,
search=None,
pyradio_info=None,
search_return_function=None,
message_function=None):
''' Initialize the station's browser.
It should return a valid search result (for example,
www.radio-browser.info implementation, returns 100 stations
sorted by number of votes).
Parameters
----------
search
Search parameters to be used instead of the default.
'''
pass
def __del__(self):
self._sort = None
self._search_win = None
@property
def parent(self):
return self._parent
@parent.setter
def parent(self, val):
self._parent = val
if self._sort:
self._sort._parent = val
@property
def outer_parent(self):
return self._outer_parent
@outer_parent.setter
def outer_parent(self, val):
self._outer_parent = val
if self._search_win:
self._search_win._parent = val
@property
def outer_internal_body_half_diff(self):
return self._outer_internal_body_half_diff
@outer_internal_body_half_diff.setter
def outer_internal_body_half_diff(self, value):
raise ValueError('property is read only')
@property
def internal_header_height(self):
return self._internal_header_height
@internal_header_height.setter
def internal_header_height(self, value):
raise ValueError('property is read only')
@property
def title(self):
return self.TITLE
@title.setter
def title(self, value):
self.TITLE = value
@property
def vote_callback(self):
return self._vote_callback
@vote_callback.setter
def vote_callback(self, val):
self._vote_callback = val
def stations(self, playlist_format=1):
return []
def url(self, id_in_list):
''' Return a station's real/playable url
It has to be implemented only in case have_to_retrieve_url is True
Parameters
----------
id_in_list
id in list of stations (0..len-1)
Returns
-------
Real/playable url or '' if failed (string)
'''
return ''
def set_played(self, id_in_list, played):
        ''' Mark that a station has been played.
Parameters
----------
id_in_list
id in list of stations (0..len-1)
played
True or False
'''
pass
def search(self, go_back_in_history=True):
return []
def set_encoding(self, id_in_list, new_encoding):
return
def format_station_line(self, id_in_list, pad, width):
return ''
def click(self, a_station):
pass
def vote(self, a_station):
return True
def save_config(self):
''' setting AUTO_SAVE_CONFIG to True here, so that any
calling functions will call save_config (which does
nothing) directly (without displaying a confirm
window).
Subclasses should set it to False (if a confirmation
window is needed)
'''
self.AUTO_SAVE_CONFIG = True
return True
def is_config_dirty(self):
return False
class RadioBrowser(PyRadioStationsBrowser):
BROWSER_NAME = 'RadioBrowser'
BASE_URL = 'api.radio-browser.info'
TITLE = 'RadioBrowser '
browser_config = _config_win = None
_headers = {'User-Agent': 'PyRadio/dev',
'Content-Type': 'application/json'}
_raw_stations = []
# the output format to use based on window width
# Default value: -1
# Possible values: 0..5
# Look at format_station_line() for info
_output_format = -1
_info_len = []
_info_name_len = 0
_internal_header_height = 1
_search_history = []
_search_history_index = -1
_columns_width = {
'votes': 7,
'clickcount': 7,
'bitrate': 7,
'country': 18,
'language': 15,
'state': 18,
'tags': 20,
'codec': 5
}
_server_selection_window = None
_dns_info = None
search_by = _old_search_by = None
_default_max_number_of_results = 100
_default_server = ''
_default_ping_count = 1
_default_ping_timeout = 1
_do_ping = False
keyboard_handler = None
''' _search_history_index - current item in this browser - corresponds to search window _history_id
_default_search_history_index - autoload item in this browser - corresponds to search window _default_history_id
'''
_search_history_index = 1
_default_search_history_index = 1
def __init__(self,
config,
config_encoding,
session=None,
search=None,
pyradio_info=None,
search_return_function=None,
message_function=None):
'''
        When first_search is True, we are opening the browser;
        if the first browser search returns an empty result, we
        show an empty stations' list.
        When it is False and the first browser search returns an
        empty result, we are already in the browser's search
        screen, so we just display the 'no result' message.
        All of this is done in radio.py
'''
self.first_search = True
self._cnf = config
self.browser_config = RadioBrowserConfig(self._cnf.stations_dir)
# logger.error('DE AUTO_SAVE_CONFIG = {}'.format(self.AUTO_SAVE_CONFIG))
if session:
self._session = session
else:
self._session = requests.Session()
self._pyradio_info = pyradio_info.strip()
if self._pyradio_info:
self._headers['User-Agent'] = self._pyradio_info.replace(' ', '/')
self._config_encoding = config_encoding
self._message_function = message_function
self._search_return_function = search_return_function
def reset_dirty_config(self):
self.browser_config.dirty = False
def is_config_dirty(self):
return self.browser_config.dirty if self.browser_config else False
def initialize(self):
self._dns_info = RadioBrowserDns()
return self.read_config()
@property
def server(self):
return self._server
@property
def add_to_title(self):
return self._server.split('.')[0]
def _get_title(self):
self.TITLE = 'RadioBrowser ({})'.format(country_from_server(self._server))
def stations(self, playlist_format=1):
''' Return stations' list (in PyRadio playlist format)
Parameters
----------
playlist_format
0: station name, url
1: station name, url, encoding
2: station name, url, encoding, browser flag (default)
'''
ret = []
for n in self._raw_stations:
if playlist_format == 0:
ret.append([n['name'], n['url']])
elif playlist_format == 1:
enc = '' if n['encoding'] == self._config_encoding else n['encoding']
ret.append([n['name'], n['url'], enc])
else:
enc = '' if n['encoding'] == self._config_encoding else n['encoding']
ret.append([n['name'], n['url'], enc, ''])
return ret
def save_config(self):
''' just an interface to config class save_config
'''
if self._config_win:
return self._config_win.save_config()
else:
return self.browser_config.save_config(
self.AUTO_SAVE_CONFIG,
self._search_history,
self._default_search_history_index,
self._default_server if 'Random' not in self._default_server else '',
self._default_ping_count,
self._default_ping_timeout,
self._default_max_number_of_results)
def url(self, id_in_list):
''' Get a station's url using resolved_url
Parameters
----------
id_in_list
id in list of stations (0..len-1)
Returns
-------
url or '' if failed
'''
if self._raw_stations:
if id_in_list < len(self._raw_stations):
if self._raw_stations[id_in_list]['url_resolved']:
return self._raw_stations[id_in_list]['url_resolved']
else:
return self._raw_stations[id_in_list]['url']
return ''
def click(self, a_station):
def do_click(a_station, a_station_uuid):
url = 'http://' + self._server + '/json/url/' + a_station_uuid
try:
r = self._session.get(url=url, headers=self._headers, timeout=(self._search_timeout, 2 * self._search_timeout))
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Station click result: "{}"'.format(r.text))
# if '"ok":true' in r.text:
# self._raw_stations[a_station]['clickcount'] += 1
except:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Station click failed...')
threading.Thread(target=do_click, args=(a_station, self._raw_stations[a_station]['stationuuid'], )).start()
def vote(self, a_station):
url = 'http://' + self._server + '/json/vote/' + self._raw_stations[a_station]['stationuuid']
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Voting for: {}'.format(self._raw_stations[a_station]))
logger.debug('Voting url: ' + url)
try:
r = self._session.get(url=url, headers=self._headers, timeout=(self._search_timeout, 2 * self._search_timeout))
message = json.loads(r.text)
self.vote_result = self._raw_stations[a_station]['name'], message['message'][0].upper() + message['message'][1:]
# logger.error('DE voting result = {}'.format(self.vote_result))
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Voting result: "{}"'.format(message))
ret = message['ok']
except:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Station voting failed...')
self.vote_result = self._raw_stations[a_station]['name'], 'Voting for station failed'
ret = False
if ret:
self._raw_stations[a_station]['votes'] += 1
if self._vote_callback:
self._vote_callback()
def get_info_string(self, a_station, max_width=60):
guide = [
('Name', 'name'),
('URL', 'url'),
('Resolved URL', 'url_resolved'),
('Website', 'homepage'),
('Tags', 'tags'),
('Votes', 'votes'),
('Clicks', 'clickcount'),
('Country', 'country'),
('State', 'state'),
('Language', 'language'),
('Bitrate', 'bitrate'),
('Codec', 'codec')
]
if self._raw_stations[a_station]['url'] == self._raw_stations[a_station]['url_resolved']:
guide.pop(2)
info = collections.OrderedDict()
for n in guide:
try:
info[n[0]] = str(self._raw_stations[a_station][n[1]])
except:
''' do this here for python 2
TODO: make the previous statement work on py2
'''
info[n[0]] = self._raw_stations[a_station][n[1]].encode('utf-8', 'replace')
if n[1] == 'bitrate':
info[n[0]] += ' kb/s'
a_list = []
fix_highlight = []
a_list = info_dict_to_list(info, fix_highlight, max_width)
ret = '|' + '\n|'.join(a_list)
# logger.error('DE \n\n{}\n\n'.format(ret))
sp = ret.split('\n')
wrong_wrap = -1
for i, n in enumerate(sp):
# logger.exception('DE {0}: "{1}"'.format(i, n))
if wrong_wrap == i:
sp[i] = n.replace('|', '')
sp[i-1] += sp[i].replace('_', '')
sp[i] = '*' + sp[i]
wrong_wrap = -1
else:
if ': ' not in n:
sp[i] = n[1:]
if n[-1] == ':':
''' wrong wrapping! '''
wrong_wrap = i + 1
sp[i] += '|'
if sp[i][-1] != ' ':
sp[i] += ' '
if sp[i][0] != '|':
sp[i] = '|' + sp[i]
for i, n in enumerate(sp):
if n[0] == '*':
sp.pop(i)
ret = '\n'.join(sp).replace(': |', ':| ').replace(': ', ':| ')
# logger.error('DE \n\n{}\n\n'.format(ret))
return ret, ''
def search(self, go_back_in_history=True):
''' Search for stations with parameters.
Result is limited to 100 stations by default (use the
'limit' parameter to change it).
Parameters
----------
data
A dictionary containing the fields described at
http://www.radio-browser.info/webservice/#Advanced_station_search
Returns
-------
self._raw_stations
A dictionary with a subset of returned station data.
Its format is:
name : station name
id : station id
url : station url
resolved_url : station resolved_url
                tags : station tags
bitrate : station bitrate
hls : HLS status
votes : station votes
clickcount : station clicks
country : station country
state : station state
language : station language
codec : station codec
encoding : station encoding ('' means utf-8)
'''
if self._message_function:
self._message_function()
self.search_by = self._old_search_by = None
self._get_search_elements(
self._search_history[self._search_history_index]
)
self._old_search_by = self.search_by
self._sort = None
url = self._format_url(self._search_history[self._search_history_index])
post_data = {}
if self._search_history[self._search_history_index]['post_data']:
post_data = deepcopy(self._search_history[self._search_history_index]['post_data'])
self._output_format = -1
if self._search_type > 0:
if 'limit' not in post_data.keys() and self._default_max_number_of_results > 0:
post_data['limit'] = self._default_max_number_of_results
else:
if post_data['limit'] == '0':
post_data.pop('limit')
if 'hidebroken' not in post_data.keys():
post_data['hidebroken'] = 'true'
self._log_query(url, post_data)
''' keep server results here '''
new_raw_stations = []
try:
r = self._session.get(url=url, headers=self._headers, params=post_data, timeout=(self._search_timeout, 2 * self._search_timeout))
self._log_response(r)
r.raise_for_status()
new_raw_stations = self._extract_data(json.loads(r.text))
# logger.error('DE \n\n{}'.format(new_raw_stations))
ret = True, len(new_raw_stations), go_back_in_history
except requests.exceptions.RequestException as e:
if logger.isEnabledFor(logging.INFO):
logger.info(e)
# self._raw_stations = []
ret = False, 0, go_back_in_history
''' use server result '''
if len(new_raw_stations) > 0:
self._raw_stations = new_raw_stations[:]
if self._search_return_function:
self._search_return_function(ret)
def _log_query(self, url, post_data):
if logger.isEnabledFor(logging.INFO):
try:
logger.info('>>> RadioBrowser Query:')
logger.info(' search term = {}'.format(self._search_history[self._search_history_index]))
logger.info(' url = "{}"'.format(url))
logger.info(' headers = "{}"'.format(self._headers))
logger.info(' post_data = "{}"'.format(post_data))
except:
pass
def _log_response(self, r):
if logger.isEnabledFor(logging.INFO):
try:
logger.info('>>> RadioBrowser Response Query:')
logger.info(' url = "{}"'.format(r.request.url))
logger.info(' body = "{}"'.format(r.request.body))
logger.info(' headers = "{}"'.format(r.request.headers))
except:
pass
def _get_search_elements(self, a_search):
'''
get "by search" and "reverse"
values from a search dict.
To be used with the sort function
'''
if logger.isEnabledFor(logging.DEBUG):
logger.debug('_get_search_elements() :search term is\n\t"{}"'.format(a_search))
a_term = a_search['term']
p_data = a_search['post_data']
self.search_by = None
self.reverse = False
if a_search['post_data']:
if 'order' in a_search['post_data'].keys():
self.search_by = a_search['post_data']['order']
if 'reverse' in a_search['post_data']:
self.reverse = True if a_search['post_data']['reverse'] == 'true' else False
if logger.isEnabledFor(logging.DEBUG):
logger.debug('searching by was: "{}"'.format(self.search_by))
if self.search_by is None:
a_type = a_search['type']
if a_type == 'byname':
self.search_by = 'name'
elif a_type == 'topvote':
self.search_by = 'votes'
logger.error('DE search by is votes')
elif a_type == 'clickcount':
self.search_by = 'clickcount'
elif a_type == 'bitrate':
self.search_by = 'bitrate'
elif a_type == 'codec':
self.search_by = 'codec'
elif a_type == 'country':
self.search_by = 'country'
elif a_type == 'state':
self.search_by = 'state'
elif a_type == 'language':
self.search_by = 'language'
elif a_type == 'tags':
self.search_by = 'tags'
if self.search_by is None:
if p_data:
if 'name' in p_data.keys():
self.search_by = 'name'
if logger.isEnabledFor(logging.DEBUG):
logger.error('p_data searching by: "name" (default)')
if self.search_by is None:
self.search_by = 'name'
if logger.isEnabledFor(logging.DEBUG):
logger.error('forced searching by: "name" (default)')
if logger.isEnabledFor(logging.DEBUG):
logger.debug('searching by: "{}"'.format(self.search_by))
def get_next(self, search_term, start=0, stop=None):
if search_term:
for n in range(start, len(self._raw_stations)):
if self._search_in_station(search_term, n):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('forward search term "{0}" found at {1}'.format(search_term, n))
return n
""" if not found start from list top """
for n in range(0, start):
if self._search_in_station(search_term, n):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('forward search term "{0}" found at {1}'.format(search_term, n))
return n
""" if not found return None """
if logger.isEnabledFor(logging.DEBUG):
logger.debug('forward search term "{}" not found'.format(search_term))
return None
else:
return None
def get_previous(self, search_term, start=0, stop=None):
if search_term:
for n in range(start, -1, -1):
if self._search_in_station(search_term, n):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('backward search term "{0}" found at {1}'.format(search_term, n))
return n
""" if not found start from list end """
for n in range(len(self._raw_stations) - 1, start, -1):
if self._search_in_station(search_term, n):
if logger.isEnabledFor(logging.DEBUG):
logger.debug('backward search term "{0}" found at {1}'.format(search_term, n))
return n
""" if not found return None """
if logger.isEnabledFor(logging.DEBUG):
logger.debug('backward search term "{}" not found'.format(search_term))
return None
else:
return None
def _search_in_station(self, a_search_term, a_station):
guide = (
'name',
'country',
'codec',
'tags',
'bitrate',
'language'
)
for n in guide:
source = self._raw_stations[a_station][n]
if isinstance(source, int):
                ''' this is one of the numeric data fields '''
source = str(source)
if a_search_term.lower() in source.lower():
return True
return False
def _format_url(self, a_search):
''' work on a copy, this way we can change it and not
break "term to widgets" assignment
'''
a_search_copy = deepcopy(a_search)
if a_search_copy['type'] in RADIO_BROWSER_DISPLAY_TERMS.keys():
url = 'http://{0}{1}'.format(
self._server,
'/json/stations/{}'.format(a_search_copy['type'])
)
if a_search_copy['term'] not in ('', '0'):
url += '/{}'.format(a_search_copy['term'])
self._search_type = 0
elif a_search_copy['type'] in RADIO_BROWSER_SEARCH_BY_TERMS.keys():
if a_search_copy['type'].startswith('bycountry') and \
len(a_search_copy['term']) == 2:
a_search_copy['type'] = 'bycountrycodeexact'
a_search_copy['term'] = a_search_copy['term'].upper()
url = 'http://{0}{1}/{2}'.format(
self._server,
'/json/stations/{}'.format(a_search_copy['type']),
a_search_copy['term']
)
self._search_type = 1
elif a_search_copy['type'] == 'search':
url = 'http://{0}{1}'.format(
self._server,
'/json/stations/search'
)
if a_search_copy['post_data']:
if 'country' in a_search_copy['post_data']:
''' look for country code '''
if len(a_search_copy['post_data']['country']) == 2:
                        ''' revert to countrycode '''
a_search_copy['post_data']['countrycode'] = a_search_copy['post_data']['country'].upper()
try:
a_search_copy['post_data'].pop('country', None)
# a_search_copy['post_data'].pop('countryExact', None)
except KeyError:
pass
self._search_type = 2
return url
def format_empty_line(self, width):
if self._output_format == 0:
return -1, ' '
info = (
(),
('bitrate', ),
('votes', 'bitrate'),
('votes', 'clickcount', 'bitrate'),
('votes', 'clickcount', 'bitrate', 'country'),
('votes', 'clickcount', 'bitrate', 'country', 'language'),
('votes', 'clickcount', 'bitrate', 'country', 'state', 'language'),
('votes', 'clickcount', 'bitrate', 'codec', 'country', 'state', 'language', 'tags')
)
out = ['', '']
i_out = []
for i, n in enumerate(info[self._output_format]):
i_out.append(u'│' + ' ' * self._columns_width[n])
out[1] = ''.join(i_out)
name_width = width-len(out[1])
out[0] = ' ' * name_width
if PY3:
return -1, '{0}{1}'.format(*out)
else:
return -1 , '{0}{1}'.format(
out[0],
out[1].encode('utf-8', 'replace')
)
def format_station_line(self, id_in_list, pad, width):
''' Create a formated line for a station
Parameters
----------
id_in_list
id in list of stations (0..len-1)
pad
length of NUMBER
width
final length of created string
Returns
-------
A string of the following format:
NUMBER. STATION NAME [INFO]
where:
NUMBER
Right padded counter (id_in_list + 1)
STATION NAME
Left padded station name
INFO
Station info. Depending on window width, it can be:
[Votes: XX, Clicks: XX, Bitrate: XXXkb, Country: XXXX],
[Votes: XX, Clicks: XX, Bitrate: XXXkb],
[XXXX v, XXXX, cl, XXXkb],
[Bitrate: XXXkb], or
empty string
'''
info = (u'',
u' {0}{1}kb',
u' {0}{1}│{2}kb',
u' {0}{1}│{2}│{3}kb',
u' {0}{1}│{2}│{3}kb│{4}',
u' {0}{1}│{2}│{3}kb│{4}│{5}',
u' {0}{1}│{2}│{3}kb│{4}│{5}│{6}',
u' {0}{1}│{2}│{3}kb│{4}│{5}│{6}│{7}│{8}',
)
self._get_output_format(width)
# logger.error('DE self._output_format = {}'.format(self._output_format))
out = ['{0}. '.format(str(id_in_list + 1).rjust(pad)), '', '']
# format info field
pl = u'├' if self._raw_stations[id_in_list]['played'] else u'│'
if self._output_format == 7:
# full with state
out[2] = ' ' + info[self._output_format].format(
pl,
str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
str(self._raw_stations[id_in_list]['clickcount']).rjust(self._columns_width['clickcount'])[:self._columns_width['clickcount']],
str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2],
self._raw_stations[id_in_list]['codec'].rjust(self._columns_width['codec'])[:self._columns_width['codec']],
self._fix_cjk_string_width(self._raw_stations[id_in_list]['country'].ljust(self._columns_width['country']), self._columns_width['country']),
self._fix_cjk_string_width(self._raw_stations[id_in_list]['state'].ljust(self._columns_width['state']), self._columns_width['state']),
self._fix_cjk_string_width(self._raw_stations[id_in_list]['language'].ljust(self._columns_width['language']), self._columns_width['language']),
self._fix_cjk_string_width(self._raw_stations[id_in_list]['tags'].ljust(self._columns_width['tags']), self._columns_width['tags'])
)
if self._output_format == 6:
# full with state
out[2] = ' ' + info[self._output_format].format(
pl,
str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
str(self._raw_stations[id_in_list]['clickcount']).rjust(self._columns_width['clickcount'])[:self._columns_width['clickcount']],
str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2],
self._fix_cjk_string_width(self._raw_stations[id_in_list]['country'].ljust(self._columns_width['country']), self._columns_width['country']),
self._fix_cjk_string_width(self._raw_stations[id_in_list]['state'].ljust(self._columns_width['state']), self._columns_width['state']),
self._fix_cjk_string_width(self._raw_stations[id_in_list]['language'].ljust(self._columns_width['language']), self._columns_width['language']),
self._fix_cjk_string_width(self._raw_stations[id_in_list]['tags'].ljust(self._columns_width['tags']), self._columns_width['tags'])
)
if self._output_format == 5:
# full with state
out[2] = ' ' + info[self._output_format].format(
pl,
str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
str(self._raw_stations[id_in_list]['clickcount']).rjust(self._columns_width['clickcount'])[:self._columns_width['clickcount']],
str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2],
self._fix_cjk_string_width(self._raw_stations[id_in_list]['country'].ljust(self._columns_width['country']), self._columns_width['country']),
self._fix_cjk_string_width(self._raw_stations[id_in_list]['language'].ljust(self._columns_width['language']), self._columns_width['language']),
)
if self._output_format == 4:
# full or condensed info
out[2] = ' ' + info[self._output_format].format(
pl,
str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
str(self._raw_stations[id_in_list]['clickcount']).rjust(self._columns_width['clickcount'])[:self._columns_width['clickcount']],
str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2],
self._fix_cjk_string_width(self._raw_stations[id_in_list]['country'].ljust(self._columns_width['country']), self._columns_width['country']),
)
elif self._output_format == 2:
out[2] = ' ' + info[self._output_format].format(
pl,
str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2]
)
elif self._output_format == 3:
out[2] = ' ' + info[self._output_format].format(
pl,
str(self._raw_stations[id_in_list]['votes']).rjust(self._columns_width['votes'])[:self._columns_width['votes']],
str(self._raw_stations[id_in_list]['clickcount']).rjust(self._columns_width['clickcount'])[:self._columns_width['clickcount']],
str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2]
)
elif self._output_format == 1:
# Bitrate only
out[2] = info[self._output_format].format(
pl,
str(self._raw_stations[id_in_list]['bitrate']).rjust(self._columns_width['bitrate']-2)[:self._columns_width['bitrate']-2]
)
name_width = width-len(out[0])-cjklen(out[2])
# logger.error('width-len(out[0])-len(out[2]) - {0}-{1}-{2} = {3}'.format(width, len(out[0]), len(out[2]), name_width))
out[1] = self._fix_cjk_string_width(self._raw_stations[id_in_list]['name'].ljust(name_width)[:name_width], name_width)
if PY3:
# if pl == '╞':
# out[2] += '╡'
return (self._raw_stations[id_in_list]['played'],
'{0}{1}{2}'.format(*out))
else:
# on python 2, strings are already in utf-8
return (self._raw_stations[id_in_list]['played'],
'{0}{1}{2}'.format(
out[0].encode('utf-8', 'replace'),
out[1].encode('utf-8', 'replace'),
out[2].encode('utf-8', 'replace')))
def set_encoding(self, id_in_list, new_encoding):
if id_in_list < len(self._raw_stations):
self._raw_stations[id_in_list]['encoding'] = new_encoding
if logger.isEnabledFor(logging.DEBUG):
logger.debug('New encoding set to "{0}" for station "{1}"'.format(new_encoding, self._raw_stations[id_in_list]['name']))
def _fix_cjk_string_width(self, a_string, width):
while cjklen(a_string) > width:
a_string = a_string[:-1]
while cjklen(a_string) < width:
a_string += ' '
return a_string
def _extract_data(self, a_search_result):
ret = []
self._max_len = [0, 0]
if a_search_result:
for n in a_search_result:
ret.append({'name': n['name'].replace(',', ' ')})
                ret[-1]['stationuuid'] = n['stationuuid']
                ret[-1]['url'] = n['url']
                ret[-1]['url_resolved'] = n['url_resolved']
                ret[-1]['played'] = False
                ret[-1]['hls'] = n['hls']
ret[-1]['countrycode'] = n['countrycode']
ret[-1]['country'] = n['country']
ret[-1]['codec'] = n['codec']
ret[-1]['state'] = n['state']
ret[-1]['tags'] = n['tags'].replace(',', ', ')
ret[-1]['homepage'] = n['homepage']
if isinstance(n['clickcount'], int):
# old API
ret[-1]['votes'] = n['votes']
ret[-1]['clickcount'] = n['clickcount']
ret[-1]['bitrate'] = n['bitrate']
else:
# new API
ret[-1]['votes'] = int(n['votes'])
ret[-1]['clickcount'] = int(n['clickcount'])
ret[-1]['bitrate'] = int(n['bitrate'])
ret[-1]['language'] = capitalize_comma_separated_string(n['language'])
ret[-1]['encoding'] = ''
self._get_max_len(ret[-1]['votes'],
ret[-1]['clickcount'])
return ret
def _get_max_len(self, votes, clicks):
''' Calculate the maximum length of numeric_data / country
Parameters
----------
votes
                Number of station's votes (string)
            clicks
                Number of station's clicks (string)
Returns
-------
self._max_len
A list [max votes length,
max clickcount length]
'''
numeric_data = (votes, clicks)
# logger.error('DE numeric_data = {}'.format(numeric_data))
min_data = (6, 7)
for i, x in enumerate(numeric_data):
n = str(x)
if len(n) > self._max_len[i]:
self._max_len[i] = len(n) if len(n) > min_data[i] else min_data[i]
def _get_output_format(self, width):
''' Return output format based on window width
        Parameters
----------
width
Window width
Returns
-------
self._output_format
A number 0..5
'''
# now_width = get_terminal_size().columns - 2
if width <= 50:
self._output_format = 0
elif width < 57:
self._output_format = 1
elif width < 65:
self._output_format = 2
elif width < 80:
self._output_format = 3
elif width < 95:
self._output_format = 4
elif width < 120:
self._output_format = 5
elif width < 145:
self._output_format = 6
else:
self._output_format = 7
def _populate_columns_separators(self, a_tuple, width):
ret = []
for i, n in enumerate(a_tuple):
if i == 0:
# logger.error('DE {0} - {1} = {2} - {3}'.format(width, self._columns_width[n], width-self._columns_width[n]-2, n))
ret.append(width - self._columns_width[n] - 2)
else:
# logger.error('{0} -1 - {1} = {2} - {3}'.format(ret[-1], self._columns_width[n], ret[-1] - 1 - self._columns_width[n], n))
ret.append(ret[-1] - 1 - self._columns_width[n])
ret.reverse()
# logger.error('DE \n\nret = {}\n\n'.format(ret))
return ret
def get_columns_separators(self,
width,
use_old_output_format=False,
adjust=0,
adjust_for_body=False,
adjust_for_header=False,
):
''' Calculates columns separators for a given width
based on self._output_format.
Parameters
----------
width
Window width to use for the calculation.
use_old_output_format
If True, do not calculate self._output_format
(use what's already calculated).
adjust
Delete adjust from the output
Example:
if the output was [55, 67]
and adjust was 2
the output would become [53, 65]
adjust_for_header
Delete self._outer_internal_body_diff from output
This is to be used for displaying the internal header
adjust_for_body
Delete self._outer_internal_body_half_diff from output
This is to be used for changing columns' separators
color, when displaying body lines (stations' lines).
IMPORTANT
---------
The adjust* parameters are mutually exclusive, which means
that ONLY ONE of them can be used at any given call to the
function. If you fail to comply, the result will be wrong.
Returns
-------
        A list containing the column separators (e.g. [55, 65]).
'''
columns_separotors = []
if not use_old_output_format:
self._get_output_format(width)
if self._output_format == 0:
columns_separotors = []
elif self._output_format == 1:
columns_separotors = [width - self._columns_width['bitrate']]
elif self._output_format == 2:
columns_separotors = self._populate_columns_separators(('bitrate', 'votes'), width)
elif self._output_format == 3:
columns_separotors = self._populate_columns_separators(('bitrate', 'clickcount', 'votes'), width)
elif self._output_format == 4:
columns_separotors = self._populate_columns_separators(('country', 'bitrate', 'clickcount', 'votes'), width)
elif self._output_format == 5:
columns_separotors = self._populate_columns_separators(('language', 'country', 'bitrate', 'clickcount', 'votes'), width)
elif self._output_format == 6:
columns_separotors = self._populate_columns_separators(('language', 'state', 'country', 'bitrate', 'clickcount', 'votes'), width)
else:
columns_separotors = self._populate_columns_separators(('tags', 'language', 'state', 'country', 'codec', 'bitrate', 'clickcount', 'votes'), width)
if adjust_for_header and self._output_format == 1:
columns_separotors[0] -= self._outer_internal_body_diff
if adjust_for_body:
if self._output_format == 1:
columns_separotors[0] -= self._outer_internal_body_half_diff
else:
for n in range(0, len(columns_separotors)):
columns_separotors[n] += self._outer_internal_body_half_diff
if adjust > 0:
for n in range(0, len(columns_separotors)):
columns_separotors[n] -= adjust
return columns_separotors
def get_history_from_search(self):
if self._search_win:
self._search_history_index, self._default_search_history_index, history = self._search_win.get_history()
logger.error('DE search_history_index = {}'.format(self._search_history_index))
logger.error('DE search_default_history_index = {}'.format(self._default_search_history_index))
logger.error('DE history = {}'.format(history))
self._search_history = deepcopy(history)
def get_internal_header(self, pad, width):
guide = {
'name': 'Name',
'votes': ' Votes',
'clickcount': ' Clicks',
'bitrate': 'Bitrate',
'codec': 'Codec',
'country': 'Country',
'state': 'State',
'language': 'Language',
'tags': 'Tags',
}
# logger.error('DE search = {}'.format(self._search_history[self._search_history_index]))
reset_search_elements = False
if self.search_by is None:
reset_search_elements = True
self._get_search_elements(self._search_history[self._search_history_index])
# logger.error('DE search by = {}'.format(self.search_by))
columns = ((),
('Bitrate', ),
(' Votes', 'Bitrate'),
(' Votes', ' Clicks', 'Bitrate'),
(' Votes', ' Clicks', 'Bitrate', 'Country'),
(' Votes', ' Clicks', 'Bitrate', 'Country', 'Language'),
(' Votes', ' Clicks', 'Bitrate', 'Country', 'State', 'Language'),
(' Votes', ' Clicks', 'Bitrate', 'Codec', 'Country', 'State', 'Language', 'Tags')
)
columns_separotors = self.get_columns_separators(width, use_old_output_format=True)
if self._output_format == 1:
columns_separotors[0] -= 2
title = '# '.rjust(pad), ' Name '
if reset_search_elements:
self._old_search_by = self.search_by
# logger.error('DE search by = {}'.format(self.search_by))
# logger.error('DE Looking for: "{}"'.format(guide[self.search_by]))
# logger.error('DE Names = {}'.format(columns[self._output_format]))
if guide[self.search_by] == 'Name':
highlight = -2
else:
try:
highlight = columns[self._output_format].index(guide[self.search_by])
except:
highlight = -1
return highlight, ((title, columns_separotors, columns[self._output_format]), )
def select_servers(self, with_config=False, return_function=None, init=False):
''' RadioBrowser select servers '''
if init:
self._server_selection_window = None
if self._server_selection_window is None:
self._old_server = self._server
if with_config:
self._server_selection_window = RadioBrowserServersSelect(
self._config_win._win,
self._dns_info.server_urls,
self._config_win._params[0]['server'],
self._config_win._params[0]['ping_count'],
self._config_win._params[0]['ping_timeout'],
Y=11, X=19,
show_random=True,
return_function=return_function)
else:
self._server_selection_window = RadioBrowserServersSelect(
self.parent,
self._dns_info.server_urls,
self._server,
self._default_ping_count,
self._default_ping_timeout,
return_function=return_function
)
else:
self._server_selection_window.set_parent(self.parent)
self.keyboard_handler = self._server_selection_window
self.server_window_from_config = with_config
self._server_selection_window.show()
return self._server_selection_window
def sort(self):
'''
Create and show the Sort window
'''
if self._sort is None:
self._get_search_elements(
self._search_history[self._search_history_index]
)
self._sort = RadioBrowserSort(
parent=self.parent,
search_by=self.search_by
)
self.keyboard_handler = self._sort
self._sort.show()
def _calculate_do_ping(self):
if self._default_ping_count == 0 or self._default_ping_timeout == 0:
self._do_ping = False
else:
self._do_ping = True
return self._do_ping
def read_config(self):
''' RadioBrowser read config '''
self.browser_config.read_config()
self.AUTO_SAVE_CONFIG = self.browser_config.auto_save
self._default_max_number_of_results = int(self.browser_config.limit)
self._default_search_history_index = self._search_history_index = self.browser_config.default
self._search_history = self.browser_config.terms
self._default_server = self.browser_config.server
self._default_ping_count = self.browser_config.ping_count
self._default_ping_timeout = self.browser_config.ping_timeout
self._calculate_do_ping()
self._server = None
if self._default_server:
if logger.isEnabledFor(logging.INFO):
logger.info('RadioBrowser: pinging user default server: ' + self._default_server)
if self._do_ping:
if ping(self._default_server,
count=self._default_ping_count,
timeout_in_seconds=self._default_ping_timeout) == 1:
self._server = self._default_server
if logger.isEnabledFor(logging.INFO):
logger.info('ping was successful!')
logger.info('RadioBrowser: server is set by user: ' + self._server)
else:
self._server = self._default_server
if not self._server:
random_server = self._dns_info.give_me_a_server_url()
# logger.error('DE random_server = {}'.format(random_server))
if random_server is None:
if logger.isEnabledFor(logging.INFO):
logger.info('RadioBrowser: No server is reachable!')
return False
self._server = random_server
if logger.isEnabledFor(logging.INFO):
logger.info('RadioBrowser: using random server: ' + self._server)
if logger.isEnabledFor(logging.INFO):
logger.info('RadioBrowser: result limit = {}'.format(self._default_max_number_of_results))
logger.info('RadioBrowser: default search term = {}'.format(self._default_search_history_index))
logger.info('RadioBrowser: search history')
for i, n in enumerate(self._search_history):
logger.info(' {0}: {1}'.format(i, n))
self._get_title()
return True
def keypress(self, char):
''' RadioBrowser keypress
Returns:
-1: Cancel
0: Done, result is in ....
1: Continue
'''
# logger.error('DE keyboard handler = {}'.format(self.keyboard_handler))
ret = self.keyboard_handler.keypress(char)
# logger.error('DE online_browser ret = {}'.format(ret))
if ret == 0:
if self.keyboard_handler == self._sort:
self.search_by = self._sort.search_by
if self.search_by == self._old_search_by:
self.reverse = not self.reverse
else:
self.reverse = False
if self.search_by != self._old_search_by:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('search by = "{}"'.format(self.search_by))
''' set reverse to True for numerical values
when changing sort type
'''
if self.search_by in (
'votes',
'clickcount',
'bitrate'
):
self.reverse = True
if logger.isEnabledFor(logging.DEBUG):
                            logger.debug('setting reverse to {}'.format(self.reverse))
self._raw_stations = sorted(self._raw_stations, key=itemgetter(self.search_by), reverse=self.reverse)
self._old_search_by = self.search_by
elif self.keyboard_handler == self._server_selection_window:
if ret == 0:
self._server = self._server_selection_window.server
if logger.isEnabledFor(logging.INFO):
logger.info('RadioBrowser: user selected server is ' + self._server)
self._get_title()
return ret
def line_editor_has_focus(self):
if self._search_win:
return self._search_win.line_editor_has_focus()
return False
def do_search(self, parent=None, init=False):
if init:
self._search_win = RadioBrowserSearchWindow(
parent=parent,
config=self.browser_config,
limit=self._default_max_number_of_results,
init=init
)
self._search_win.set_search_history(
self._default_search_history_index,
self._search_history_index,
self._search_history, init)
self.keyboard_handler = self._search_win
self._search_win.show()
def show_config(self, parent=None, init=False):
if init:
self._config_win = RadioBrowserConfigWindow(
parent=parent,
config=self.browser_config,
dns_info=self._dns_info,
current_auto_save=self.AUTO_SAVE_CONFIG,
current_server=self._default_server,
current_history=self._search_history,
current_history_id=self._default_search_history_index,
current_limit=self._default_max_number_of_results,
current_ping_count=self._default_ping_count,
current_ping_timeout=self._default_ping_timeout,
init=init
)
self.keyboard_handler = self._config_win
self._config_win.show(parent=parent)
class RadioBrowserConfig(object):
    ''' RadioBrowser config class
Parameters:
auto_save : Boolean
server : string
default : int (id on terms)
            ping_timeout : int (ping timeout in seconds)
ping_count : int (number of ping packages)
            terms : list of dicts (the actual search parameters)
'''
auto_save = False
server = ''
default = 1
limit = '100'
terms = []
dirty = False
ping_count = 1
ping_timeout = 1
def __init__(self, stations_dir):
self.config_file = path.join(stations_dir, 'radio-browser-config')
def read_config(self):
''' RadioBrowserConfig read config '''
self.terms = [{ 'type': '',
'term': '100',
'post_data': {}
}]
self.default = 1
self.auto_save = False
self.limit = 100
self.ping_count = 1
self.ping_timeout = 1
lines = []
term_str = []
try:
with open(self.config_file, 'r') as cfgfile:
lines = [line.strip() for line in cfgfile if line.strip() and not line.startswith('#') ]
except:
self.terms.append({
'type': 'topvote',
'term': '100',
'post_data': {'reverse': 'true'}
})
if logger.isEnabledFor(logging.DEBUG):
logger.debug('RadioBrowser: error reading config, reverting to defaults')
return False
for line in lines:
if '=' in line:
# logger.error('DE line = "' + line + '"')
sp = line.split('=')
for n in range(0, len(sp)):
sp[n] = sp[n].strip()
# logger.error('DE sp = {}'.format(sp))
if sp[1]:
if sp[0] == 'AUTO_SAVE_CONFIG':
self.auto_save = True if sp[1].lower() == 'true' else False
elif sp[0] == 'DEFAULT_SERVER':
self.server = sp[1]
elif sp[0] == 'DEFAULT_LIMIT':
try:
self.limit = int(sp[1])
except:
self.limit = '100'
elif sp[0] == 'SEARCH_TERM':
term_str.append(sp[1])
elif sp[0] == 'PING_COUNT':
try:
self.ping_count = int(sp[1])
except:
self.ping_count = 1
elif sp[0] == 'PING_TIMEOUT':
try:
self.ping_timeout = int(sp[1])
except:
self.ping_timeout = 1
if term_str:
for n in range(0, len(term_str)):
if term_str[n].startswith('*'):
term_str[n] = term_str[n][1:]
self.default = n + 1
term_str[n] = term_str[n].replace("'", '"')
# logger.error('term {0} = "{1}"'.format(n, term_str[n]))
try:
self.terms.append(json.loads(term_str[n]))
except:
try:
if logger.isEnabledFor(logging.ERROR):
logger.error('RadioBrowser: error inserting search term {}'.format(n))
except:
if logger.isEnabledFor(logging.ERROR):
                            logger.error('RadioBrowser: error inserting search item id {}'.format(n))
else:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('RadioBrowser: no search terms found, reverting to defaults')
self.terms.append({
'type': 'topvote',
'term': '100',
'post_data': {'reverse': 'true'}
})
return False
self.terms[0]['term'] = self.limit
# logger.error('DE limit = {}'.format(self.limit))
# logger.error('DE server = ' + self.server)
# logger.error('DE default = {}'.format(self.default))
# logger.error('DE terms = {}'.format(self.terms))
return True
def save_config(self,
auto_save,
search_history,
search_default_history_index,
default_server,
default_ping_count,
default_ping_timeout,
default_max_number_of_results):
self.auto_save = auto_save
self.server = default_server if 'Random' not in default_server else ''
        self.default = search_default_history_index
        self.limit = default_max_number_of_results
self.terms = deepcopy(search_history)
txt = '''##############################################################################
# RadioBrowser config file for PyRadio #
##############################################################################
#
# Auto save config
# If True, the config will be automatically saved upon
# closing RadioBrowser. Otherwise, confirmation will be asked
# Possible values: True, False (default)
AUTO_SAVE_CONFIG = '''
txt += str(auto_save)
txt += '''
# Default server
# The server that RadioBrowser will use by default
# Default: empty string (use random server)
DEFAULT_SERVER = '''
txt += default_server
txt += '''
# Default maximum number of returned results
# for any query to a RadioBrowser server
# Default value: 100
DEFAULT_LIMIT = '''
txt += str(default_max_number_of_results)
txt += '''
# server pinging parameters
# set any parameter to 0 to disable pinging
# number of packages to send
PING_COUNT = '''
txt += str(default_ping_count)
txt += '''
# timeout in seconds
PING_TIMEOUT = '''
txt += str(default_ping_timeout)
txt += '''
# List of "search terms" (queries)
# An asterisk specifies the default search term (the
# one activated when RadioBrowser opens up)
# Default = {'type': 'topvote',
# 'term': '100',
# 'post_data': {'reverse': 'true'}
# }
#
'''
for n in range(1, len(search_history)):
asterisk = '*' if n == search_default_history_index else ''
if PY3:
txt += 'SEARCH_TERM = ' + asterisk + str(search_history[n]) + '\n'
else:
txt += 'SEARCH_TERM = ' + asterisk + str(search_history[n]).replace('{u\'', '{\'').replace('u\'', '\'') + '\n'
try:
with open(self.config_file, 'w') as cfgfile:
cfgfile.write(txt)
except:
if logger.isEnabledFor(logging.ERROR):
logger.error('Saving Online Browser config file failed')
return False
self.dirty = False
if logger.isEnabledFor(logging.INFO):
logger.info('Saved Online Browser config file')
return True
class RadioBrowserConfigWindow(object):
BROWSER_NAME = 'RadioBrowser'
TITLE = ' RadioBrowser Config '
_win = _widgets = _config = _history = _dns = None
_server_selection_window = None
_default_history_id = _focus = 0
    _auto_save = _showed = False
invalid = False
_widgets = None
_params = []
_focused = 0
_token = ''
server_window_from_config = False
keyboard_handler = None
enable_servers = True
def __init__(
self,
parent,
config=None,
dns_info=None,
current_auto_save=False,
current_server='',
current_ping_count=1,
current_ping_timeout=1,
current_history=None,
current_history_id=-1,
current_limit=100,
init=False,
stations_dir=None
):
''' Parameters
0: working
1: current in browser window
2: from config
'''
if len(self._params) == 0:
for i in range(0, 3):
self._params.append(
{'auto_save': False,
'server': '',
'default': 1,
'limit': 100,
'ping_count': 1,
'ping_timeout': 1,
'terms': [{
'type': 'topvote',
'term': '100',
'post_data': {'reverse': 'true'}
}]},
)
# self._print_params()
self._win = self._parent = parent
self.maxY, self.maxX = self._parent.getmaxyx()
if config:
self._config = config
else:
self._config = RadioBrowserConfig(stations_dir)
self._config.read_config()
if dns_info:
self._dns_info = dns_info
else:
self._dns_info = RadioBrowserDns()
self._init_set_working_params(current_auto_save,
current_server,
current_ping_count,
current_ping_timeout,
current_limit,
current_history_id,
current_history
)
@property
def urls(self):
return self._dns_info.server_urls
@urls.setter
def urls(self, val):
return
@property
def focus(self):
return self._focus
@focus.setter
def focus(self, val):
if val in range(0, len(self._widgets)):
self._focus = val
else:
if val < 0:
self._focus = len(self._widgets) - 1
else:
self._focus = 0
if self._showed:
self.show()
def _fix_server(self, server):
if server == '':
return 'Random'
return server
def _focus_next(self):
if self._focused == len(self._widgets) - 1:
self._focused = 0
else:
self._focused += 1
while not self._widgets[self._focused].enabled:
self._focus_next()
return
self._refresh()
def _focus_previous(self):
if self._focused == 0:
self._focused = len(self._widgets) - 1
else:
self._focused -= 1
while not self._widgets[self._focused].enabled:
self._focus_previous()
return
self._refresh()
def _refresh(self):
self._fix_focus()
self._win.refresh()
def _fix_focus(self, show=True):
for i, widg in enumerate(self._widgets):
widg.focused = True if self._focused == i else False
if show:
for n in self._widgets:
n.show(self._win)
def _init_set_working_params(self,
auto_save,
server,
ping_count,
ping_timeout,
limit,
default,
terms
):
if terms is None:
self._revert_to_default_params()
self._params[0]['auto_save'] = self._config.auto_save
self._params[0]['server'] = self._fix_server(self._config.server)
self._params[0]['ping_count'] = self._config.ping_count
self._params[0]['ping_timeout'] = self._config.ping_timeout
self._params[0]['limit'] = self._config.limit
self._params[0]['default'] = self._config.default
self._params[0]['terms'] = deepcopy(self._config.terms)
else:
self._params[0]['auto_save'] = auto_save
self._params[0]['server'] = self._fix_server(server)
self._params[0]['ping_count'] = ping_count
self._params[0]['ping_timeout'] = ping_timeout
self._params[0]['limit'] = limit
self._params[0]['default'] = default
self._params[0]['terms'] = deepcopy(terms)
self._params[1]['auto_save'] = self._params[0]['auto_save']
self._params[1]['server'] = self._params[0]['server']
self._params[1]['ping_count'] = self._params[0]['ping_count']
self._params[1]['ping_timeout'] = self._params[0]['ping_timeout']
self._params[1]['default'] = self._params[0]['default']
self._params[1]['limit'] = self._params[0]['limit']
self._params[1]['terms'] = deepcopy(self._params[0]['terms'])
def _revert_to_saved_params(self):
self._revert_params(1)
def _revert_to_default_params(self):
self._revert_params(2)
def is_config_dirty(self):
return self._config.dirty
def reset_dirty_config(self):
self._config.dirty = False
def _revert_params(self, index):
self._params[0]['auto_save'] = self._params[index]['auto_save']
self._params[0]['server'] = self._fix_server(self._params[index]['server'])
self._params[0]['server'] = self._fix_server(self._params[index]['server'])
self._params[0]['ping_count'] = self._params[index]['ping_count']
self._params[0]['ping_timeout'] = self._params[index]['ping_timeout']
self._params[0]['limit'] = self._params[index]['limit']
self._params[0]['default'] = self._params[index]['default']
self._params[0]['terms'] = deepcopy(self._params[index]['terms'])
''' set to widgets '''
if self._widgets:
self._widgets[0].value = self._params[0]['auto_save']
self._widgets[1].value = int(self._params[0]['limit'])
self._widgets[2].value = int(self._params[0]['ping_count'])
self._widgets[3].value = int(self._params[0]['ping_timeout'])
self._widgets[4].string = self._params[0]['server'] if self._params[0]['server'] else 'Random'
            # TODO: set ping count and timeout
self._fix_ping_enable()
for n in self._widgets:
n.show(self._win)
self._win.refresh()
# self._print_params()
def _fix_ping_enable(self):
self._widgets[2].enabled = True
self._widgets[3].enabled = True
if self._widgets[2].value == 0:
self._widgets[3].enabled = False
elif self._widgets[3].value == 0:
self._widgets[2].enabled = False
def calculate_dirty(self):
self._config.dirty = False
for n in (
'auto_save', 'server',
'ping_count', 'ping_timeout',
'limit','default', 'terms'
):
if self._params[0][n] != self._params[1][n]:
self._config.dirty = True
break
self.print_title()
def print_title(self):
self._win.box()
token = ' *' if self._config.dirty else ''
if token:
title = self.TITLE[1:]
self._win.addstr(
0,
int((self.maxX - len(title)) / 2) - 2,
token,
curses.color_pair(3))
self._win.addstr(
title,
curses.color_pair(4))
else:
self._win.addstr(
0,
int((self.maxX - len(self.TITLE)) / 2),
self.TITLE,
curses.color_pair(4))
self._win.refresh()
def show(self, parent, init=False):
self._parent = parent
pY, pX = self._parent.getmaxyx()
# logger.error('DE pY = {}, pX = {}'.format(pY, pX))
self.Y, self.X = self._parent.getbegyx()
if self.maxY != pY or self.maxX != pX:
pY, pX = self._parent.getmaxyx()
# logger.error('DE pY = {}, pX = {}'.format(pY, pX))
self.maxY = pY
self.maxX = pX
self._win = self._parent
if self.maxX < 80 or self.maxY < 22:
self._too_small = True
else:
self._too_small = False
self._win.bkgdset(' ', curses.color_pair(5))
self._win.erase()
self.print_title()
if self._too_small:
            # TODO Print message
msg = 'Window too small'
self._win.addstr(
int(self.maxY/2), int((self.maxX - len(msg))/2),
msg, curses.color_pair(5)
)
self._win.refresh()
return
if self._widgets is None:
self._widgets = []
self._widgets.append(
SimpleCursesBoolean(
Y=2, X=3,
window=self._win,
color=curses.color_pair(5),
color_focused=curses.color_pair(6),
color_not_focused=curses.color_pair(4),
color_disabled=curses.color_pair(5),
value=self._params[0]['auto_save'],
string='Auto save config: {0}',
full_selection=(2,59)
)
)
self._widgets[-1].token = 'auto_save'
self._widgets[-1].id = 0
self._widgets.append(
SimpleCursesCounter(
Y=5, X=3,
window=self._win,
color=curses.color_pair(5),
color_focused=curses.color_pair(6),
color_not_focused=curses.color_pair(4),
color_disabled=curses.color_pair(5),
minimum=0, maximum=1000,
step=1, big_step=10,
value=self._params[0]['limit'],
string='Maximum number of results: {0}',
full_selection=(2,59)
)
)
self._widgets[-1].token = 'limit'
self._widgets[-1].id = 1
self._widgets.append(
SimpleCursesCounter(
Y=7, X=3,
window=self._win,
color=curses.color_pair(5),
color_focused=curses.color_pair(6),
color_not_focused=curses.color_pair(4),
color_disabled=curses.color_pair(5),
minimum=0, maximum=9,
step=1, big_step=5,
number_length=1,
value=self._params[0]['ping_count'],
string='Number of ping packages: {0}',
full_selection=(2,59)
)
)
self._widgets[-1].token = 'ping_count'
self._widgets[-1].id = 2
self._widgets.append(
SimpleCursesCounter(
Y=8, X=3,
window=self._win,
color=curses.color_pair(5),
color_focused=curses.color_pair(6),
color_not_focused=curses.color_pair(4),
color_disabled=curses.color_pair(5),
minimum=0, maximum=9,
step=1, big_step=5,
number_length=1,
value=self._params[0]['ping_timeout'],
string='Ping timeout (seconds): {0}',
full_selection=(2,59)
)
)
self._widgets[-1].token = 'ping_timeout'
self._widgets[-1].id = 3
self._widgets.append(
SimpleCursesString(
Y=10, X=3,
parent=self._win,
caption='Default Server: ',
string=self._params[0]['server'],
color=curses.color_pair(5),
color_focused=curses.color_pair(6),
color_not_focused=curses.color_pair(4),
color_disabled=curses.color_pair(5),
full_selection=(2,59)
)
)
self._widgets[-1].token = 'server'
self._widgets[-1].id = 4
self._widgets[-1].enabled = self.enable_servers
self._fix_focus(show=False)
for n in self._widgets:
n.show(self._win)
self._win.addstr(1, 1, 'General Options', curses.color_pair(4))
self._win.addstr(3, 5, 'If True, no confirmation will be asked before saving', curses.color_pair(5))
self._win.addstr(4, 5, 'the configuration when leaving the search window', curses.color_pair(5))
self._win.addstr(6, 5, 'A value of -1 will disable return items limiting', curses.color_pair(5))
self._win.addstr(9, 5, 'Set any ping parameter to 0 to disable server pinging', curses.color_pair(5))
            self._win.addstr(11, 5, 'Set to "Random" if you cannot connect to service', curses.color_pair(5))
self._win.addstr(12, 1, 'Default Search Term', curses.color_pair(4))
self._win.addstr(13, 5, 'Not implemented yet', curses.color_pair(5))
self._fix_ping_enable()
self._win.refresh()
self._showed = True
# self._print_params()
def save_config(self):
''' RadioBrowserConfigWindow save config
Returns:
-2: config saved
-3: error saving config
-4: config not modified
'''
if self._config.dirty:
ret = self._config.save_config(
auto_save=self._params[0]['auto_save'],
search_history=self._params[0]['terms'],
search_default_history_index=self._params[0]['default'],
default_server=self._params[0]['server'] if 'Random' not in self._params[0]['server'] else '',
default_ping_count=self._params[0]['ping_count'],
default_ping_timeout=self._params[0]['ping_timeout'],
default_max_number_of_results=self._params[0]['limit']
)
if ret:
self._config.dirty = False
''' config saved '''
return -2
else:
''' error saving config '''
return -3
''' config not modified '''
return -4
def select_servers(self, with_config=False, return_function=None, init=False):
''' RadioBrowserConfigWindow select servers '''
if init:
self._server_selection_window = None
if self._server_selection_window is None:
self._server_selection_window = RadioBrowserServersSelect(
self._win,
self._dns_info.server_urls,
self._params[0]['server'],
self._params[0]['ping_count'],
self._params[0]['ping_timeout'],
Y=11, X=19,
show_random=True,
return_function=return_function)
else:
self._server_selection_window.set_parent(self._win)
# self.keyboard_handler = self._server_selection_window
self._server_selection_window.show()
return self._server_selection_window
def get_server_value(self, a_server=None):
if a_server is not None:
            act_server = a_server if 'Random' not in a_server else ''
self._params[0]['server'] = act_server
self._widgets[4].string = act_server if act_server != '' else 'Random'
else:
try:
self._params[0]['server'] = self._server_selection_window.servers.server
logger.error('---=== 1. Server Selection is None ===---')
self._server_selection_window = None
self._widgets[4].string = self._params[0]['server'] if self._params[0]['server'] else 'Random'
except AttributeError:
pass
self._widgets[4].show(parent=self._win)
self._win.refresh()
def _print_params(self):
logger.error('\n\n')
for i, n in enumerate(self._params):
logger.error('-- id: {}'.format(i))
logger.error(n['auto_save'])
logger.error(n['server'])
logger.error(n['ping_count'])
logger.error(n['ping_timeout'])
logger.error(n['limit'])
logger.error(n['default'])
logger.error(n['terms'])
def keypress(self, char):
''' RadioBrowserConfigWindow keypress
Returns:
-4: config not modified
-3: error saving config
-2: config saved successfully
-1: Cancel
0: Save Config
1: Continue
2: Display help
3: Display server selection window
4: Return from server selection window
'''
if self._server_selection_window:
# ret = self._server_selection_window.keypress(char)
if self._server_selection_window.return_value < 1:
if self._server_selection_window.return_value == 0:
# logger.error('DE SSW {}'.format(self._params[0]))
self._params[0]['server'] = self._server_selection_window.servers.server
# logger.error('DE SSW {}'.format(self._params[0]))
logger.error('---=== Server Selection is None ===---')
self._server_selection_window = None
if char in (
curses.KEY_EXIT, 27, ord('q')
):
return -1
elif char in (ord(' '), curses.KEY_ENTER, ord('\n'),
ord('\r')) and self._focus == len(self._widgets) - 2:
''' enter on ok button '''
ret = self._handle_new_or_existing_search_term()
# self._print_params()
return 0 if ret == 1 else ret
elif char == ord('?'):
return 2
elif char in (ord('\t'), 9):
self._focus_next()
elif char in (curses.KEY_BTAB, ):
self._focus_previous()
elif char == ord('s'):
return self.save_config()
elif char == ord('r'):
self._revert_to_saved_params()
self.calculate_dirty()
elif char == ord('d'):
self._revert_to_default_params()
self._config.dirty = False
self.calculate_dirty()
elif char in (curses.KEY_UP, ord('j')) and self._focused < 5:
self._focus_previous()
elif char in (curses.KEY_DOWN, ord('k')) and self._focused < 5:
self._focus_next()
else:
if self._focused < 4:
ret = self._widgets[self._focused].keypress(char)
if ret == 0:
if self._focused == 0:
''' auto save '''
self._widgets[0].show(self._win)
self._params[0]['auto_save'] = self._widgets[0].value
self.calculate_dirty()
else:
''' limit '''
self._widgets[self._focused].show(self._win)
self._params[0][self._widgets[self._focused].token] = self._widgets[self._focused].value
if self._focused == 2 or self._focused == 3:
self._fix_ping_enable()
self._win.refresh()
#self._print_params()
self.calculate_dirty()
elif self._focused == 4:
''' server '''
if char in (ord(' '), curses.KEY_ENTER, ord('\n'),
ord('\r'), ord('l'), curses.KEY_RIGHT):
''' open server selection window '''
return 3
else:
''' terms '''
pass
return 1
class RadioBrowserSearchWindow(object):
NUMBER_OF_WIDGETS_AFTER_SEARCH_SECTION = 3
_cnf = None
search_by_items = (
'Votes',
'Clicks',
'Recent click',
'Recently changed'
)
sort_by_items = (
'No sorting',
'Random',
'Name',
'Tag',
'Country',
'State',
'Language',
'Votes',
'Clicks',
'Bitrate',
'Codec',
)
yx = [
[0, 0], # (0) search check box
[0, 0], # (1) caption 0
[0, 0], # (2) caption 1
[0, 0], # (3) caption 2
[0, 0], # (4) caption 3
[0, 0], # (5) caption 4
[0, 0], # (6) caption 5
[0, 0], # (7) limit
[0, 0], # (8) buttons
]
captions = (
'',
'Name',
'Country',
'Language',
'Tag',
'State',
'Codec')
''' vertical placement of widgets
used for navigation
'''
_left_column = (0, 1, 4, 5, 6, 11, 12, 17, 18)
_middle_column = (7, 8, 13, 14, 18)
_right_column = (2, 3, 9, 10, 16, 19)
''' line editors ids '''
_line_editor_id = []
''' columns widget ids '''
_columns_id = []
''' checkboxes ids to enable/disable columns widgets '''
_checkbox_to_enable_widgets = (0, 4)
_default_limit = 100
''' _selected_history_id : current id in search window
_history_id : current id (active in browser) - corresponds in browser to _search_history_index
_default_history_id : default id (autoload for service) - corresponds in browser to _default_search_history_index
'''
_history_id = _selected_history_id = _default_history_id = 1
_history = []
def __init__(self,
parent,
config,
limit=100,
init=False
):
self._parent = parent
self._cnf = config
self._default_limit = limit
self._init = init
self._too_small = False
self._focus = 0
self._win = None
self.maxY = self.maxX = 0
self.TITLE = ' RadioBrowser Search '
''' we have two columns;
this is the width of each of them
'''
self._half_width = 0
self._widgets = [ None ]
def __del__(self):
for a_widget in self._widgets:
# logger.error('DE deleting: {}'.format(a_widget))
a_widget = None
@property
def parent(self):
return self._parent
@parent.setter
def parent(self, val):
self._parent = val
@property
def focus(self):
return self._focus
@focus.setter
def focus(self, val):
if val in range(0, len(self._widgets)):
self._focus = val
else:
if val < 0:
self._focus = len(self._widgets) - 1
else:
self._focus = 0
self.show()
def _search_term_to_widgets(self, a_search):
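        ''' reset all widgets to their defaults and populate them from a history
            item, i.e. a dict of the form {'type': ..., 'term': ..., 'post_data': {}}
        '''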
# logger.error('DE =========================')
# logger.error('DE term = {}'.format(a_search))
# logger.error('DE type = {}'.format(a_search['type']))
self._widgets[1].selection = self._widgets[1].active = 0
self._widgets[2].selection = self._widgets[2].active = 0
self._widgets[3].checked = False
self._widgets[-self.NUMBER_OF_WIDGETS_AFTER_SEARCH_SECTION].value = self._default_limit
self._widgets[-2].enabled = True
self._widgets[-1].enabled = True
for i in range(5, len(self._widgets) - self.NUMBER_OF_WIDGETS_AFTER_SEARCH_SECTION):
if type(self._widgets[i]).__name__ == 'SimpleCursesLineEdit':
self._widgets[i].string = ''
else:
self._widgets[i].checked = False
if a_search['type'] == '':
''' for empty type '''
self._widgets[0].checked = False
self._widgets[4].checked = True
self._focus = 4
elif a_search['type'] in RADIO_BROWSER_DISPLAY_TERMS.keys():
''' populate the "Display by" part '''
self._widgets[0].checked = True
self._widgets[4].checked = False
self._widgets[1].selection = self._widgets[1].active = RADIO_BROWSER_DISPLAY_TERMS[a_search['type']]
self._widgets[-self.NUMBER_OF_WIDGETS_AFTER_SEARCH_SECTION].value = int(a_search['term'])
for i in range(5, len(self._widgets) - self.NUMBER_OF_WIDGETS_AFTER_SEARCH_SECTION):
try:
self._widgets[i].string = ''
except:
self._widgets[i].checked = False
self._widgets[i].enabled = False
self._focus = 0
# logger.error('DE RADIO_BROWSER_DISPLAY_TERMS[a_search["type"]] = {}'.format(RADIO_BROWSER_DISPLAY_TERMS[a_search['type']]))
else:
''' populate the "Search" part '''
self._widgets[0].checked = False
self._widgets[4].checked = True
self._widgets[1].selection = self._widgets[1].active = 0
self._widgets[2].selection = self._widgets[2].active = 0
self._widgets[3].checked = False
for i in range(5, len(self._widgets) - self.NUMBER_OF_WIDGETS_AFTER_SEARCH_SECTION):
self._widgets[i].enabled = True
if a_search['type'] in RADIO_BROWSER_SEARCH_BY_TERMS.keys():
line_edit = RADIO_BROWSER_SEARCH_BY_TERMS[a_search['type']]
if a_search['type'].endswith('exact'):
# logger.error('DE Exact checked!!!')
self._widgets[line_edit-1].checked = True
self._widgets[line_edit].string = a_search['term']
self._focus = 4
# logger.error('DE RADIO_BROWSER_SEARCH_BY_TERMS[a_search["type"]] = {}'.format(RADIO_BROWSER_SEARCH_BY_TERMS[a_search['type']]))
elif a_search['type'] == 'search':
''' populate the "Search" part '''
s_id_list = []
            for n in a_search['post_data'].items():
                # order / limit / reverse keys are handled separately below
                if n[0] in RADIO_BROWSER_SEARCH_TERMS.keys():
                    s_id = RADIO_BROWSER_SEARCH_TERMS[n[0]]
                    # logger.error('DE s_id = {}'.format(s_id))
                    if type(self._widgets[s_id]).__name__ == 'SimpleCursesLineEdit':
                        self._widgets[s_id].string = n[1]
                        # logger.error('DE n[0] = {0}, string = "{1}"'.format(n[0], n[1]))
                    else:
                        self._widgets[s_id].checked = bool(n[1])
                        # logger.error('DE n[0] = {0}, n[1] = {1}, bool = {2}'.format(n[0], n[1], bool(n[1])))
                    s_id_list.append(s_id)
self._focus = 4
if a_search['post_data']:
for n in a_search['post_data'].keys():
if n == 'order':
order = a_search['post_data']['order']
if order in RADIO_BROWSER_SEARCH_SORT_TERMS.keys():
order_id = RADIO_BROWSER_SEARCH_SORT_TERMS[order]
self._widgets[2].selection = self._widgets[2].active = order_id
elif n == 'limit':
self._widgets[-self.NUMBER_OF_WIDGETS_AFTER_SEARCH_SECTION].value = int(a_search['post_data']['limit'])
elif n == 'reverse':
self._widgets[3].checked = bool(a_search['post_data']['reverse'])
# logger.error('DE =========================')
def _widgets_to_search_term(self):
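        ''' build a search term dict {'type': '', 'term': '', 'post_data': {}} from
            the current widget values; returns None on invalid search parameters
        '''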
ret = {'type': '', 'term': '', 'post_data': {}}
type_part = ''
order_part = ''
if self._widgets[0].checked:
''' type is "Display by" '''
''' get search type '''
for key in RADIO_BROWSER_DISPLAY_TERMS.items():
if key[1] == self._widgets[1].active:
type_part = key[0]
break
# logger.error('DE type_part = "{}"'.format(type_part))
if type_part:
ret['type'] = type_part
else:
logger.error('RadioBrowser Search: Error in search parameters!')
return None
''' get limit (term)'''
ret['term'] = str(self._widgets[-self.NUMBER_OF_WIDGETS_AFTER_SEARCH_SECTION].value)
else:
''' type is search (simple or advanced) '''
what_type = []
for i in range(5, len(self._widgets) - self.NUMBER_OF_WIDGETS_AFTER_SEARCH_SECTION):
if type(self._widgets[i]).__name__ == 'SimpleCursesLineEdit':
if self._widgets[i].string:
what_type.append(i)
if len(what_type) == 0:
logger.error('RadioBrowser Search: Error in search parameters!')
return None
if len(what_type) == 1:
''' simple search '''
logger.error('DE simple search')
for n in RADIO_BROWSER_SEARCH_BY_TERMS.items():
if n[1] == what_type[0]:
                        ret['type'] = n[0]
logger.error('DE type = {}'.format(ret['type']))
break
if self._widgets[what_type[0] - 1].checked:
ret['type'] += 'exact'
ret['term'] = self._widgets[what_type[0]].string
else:
''' advanced search '''
# logger.error('DE advanced search')
ret['type'] = 'search'
# logger.error('DE what_type = {}'.format(what_type))
for a_what_type in what_type:
for n in RADIO_BROWSER_SEARCH_TERMS.items():
if n[1] == a_what_type:
if n[0] == 'tagList':
if ',' not in self._widgets[a_what_type].string:
continue
if n[0] == 'tag':
if ',' in self._widgets[a_what_type].string:
continue
ret['post_data'][n[0]] = self._widgets[a_what_type].string
if self._widgets[a_what_type-1].checked:
if n[0] in RADIO_BROWSER_EXACT_SEARCH_TERM.keys():
ret['post_data'][RADIO_BROWSER_EXACT_SEARCH_TERM[n[0]]] = 'true'
''' get limit (term)'''
if self._widgets[-self.NUMBER_OF_WIDGETS_AFTER_SEARCH_SECTION].value != self._default_limit:
ret['post_data']['limit'] = str(self._widgets[-self.NUMBER_OF_WIDGETS_AFTER_SEARCH_SECTION].value)
''' get order '''
self._order_to_term(ret)
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Search term returned: {}'.format(ret))
return ret
    def _order_to_term(self, ret):
        order_part = ''
        if self._widgets[2].active > 0:
for key in RADIO_BROWSER_SEARCH_SORT_TERMS.items():
if key[1] == self._widgets[2].active:
order_part = key[0]
break
# logger.error('DE order_part = "{}"'.format(order_part))
if order_part:
ret['post_data']['order'] = order_part
''' check for reverse order '''
if self._widgets[3].checked:
ret['post_data']['reverse'] = 'true'
def get_history(self):
return self._selected_history_id, self._default_history_id, self._history
def set_search_history(
self,
main_window_default_search_history_index,
main_window_search_history_index,
main_window_search_history,
init=False
):
''' get search history from main window
Return self._search_term
'''
self._history_id = main_window_search_history_index
if init:
self._default_history_id = main_window_default_search_history_index
self._selected_history_id = main_window_search_history_index
logger.error('DE set_search_history - _selected_history_id={}'.format(self._selected_history_id))
self._history = deepcopy(main_window_search_history)
self._search_term = self._history[self._history_id]
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Search history')
logger.debug(' search history: {}'.format(self._history))
logger.debug(' search history index: {}'.format(self._history_id))
logger.debug(' active search term: {}'.format(self._search_term))
def _get_a_third(self):
''' calculate window / 3 X
this is the length of a line editor + 2
'''
        X = int((self.maxX - 6) / 3)
        return X
def _calculate_widgets_yx(self, Y, X):
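        ''' compute the Y, X coordinates of the captions, the limit field and the
            buttons, laid out in three columns of width X starting at line Y
        '''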
# logger.error('DE {}'.format(self.yx))
''' set Y and X for search check box and limit field '''
self.yx[0] = self.yx[7] = [Y, 2]
''' set y for all consecutive widgets '''
self.yx[1][0] = self.yx[2][0] = self.yx[3][0] = Y
self.yx[4][0] = self.yx[5][0] = self.yx[6][0] = Y + 3
self.yx[7][0] = self.yx[6][0] + 3
self.yx[8][0] = self.yx[7][0] + 2
self.yx[1][1] = self.yx[4][1] = 3
self.yx[2][1] = self.yx[5][1] = 3 + X
self.yx[3][1] = self.yx[6][1] = 3 + 2 * X
# logger.error('DE {}'.format(self.yx))
def show(self, parent=None):
pY, pX = self._parent.getmaxyx()
# logger.error('DE pY = {}, pX = {}'.format(pY, pX))
self.Y, self.X = self._parent.getbegyx()
if self.maxY != pY or self.maxX != pX:
pY, pX = self._parent.getmaxyx()
# logger.error('DE pY = {}, pX = {}'.format(pY, pX))
self.maxY = pY
self.maxX = pX
self._win = self._parent
# self._win = curses.newwin(
# self.maxY, self.maxX,
# Y, X
# )
self._half_width = int((self.maxX -2 ) / 2) -3
# logger.error('>>>> hajf width = {} <<<<'.format(self._half_width))
if self.maxX < 80 or self.maxY < 22:
self._too_small = True
else:
self._too_small = False
self._win.bkgdset(' ', curses.color_pair(5))
self._win.erase()
self._win.box()
self._win.addstr(0, int((self.maxX - len(self.TITLE)) / 2),
self.TITLE,
curses.color_pair(4))
if self._too_small:
# TODO Print messge
msg = 'Window too small'
self._win.addstr(
int(self.maxY/2), int((self.maxX - len(msg))/2),
msg, curses.color_pair(5)
)
self._win.refresh()
return
X = self._get_a_third()
if self._widgets[0] is None:
''' display by '''
self._widgets[0] = SimpleCursesCheckBox(
2, 2,
'Display by',
curses.color_pair(9), curses.color_pair(4), curses.color_pair(5))
''' display by columns (index = 1) '''
self._widgets.append(SimpleCursesWidgetColumns(
Y=2, X=3, window=self._win,
selection=0,
active=0,
items=self.search_by_items,
color=curses.color_pair(5),
color_active=curses.color_pair(4),
color_cursor_selection=curses.color_pair(6),
color_cursor_active=curses.color_pair(9),
margin=1,
max_width=self._half_width,
on_up_callback_function=self._focus_up,
on_down_callback_function=self._focus_down,
on_left_callback_function=self._focus_previous,
on_right_callback_function=self._focus_next
))
''' sort by (index = 2) '''
self._widgets.append(SimpleCursesWidgetColumns(
Y=2, X=self.maxX - 1 - self._half_width,
max_width=self._half_width,
window=self._win,
selection=0,
active=0,
items=self.sort_by_items,
color=curses.color_pair(5),
color_active=curses.color_pair(4),
color_cursor_selection=curses.color_pair(6),
color_cursor_active=curses.color_pair(9),
margin=1,
on_up_callback_function=self._focus_up,
on_down_callback_function=self._focus_down,
on_left_callback_function=self._focus_previous,
on_right_callback_function=self._focus_next
))
'''' sort ascending / descending (index = 3) '''
self._widgets.append(SimpleCursesCheckBox(
self._widgets[2].Y + self._widgets[2].height + 1,
self._widgets[2].X - 2 + self._widgets[2].margin,
'Reverse order',
curses.color_pair(9), curses.color_pair(5), curses.color_pair(5)))
''' Two lines under the lists '''
Y = max(self._widgets[2].Y, self._widgets[1].Y + self._widgets[1].height, self._widgets[3].Y) + 2
self._win.addstr(
self._widgets[2].Y - 1,
self._widgets[2].X - 2,
'Sort by', curses.color_pair(4))
''' search check box (index = 4) '''
self._widgets.append(SimpleCursesCheckBox(
Y, 2, 'Search for',
curses.color_pair(9), curses.color_pair(4), curses.color_pair(5)))
self._calculate_widgets_yx(Y, X)
for n in range(1,7):
if n == 6:
self._widgets.append(DisabledWidget())
else:
self._widgets.append(SimpleCursesCheckBox(
self.yx[n][0] + 1,
self.yx[n][1] + len(self.captions[n]) + 2,
'Exact',
curses.color_pair(9), curses.color_pair(5), curses.color_pair(5)))
self._widgets.append(SimpleCursesLineEdit(
parent=self._win,
width=X-2,
begin_y=self.yx[n][0]+2,
begin_x=self.yx[n][1],
boxed=False,
has_history=False,
caption='',
box_color=curses.color_pair(9),
caption_color=curses.color_pair(4),
edit_color=curses.color_pair(9),
cursor_color=curses.color_pair(8),
unfocused_color=curses.color_pair(5),
key_up_function_handler=self._focus_up,
key_down_function_handler=self._focus_down))
self._widgets[-1].bracket = False
self._widgets[-1].use_paste_mode = True
#self._widgets[-1].string = self.captions[n]
''' limit - index = -3 '''
self._widgets.append(
SimpleCursesCounter(
Y=self.yx[-2][0],
X=self.yx[-2][1],
window=self._win,
color=curses.color_pair(5),
color_focused=curses.color_pair(9),
color_not_focused=curses.color_pair(4),
color_disabled=curses.color_pair(5),
minimum=0, maximum=1000,
step=1, big_step=10,
value=self._default_limit,
string='Limit results to {0} stations'
)
)
''' buttons - index -2, -1 '''
self._h_buttons = SimpleCursesHorizontalPushButtons(
self.yx[-1][0],
captions=('OK', 'Cancel'),
color_focused=curses.color_pair(9),
color=curses.color_pair(4),
bracket_color=curses.color_pair(5),
parent=self._win)
self._widgets.append(self._h_buttons.buttons[0])
self._widgets.append(self._h_buttons.buttons[1])
for i, n in enumerate(self._widgets):
self._widgets[i].id = i
if type(self._widgets[i]).__name__ == 'SimpleCursesLineEdit':
self._line_editor_id.append(i)
elif type(self._widgets[i]).__name__ == 'SimpleCursesWidgetColumns':
self._columns_id.append(i)
if i < 5:
self._widgets[i].enabled = True
else:
self._widgets[i].enabled = False
for i in range(-self.NUMBER_OF_WIDGETS_AFTER_SEARCH_SECTION, 0):
self._widgets[i].enabled = True
self._search_term_to_widgets(self._search_term)
self._win.refresh()
# set vertical placement variable
for i in range(0, len(self._widgets)):
if type(self._widgets[i]).__name__ != 'DisabledWidget':
if self._widgets[i].id in self._left_column:
self._widgets[i]._vert = self._left_column
elif self._widgets[i].id in self._middle_column:
self._widgets[i]._vert = self._middle_column
elif self._widgets[i].id in self._right_column:
self._widgets[i]._vert = self._right_column
self._widgets[i]._vert_id = self._widgets[i]._vert.index(self._widgets[i].id)
# logger.error('DE =======\ni = {0}\nw = {1}\nid = {2}\n_vert = {3}\n_vert_id = {4}'.format(i, self._widgets[i], self._widgets[i].id, self._widgets[i]._vert, self._widgets[i]._vert_id))
else:
''' update up to lists '''
self._widgets[1].window = self._widgets[2].window = self._win
self._widgets[1].max_width = self._widgets[2].max_width = self._half_width
self._widgets[2].X = self.maxX - 1 - self._half_width
self._widgets[3].move(
self._widgets[2].Y + self._widgets[2].height + 1,
self._widgets[2].X - 2 + self._widgets[2].margin
)
self._widgets[1].recalculate_columns()
self._widgets[2].recalculate_columns()
''' search check box (index = 4) '''
self._win.addstr(
self._widgets[2].Y - 1,
self._widgets[2].X - 2,
'Sort by', curses.color_pair(4))
''' Two lines under the lists '''
Y = max(self._widgets[2].Y, self._widgets[1].Y + self._widgets[1].height, self._widgets[3].Y) + 2
''' place search check box '''
self._widgets[4].move(Y, 2)
''' show descending check box '''
self._widgets[3].Y = self._widgets[2].Y + self._widgets[2].height + 1
            self._widgets[3].X = self._widgets[2].X - 2 + self._widgets[2].margin
self._widgets[3].show()
''' search check box (index = 4) '''
self._win.addstr(
self._widgets[2].Y - 1,
self._widgets[2].X - 2,
'Sort by', curses.color_pair(4))
''' Search check box not moved, will be handled by show '''
self._win.refresh()
self._calculate_widgets_yx(Y, X)
for n in range(0, 6):
''' place editors' captions '''
# self._win.addstr(
# self.yx[n+1][0],
# self.yx[n+1][1],
# self.captions[n+1],
# curses.color_pair(5)
# )
''' move exact check boxes '''
if type(self._widgets[5+n*2]).__name__ != 'DisabledWidget':
self._widgets[5+n*2].move(
self.yx[n+1][0] + 1,
self.yx[n+1][1] + len(self.captions[n+1]) + 2
)
''' move line editors '''
self._widgets[6+n*2].move(
self._win,
self.yx[n+1][0]+2,
self.yx[n+1][1],
update=False
)
''' change line editors width '''
self._widgets[6+n*2].width = X - 2
''' move limit field '''
self._widgets[-self.NUMBER_OF_WIDGETS_AFTER_SEARCH_SECTION].move(
self.yx[-2][0],
self.yx[-2][1]
)
''' move buttons Y '''
self._h_buttons.move(self.yx[-1][0])
self._win.refresh()
self._h_buttons.calculate_buttons_position()
self._print_history_legend()
self._display_all_widgets()
def _print_history_legend(self):
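        ''' bottom two lines of the window: current history item status on the left,
            keyboard shortcuts for history navigation / management on the right
        '''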
self._win.addstr(self.maxY - 3, 2, 25 * ' ')
if self._selected_history_id == 0:
self._win.addstr(self.maxY - 3, 2, 'Empty item!!!', curses.color_pair(4))
elif self._selected_history_id == self._history_id:
if self._default_history_id == self._history_id:
self._win.addstr(self.maxY - 3, 2, 'Last search, Default', curses.color_pair(4))
else:
self._win.addstr(self.maxY - 3, 2, 'Last search', curses.color_pair(4))
elif self._selected_history_id == self._default_history_id:
self._win.addstr(self.maxY - 3, 2, 'Default item', curses.color_pair(4))
msg = 'History navigation: ^N/^P, HOME,0/END,g,$, PgUp/PgDown'
thisX = self.maxX - 2 - len(msg)
self._win.addstr(self.maxY - 3, thisX, msg.split(':')[0] + ':', curses.color_pair(5))
msg = msg.split(':')[1]
thisX = self.maxX - 3 - len(msg)
self._win.addstr(msg, curses.color_pair(4))
self._other_chgat(self.maxY - 3, thisX, msg)
#self._carret_chgat(self.maxY-3, thisX, msg)
msg = 'Add/Del: ^Y/^X, Make default: ^B, Save history: ^W'
thisX = self.maxX - 2 - len(msg)
self._win.addstr(self.maxY - 2, thisX, msg)
self._carret_chgat(self.maxY-2, thisX, msg)
self._win.addstr(self.maxY - 2, 2 , 'History item: ')
self._win.addstr('{}'.format(self._selected_history_id), curses.color_pair(4))
self._win.addstr('/{} '.format(len(self._history)-1))
def _other_chgat(self, Y, X, a_string):
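        ''' re-color (to the window color) the character following each '/' or ','
            of the shortcuts legend
        '''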
indexes = [i for i, c in enumerate(a_string) if c == '/' or c == ',']
logger.error(indexes)
for n in indexes:
self._win.chgat(Y, X+n+1, 1, curses.color_pair(5))
def _carret_chgat(self, Y, X, a_string):
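        ''' highlight the two-character '^X' style shortcuts found in the legend '''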
indexes = [i for i, c in enumerate(a_string) if c == '^']
for n in indexes:
self._win.chgat(Y, X+n, 2, curses.color_pair(4))
def _activate_search_term(self, a_search_term):
self._search_term_to_widgets(a_search_term)
self._win.refresh()
self._display_all_widgets()
def _display_all_widgets(self):
self._update_focus()
self._fix_widgets_enabling()
self._fix_search_captions_color()
for n in self._widgets:
try:
n.show()
except:
n.show(self._win, opening=False)
self._win.refresh()
def _fix_search_captions_color(self):
col = 5 if self._widgets[0].checked else 4
for n in range(1,7):
self._win.addstr(
self.yx[n][0],
self.yx[n][1],
self.captions[n],
curses.color_pair(col))
self._win.refresh()
def _update_focus(self):
# use _focused here to avoid triggering
# widgets' refresh
for i, x in enumerate(self._widgets):
if x:
if self._focus == i:
# logger.error('_update_focus: {} - True'.format(i))
x._focused = True
else:
# logger.error('_update_focus: {} - False'.format(i))
x._focused = False
def _get_search_term_index(self, new_search_term):
''' search for a search term in history
if found return True, index
if not found return False, len(self._history) - 1
and append the search term in the history
'''
found = False
for a_search_term_index, a_search_term in enumerate(self._history):
if new_search_term == a_search_term:
# self._history_id = self._selected_history_id
index = a_search_term_index
found = True
if logger.isEnabledFor(logging.DEBUG):
logger.debug('New search term already in history, id = {}'.format(self._selected_history_id))
break
if not found:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('Adding new search term to history, id = {}'.format(len(self._history)))
self._history.append(new_search_term)
# self._history_id = self._selected_history_id = len(self._history) - 1
index = len(self._history) - 1
self._cnf.dirty = True
return found, index
def _goto_first_history_item(self):
self._handle_new_or_existing_search_term()
self._selected_history_id = 0
self._print_history_legend()
self._activate_search_term(self._history[self._selected_history_id])
def _goto_last_history_item(self):
self._handle_new_or_existing_search_term()
self._selected_history_id = len(self._history) - 1
self._print_history_legend()
self._activate_search_term(self._history[self._selected_history_id])
def _jump_history_up(self):
self._handle_new_or_existing_search_term()
self._selected_history_id -= 5
if self._selected_history_id < 0:
self._selected_history_id = len(self._history) - 1
self._print_history_legend()
self._activate_search_term(self._history[self._selected_history_id])
def _jump_history_down(self):
self._handle_new_or_existing_search_term()
self._selected_history_id += 5
if self._selected_history_id >= len(self._history):
self._selected_history_id = 0
self._print_history_legend()
self._activate_search_term(self._history[self._selected_history_id])
def _ctrl_n(self):
''' ^N - Next history item '''
cur_history_id = self._selected_history_id
self._handle_new_or_existing_search_term()
if abs(self._selected_history_id - cur_history_id) > 1:
self._selected_history_id = cur_history_id
if len(self._history) > 1:
self._selected_history_id += 1
if self._selected_history_id >= len(self._history):
self._selected_history_id = 0
self._print_history_legend()
self._activate_search_term(self._history[self._selected_history_id])
def _ctrl_p(self):
''' ^P - Previous history item '''
cur_history_id = self._selected_history_id
self._handle_new_or_existing_search_term()
if abs(self._selected_history_id - cur_history_id) > 1:
self._selected_history_id = cur_history_id
if len(self._history) > 1:
self._selected_history_id -= 1
if self._selected_history_id < 0:
self._selected_history_id = len(self._history) - 1
self._print_history_legend()
self._activate_search_term(self._history[self._selected_history_id])
def _ctrl_x(self):
''' ^X - Delete history item '''
self._handle_new_or_existing_search_term()
if len(self._history) > 2 and \
self._selected_history_id > 0:
if self._default_history_id == self._selected_history_id:
self._default_history_id = 1
self._history.pop(self._selected_history_id)
if self._selected_history_id == len(self._history):
self._selected_history_id -= 1
self._print_history_legend()
self._activate_search_term(self._history[self._selected_history_id])
self._cnf.dirty = True
def _ctrl_b(self):
''' ^B - Set default item '''
ret = self._handle_new_or_existing_search_term()
if self._selected_history_id > 0:
if ret == 1:
self._default_history_id = self._selected_history_id
self._print_history_legend()
self._win.refresh()
self._cnf.dirty = True
def _ctrl_f(self):
        ''' ^F - Go to template (item 0) '''
self._selected_history_id = 0
self._print_history_legend()
self._activate_search_term(self._history[self._selected_history_id])
def selected_widget_class_name(self):
return type(self._widgets[self._focus]).__name__
def line_editor_has_focus(self):
if self.selected_widget_class_name() == 'SimpleCursesLineEdit':
return True
return False
def keypress(self, char):
''' RadioBrowserSearchWindow keypress
Returns
-------
-1 - Cancel
0 - do search
1 - Continue
2 - Display help
3 - Display Line Editor Help
         4 - Error in search parameter
5 - Save search history
'''
if char in (
curses.KEY_EXIT, 27
):
return -1
if char == ord('q') and \
type(self._widgets[self._focus]).__name__ != 'SimpleCursesLineEdit':
return -1
if self._too_small:
return 1
class_name = type(self._widgets[self._focus]).__name__
if char == ord('0'):
self._goto_first_history_item()
elif char == ord('$'):
self._goto_last_history_item()
elif char in (curses.KEY_PPAGE, ) and self._focus != len(self._widgets) -3:
self._jump_history_up()
elif char in (curses.KEY_NPAGE, ) and self._focus != len(self._widgets) -3:
self._jump_history_down()
elif char in (ord('\t'), 9):
self._focus_next()
elif char in (curses.KEY_BTAB, ):
self._focus_previous()
elif char in (ord(' '), curses.KEY_ENTER, ord('\n'),
ord('\r')) and self._focus == len(self._widgets) - 1:
''' enter on cancel button '''
return -1
elif char in (ord(' '), curses.KEY_ENTER, ord('\n'),
ord('\r')) and self._focus == len(self._widgets) - 2:
''' enter on ok button '''
ret = self._handle_new_or_existing_search_term()
return 0 if ret == 1 else ret
elif char in (curses.ascii.SO, ):
''' ^N - Next history item '''
logger.error('^N')
self._ctrl_n()
elif char in (curses.ascii.DLE, ):
''' ^P - Previous history item '''
self._ctrl_p()
elif char in (curses.ascii.ETB, ):
''' ^W - Save search history '''
self._handle_new_or_existing_search_term()
''' Save search history '''
return 5
elif char in (curses.ascii.EM, ):
''' ^Y - Add history item '''
self._handle_new_or_existing_search_term()
elif char in (curses.ascii.CAN, ):
''' ^X - Delete history item '''
self._ctrl_x()
elif char in (curses.ascii.STX, ):
''' ^B - Set default item '''
self._ctrl_b()
elif char in (curses.ascii.ACK, ):
''' ^F - Go to template (item 0) '''
self._ctrl_f()
else:
if class_name == 'SimpleCursesWidgetColumns':
ret = self._widgets[self._focus].keypress(char)
if ret == 0:
# item selected
self._win.refresh()
elif ret == 2:
# cursor moved
self._win.refresh()
elif self._focus in self._checkbox_to_enable_widgets:
ret = self._widgets[self._focus].keypress(char)
if not ret:
tp = list(self._checkbox_to_enable_widgets)
tp.remove(self._focus)
other = tp[0]
self._widgets[other].checked = not self._widgets[self._focus].checked
self._fix_widgets_enabling()
self._win.refresh()
return 1
elif class_name == 'SimpleCursesCheckBox':
ret = self._widgets[self._focus].keypress(char)
if not ret:
return 1
elif class_name == 'SimpleCursesCounter':
ret = self._widgets[self._focus].keypress(char)
if ret == 0:
self._win.refresh()
return 1
elif class_name == 'SimpleCursesLineEdit':
ret = self._widgets[self._focus].keypress(self._win, char)
if ret == -1:
# Cancel
return -1
elif ret == 2:
''' display Line Editor Help '''
return 3
elif ret < 2:
return 1
if char in (ord('s'), ):
            ''' perform search '''
ret = self._handle_new_or_existing_search_term()
return 0 if ret == 1 else ret
elif char == curses.KEY_HOME:
self._goto_first_history_item()
elif char in (curses.KEY_END, ord('g')):
self._goto_last_history_item()
elif char in (ord('n'), ):
''' ^N - Next history item '''
self._ctrl_n()
elif char in (ord('p'), ):
''' ^P - Previous history item '''
self._ctrl_p()
elif char in (ord('w'), ):
''' ^W - Save search history '''
self._handle_new_or_existing_search_term()
''' Save search history '''
return 5
elif char in (ord('f'), ):
''' ^F - Go to template (item 0) '''
self._ctrl_f()
elif char in (ord('x'), ):
''' ^X - Delete history item '''
self._ctrl_x()
elif char in (ord('b'), ):
''' ^B - Set default item '''
self._ctrl_b()
elif char in (ord('y'), ):
''' ^Y - Add current item to history'''
self._handle_new_or_existing_search_term()
elif char in (ord('k'), curses.KEY_UP) and \
class_name != 'SimpleCursesWidgetColumns':
self._focus_up()
elif char in (ord('j'), curses.KEY_DOWN) and \
class_name != 'SimpleCursesWidgetColumns':
self._focus_down()
elif char in (ord('l'), curses.KEY_RIGHT) and \
class_name not in ('SimpleCursesWidgetColumns',
'SimpleCursesLineEdit'):
if 5 <= self._widgets[self._focus].id <= 13:
new_focus = self._focus + 2
# logger.error('DE focus = {}'.format(new_focus))
if new_focus == 15:
new_focus = 16
# logger.error('DE focus = {}'.format(new_focus))
self._apply_new_focus(new_focus)
else:
self._focus_next()
elif char in (ord('h'), curses.KEY_LEFT) and \
class_name not in ('SimpleCursesWidgetColumns',
'SimpleCursesLineEdit'):
if 5 <= self._widgets[self._focus].id <= 13:
new_focus = self._focus - 2
# logger.error('DE focus = {}'.format(new_focus))
if new_focus == 3:
new_focus = 4
# logger.error('DE focus = {}'.format(new_focus))
self._apply_new_focus(new_focus)
else:
self._focus_previous()
if char == ord('?'):
''' display help '''
return 2
''' continue '''
return 1
def _handle_new_or_existing_search_term(self):
        ''' read all widgets and create a search term;
            if it does not exist, add it to the history
'''
test_search_term = self._widgets_to_search_term()
if test_search_term:
found, index = self._get_search_term_index(test_search_term)
# TODO: check if item is altered
self._selected_history_id = index
self._print_history_legend()
self._win.refresh()
else:
''' parameter error'''
return 4
return 1
def _fix_widgets_enabling(self):
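        ''' "Display by" and "Search for" are mutually exclusive; enable the
            "Display by" columns widget or the search editors accordingly
        '''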
self._fix_search_captions_color()
col = True if self._widgets[0].checked else False
self._widgets[1].enabled = col
for i in range(self._checkbox_to_enable_widgets[1] + 1, len(self._widgets) - self.NUMBER_OF_WIDGETS_AFTER_SEARCH_SECTION):
self._widgets[i].enabled = not col
# logger.error('DE widget {0} enabled: {1}'.format(i, not col))
def _focus_next(self):
# logger.error('DE focus next ==========================')
new_focus = self._focus + 1
if new_focus == len(self._widgets):
new_focus = 0
# logger.error('DE new_focus = {}'.format(new_focus))
focus_ok = False
for i in range(new_focus, len(self._widgets)):
if self._widgets[i].enabled:
new_focus = i
focus_ok = True
# logger.error('DE 1 new_focus = {}'.format(new_focus))
break
if not focus_ok:
for i in range(0, new_focus + 1):
if self._widgets[i].enabled:
new_focus = i
focus_ok = True
# logger.error('DE 2 new_focus = {}'.format(new_focus))
break
# logger.error('DE new_focus = {}'.format(new_focus))
# logger.error('DE end focus next ==========================')
self._apply_new_focus(new_focus)
def _focus_previous(self):
# logger.error('DE focus previous ==========================')
new_focus = self._focus - 1
if new_focus == -1:
new_focus = len(self._widgets) - 1
# logger.error('DE new_focus = {}'.format(new_focus))
focus_ok = False
for i in range(new_focus, -1, -1):
# logger.error('DE i = {}'.format(i))
if self._widgets[i].enabled:
new_focus = i
focus_ok = True
# logger.error('DE 1 new_focus = {}'.format(new_focus))
break
if not focus_ok:
for i in range(len(self._widgets) - 1, new_focus, -1):
# logger.error('DE i = {}'.format(i))
if self._widgets[i].enabled:
new_focus = i
focus_ok = True
# logger.error('DE 2 new_focus = {}'.format(new_focus))
break
# logger.error('DE end focus previous ==========================')
self._apply_new_focus(new_focus)
def _focus_up(self):
# logger.error('DE self._focus_up()')
new_focus, col = self._get_column_list(self._focus)
# logger.error('DE new_focus = {0}, col = {1}'.format(new_focus, col))
while True:
new_focus -= 1
# logger.error('DE new_focus = {}'.format(new_focus))
if new_focus < 0:
new_focus = len(col) - 1
# logger.error('DE new_focus = {}'.format(new_focus))
# logger.error('DE col[new_focus] = {}'.format(col[new_focus]))
if self._widgets[col[new_focus]].enabled:
break
self._apply_new_focus(col[new_focus])
def _focus_down(self):
new_focus, col = self._get_column_list(self._focus)
# logger.error('DE new_focus = {0}, col = {1}'.format(new_focus, col))
while True:
new_focus += 1
# logger.error('DE new_focus = {}'.format(new_focus))
if new_focus == len(col):
new_focus = 0
# logger.error('DE new_focus = {}'.format(new_focus))
# logger.error('DE col[new_focus] = {}'.format(col[new_focus]))
if self._widgets[col[new_focus]].enabled:
break
self._apply_new_focus(col[new_focus])
def _apply_new_focus(self, new_focus):
if new_focus != self._focus:
self._widgets[self._focus].focused = False
self._focus = new_focus
self._widgets[self._focus].focused = True
self._win.refresh()
def _get_column_list(self, this_id):
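        ''' return (index of this_id inside its column, the column tuple itself);
            used by _focus_up() / _focus_down() for vertical navigation
        '''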
if this_id in self._left_column:
return self._left_column.index(this_id), self._left_column
elif this_id in self._middle_column:
return self._middle_column.index(this_id), self._middle_column
elif this_id in self._right_column:
return self._right_column.index(this_id), self._right_column
class RadioBrowserData(object):
''' Read search parameters for radio.browser.info service
parameters are:
tags, countries(and states), codecs, languages
'''
_data = {}
_connection_error = False
_lock = threading.Lock()
_stop_thread = False
_timeout = 3
data_thread = None
def __init__(self, url, timeout=3):
self._url = url
self._timeout = timeout
def start(self, force_update=False):
''' Start data acquisition thread '''
self.data_thread = threading.Thread(
target=self._get_all_data_thread,
args=(
self._lock, force_update, lambda: self._stop_thread,
self._update_data
)
)
self.data_thread.start()
def stop(self):
''' Stop (cancel) data acquisition thread '''
self._stop_thread = True
@property
def lock(self):
''' Return thread lock (read only)'''
return self._lock
@lock.setter
def lock(self, val):
raise ValueError('property is read only')
@property
def terminated(self):
''' Return True if thread is not alive (read only)
which means that data has been retrieved'''
if self.data_thread.is_alive():
return False
return True
@terminated.setter
def terminated(self, val):
raise ValueError('property is read only')
@property
def connection_error(self):
self._lock.acquire()
ret = self._connection_error
self._lock.release()
return ret
@connection_error.setter
def connection_error(self, val):
raise ValueError('property is read only')
@property
def tags(self):
self._lock.acquire()
ret = self._data['tags']
self._lock.release()
return ret
@tags.setter
def tags(self, val):
raise ValueError('property is read only')
@property
def codecs(self):
self._lock.acquire()
if 'codecs' in self._data:
ret = self._data['codecs']
else:
ret = {}
self._lock.release()
return ret
@codecs.setter
def codecs(self, val):
raise ValueError('property is read only')
@property
def countries(self):
self._lock.acquire()
ret = self._data['countries']
self._lock.release()
return ret
@countries.setter
def countries(self, val):
raise ValueError('property is read only')
@property
def languages(self):
self._lock.acquire()
ret = self._data['languages']
self._lock.release()
return ret
@languages.setter
def languages(self, val):
raise ValueError('property is read only')
def reset_all_data(self):
self._data = {}
self.start()
def _update_data(self, data, connection_error):
self._connection_error = connection_error
self._data = data
def _get_all_data_thread(self, lock, force_update, stop, callback): # noqa
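        ''' worker thread: fetch tags, countries (with states), codecs and languages
            from the server and hand the result to callback() under the lock;
            stop() is polled after each item so the thread can be cancelled early
        '''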
def get_data(data):
ret = {}
json_data = []
connection_error, json_data = get_data_dict(data)
if connection_error:
return True, {}
if json_data:
for a_tag in json_data:
ret[a_tag['name']] = a_tag['stationcount']
return False, ret
def get_countries(stop):
ret = {}
connection_error, json_countrycodes = get_data_dict('countrycodes')
if connection_error:
return True, {}
from countries import countries
st = 'stationcount'
for n in json_countrycodes:
if n['name'] in countries.keys():
ret[countries[n['name']]] = {}
ret[countries[n['name']]]['code'] = n['name']
ret[countries[n['name']]]['stationcount'] = n[st]
ret[countries[n['name']]]['states'] = {}
connection_error, json_states = get_data_dict('states')
if connection_error:
return True, {}
for n in json_states:
if n['country'] in ret.keys():
ret[n['country']]['states'][n['name']] = n['stationcount']
return False, ret
def get_data_dict(data):
url = 'http://' + self._url + '/json/' + data
jdata = {'hidebroken': 'true'}
headers = {'user-agent': 'PyRadio/dev',
'encoding': 'application/json'}
if self._pyradio_info:
headers['user-agent'] = self._pyradio_info.replace(' ', '/')
try:
r = requests.get(url, headers=headers, json=jdata, timeout=self._timeout)
r.raise_for_status()
return False, json.loads(r.text)
# if r.status_code == 200:
# return False, json.loads(r.text)
# else:
# return True, []
except requests.exceptions.RequestException as e:
                if logger.isEnabledFor(logging.ERROR):
logger.error(e)
return True, []
my_data = {}
data_items = ['tags', 'countries', 'codecs', 'languages']
for an_item in data_items:
if an_item == 'countries':
ret, my_data['countries'] = get_countries(stop)
else:
ret, my_data[an_item] = get_data(an_item)
if stop():
                if logger.isEnabledFor(logging.DEBUG):
                    logger.debug('Asked to stop after working on "{}"...'.format(an_item))
self._terminated = True
return
lock.acquire()
callback(my_data, ret)
lock.release()
class RadioBrowserDns(object):
    ''' Performs a query of the DNS SRV record of
        _api._tcp.radio-browser.info, which
        gives the list of server names directly
        without reverse dns lookups '''
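    # Typical usage (sketch):
    #     dns = RadioBrowserDns()
    #     urls = dns.server_urls            # all servers from the SRV record
    #     url = dns.give_me_a_server_url()  # a random server that answers to ping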
_urls = None
_countries = None
def __init__(self):
pass
@property
def connected(self):
return self._urls
@property
def server_urls(self):
''' Returns server urls in a tuple '''
if self._urls is None:
self._get_urls()
return tuple(self._urls) if self._urls is not None else None
@server_urls.setter
def server_urls(self, val):
return
@property
def server_countries(self):
''' Returns server countries in a tuple '''
if self._urls is None:
self._get_urls()
self._get_countries()
return self._countries
@server_countries.setter
def server_countries(self, value):
return
def get_server_names_and_urls(self):
if self._urls is None:
self._get_urls()
if self._countries is None:
self._get_countries()
zipped = list(zip(self._countries, self._urls))
self._names_and_urls = []
for n in zipped:
self._names_and_urls.append(n[1] + ' (' + n[0] + ')')
return self._names_and_urls
def _get_urls(self):
# self._urls = None
# return
self._urls = []
result = None
try:
result = resolver.query('_api._tcp.radio-browser.info', 'SRV')
except:
self._urls = None
if result:
for n in result:
self._urls.append(str(n).split(' ')[-1][:-1])
else:
self._urls = None
def _get_countries(self):
self._countries = []
for n in self._urls:
self._countries.append(country_from_server(n))
logger.error('DE countries = {}'.format(self._countries))
def give_me_a_server_url(self):
''' Returns a random server '''
if self._urls is None:
self._get_urls()
if self._urls:
ping_response = [-2] * len(self._urls)
start_num = num = random.randint(0, len(self._urls) - 1)
while ping_response[num] == -2:
if logger.isEnabledFor(logging.INFO):
logger.info('pinging random server: ' + self._urls[num])
ping_response[num] = ping(self._urls[num], count=1, timeout_in_seconds=1)
if ping_response[num] == 1:
''' ping was ok '''
if logger.isEnabledFor(logging.INFO):
logger.info('ping was successful!')
break
if logger.isEnabledFor(logging.INFO):
logger.info('ping was unsuccessful!')
num += 1
if num == len(self._urls):
num = 0
if num == start_num:
return None
return self._urls[num]
else:
return None
def servers(self):
''' server urls as generator '''
if self._urls is None:
self._get_urls()
for a_url in self._urls:
yield a_url
class RadioBrowserSort(object):
TITLE = ' Sort by '
items = collections.OrderedDict({
'Name': 'name',
'Votes': 'votes',
'Clicks': 'clickcount',
'Bitrate': 'bitrate',
'Codec': 'codec',
'Country': 'country',
'State': 'state',
'Language': 'language',
'Tag': 'tags'
})
_too_small = False
def __init__(self, parent, search_by=None):
self._parent = parent
self.active = self.selection = 0
if search_by:
if search_by in self.items.values():
self.active = self.selection = self._value_to_index(search_by)
self.maxY = len(self.items) + 2
self.maxX = max(len(x) for x in self.items.keys()) + 4
if len(self.TITLE) + 4 > self.maxX:
self.maxX = len(self.TITLE) + 4
self._win = None
if search_by:
self.set_active_by_value(search_by)
def _value_to_index(self, val):
for i, n in enumerate(self.items.values()):
if val == n:
return i
return -1
def set_parent(self, parent):
self._parent = parent
self.show()
def set_active_by_value(self, a_string, set_selection=True):
for i, n in enumerate(self.items.values()):
if a_string == n:
if set_selection:
self.active = self.selection = i
else:
self.active = i
return
if set_selection:
self.active = self.selection = 0
else:
self.active = 0
def show(self, parent=None):
self._too_small = False
pY, pX = self._parent.getmaxyx()
Y, X = self._parent.getbegyx()
if self.maxY > pY or self.maxX > pX -2:
self._too_small = True
msg = 'Window too small to display content!'
if pX < len(msg) + 2:
msg = 'Window too small!'
self._win = curses.newwin(
3, len(msg) + 2,
Y + int((pY - 3) / 2),
int((pX - len(msg)) / 2))
self._win.bkgdset(' ', curses.color_pair(3))
self._win.box()
try:
self._win.addstr( 1, 1, msg,
curses.color_pair(5))
except:
pass
self._win.refresh()
return
self._win = curses.newwin(
self.maxY, self.maxX,
Y + int((pY - self.maxY) / 2),
int((pX - self.maxX) / 2)
)
self._win.bkgdset(' ', curses.color_pair(3))
# self._win.erase()
self._win.box()
self._win.addstr(0, 1,
self.TITLE,
curses.color_pair(4))
self._refresh()
def _refresh(self):
for i, n in enumerate(self.items.keys()):
col = 5
if i == self.active == self.selection:
col = 9
elif i == self.selection:
col = 6
elif i == self.active:
col = 4
self._win.addstr(i + 1, 1,
' {}'.format(n.ljust(self.maxX - 3)),
curses.color_pair(col))
self._win.refresh()
def keypress(self, char):
''' RadioBrowserSort keypress
Returns:
-1: Cancel
0: Done, result is in ....
1: Continue
'''
if self._too_small:
return 1
if char in (
curses.KEY_EXIT, ord('q'), 27,
ord('h'), curses.KEY_LEFT
):
return -1
elif char in (
ord('l'), ord(' '), ord('\n'), ord('\r'),
curses.KEY_RIGHT, curses.KEY_ENTER
):
for i, n in enumerate(self.items.keys()):
if i == self.selection:
self.search_by = self.items[n]
self.active = i
break
return 0
elif char in (ord('g'), curses.KEY_HOME):
self.selection = 0
self._refresh()
elif char in (ord('G'), curses.KEY_END):
self.selection = len(self.items) - 1
self._refresh()
elif char in (curses.KEY_PPAGE, ):
if self.selection == 0:
self.selection = len(self.items) - 1
else:
self.selection -= 5
if self.selection < 0:
self.selection = 0
self._refresh()
elif char in (curses.KEY_NPAGE, ):
if self.selection == len(self.items) - 1:
self.selection = 0
else:
self.selection += 5
if self.selection >= len(self.items):
self.selection = len(self.items) - 1
self._refresh()
elif char in (ord('k'), curses.KEY_UP):
self.selection -= 1
if self.selection < 0:
self.selection = len(self.items) - 1
self._refresh()
elif char in (ord('j'), curses.KEY_DOWN):
self.selection += 1
if self.selection == len(self.items):
self.selection = 0
self._refresh()
return 1
class RadioBrowserServersSelect(object):
''' Server selection Window
Uses RadioBrowserServers
'''
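    # Minimal usage sketch (parent is a curses window, servers an iterable of urls):
    #     sel = RadioBrowserServersSelect(parent, servers, current_server, 1, 1)
    #     sel.show()
    #     ret = sel.keypress(char)    # -1: cancel, 0: done, 1: continue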
TITLE = ' Server Selection '
return_value = 1
def __init__(self,
parent,
servers,
current_server,
ping_count,
ping_timeout,
Y=None,
X=None,
show_random=False,
return_function=None):
''' Server selection Window
if Y and X are valid (not None)
keypress just returns 0
If Y is None
keypress returns 0 and self.server is set
'''
self._parent = parent
self.items = list(servers)
self.server = current_server
self.ping_count = ping_count
self.ping_timeout = ping_timeout
self._show_random = self.from_config = show_random
self._return_function = return_function
self.servers = RadioBrowserServers(
parent, servers, current_server, show_random
)
self.maxY = self.servers.maxY + 2
self.maxX = self.servers.maxX + 2
self._Y = Y
self._X = X
logger.error('DE self._Y ={0}, self._X = {1}'.format(self._Y, self._X))
def show(self, parent=None):
if parent:
self._parent = parent
self.servers._parent = parent
self._too_small = False
pY, pX = self._parent.getmaxyx()
Y, X = self._parent.getbegyx()
if self.maxY > pY or self.maxX > pX -2:
self._too_small = True
msg = 'Window too small to display content!'
if pX < len(msg) + 2:
msg = 'Window too small!'
self._win = curses.newwin(
3, len(msg) + 2,
Y + int((pY - 3) / 2),
int((pX - len(msg)) / 2))
self._win.bkgdset(' ', curses.color_pair(3))
self._win.box()
try:
self._win.addstr( 1, 1, msg,
curses.color_pair(5))
except:
pass
self._win.refresh()
return
if self._Y is None:
self._win = curses.newwin(
self.maxY, self.maxX,
Y + int((pY - self.maxY) / 2),
int((pX - self.maxX) / 2)
)
else:
self._win = curses.newwin(
self.maxY, self.maxX,
self._Y, self._X
)
self._win.bkgdset(' ', curses.color_pair(3))
# self._win.erase()
self._box_and_title()
self.servers._parent = self._win
self.servers.show()
def _box_and_title(self):
self._win.box()
self._win.addstr(
0, int((self.maxX - len(self.TITLE)) / 2),
self.TITLE,
curses.color_pair(4)
)
self._win.refresh()
def set_parent(self, parent):
self._parent = parent
self.servers._parent = parent
def keypress(self, char):
''' RadioBrowserServersSelect keypress
Returns:
-1: Cancel
0: Done, result is in ....
1: Continue
'''
self.return_value = self.servers.keypress(char)
if self.return_value == 2:
self.return_value = 1
if self.return_value == 0:
if self.servers.server:
if self.ping_count > 0 and self.ping_timeout > 0:
msg = ' Checking Host '
self._win.addstr(
self.maxY - 1, int((self.maxX - len(msg)) / 2),
msg,
curses.color_pair(3)
)
self._win.refresh()
if ping(self.servers.server,
count=self.ping_count,
timeout_in_seconds=self.ping_timeout) != 1:
''' ping was not ok '''
msg = ' Host is unreachable '
self._win.addstr(
self.maxY - 1, int((self.maxX - len(msg)) / 2),
msg,
curses.color_pair(3)
)
self._win.refresh()
th = threading.Timer(1, self._box_and_title)
th.start()
th.join()
self.show()
return 1
if self._Y is None:
self.server = self.servers.server
if self._return_function:
if logger.isEnabledFor(logging.DEBUG):
logger.debug('selected server: {}'.format(self.servers.server))
self._return_function(self.servers.server)
return 2
return self.return_value
class RadioBrowserServers(object):
''' Display RadioBrowser server
        This is supposed to be plugged into
another widget
'''
_too_small = False
from_config = False
def __init__(self, parent, servers, current_server, show_random=False):
self._parent = parent
self.items = list(servers)
self.server = current_server
self.from_config = show_random
s_max = 0
for i, n in enumerate(self.items):
if self.server == n:
self.selection = self.active = i
self.items[i] = ' ' + country_from_server(n) + ' ({}) '.format(n)
if len(self.items[i]) > s_max:
s_max = len(self.items[i])
self.items.sort()
for i, n in enumerate(self.items):
if len(self.items[i]) < s_max:
self.items[i] = self.items[i].replace('(', ' ' * (s_max - len(self.items[i])) + '(')
self.maxY = len(self.items)
self.maxX = len(self.items[0])
if show_random:
self.items.reverse()
self.items.append(' Random' + (s_max - 7) * ' ')
self.items.reverse()
self.maxY = len(self.items)
logger.error('DE items = {}'.format(self.items))
''' get selection and active server id '''
if show_random and (
self.server == '' or 'Random' in self.server
):
self.active = self.selection = 0
else:
for i, n in enumerate(self.items):
if self.server in n:
self.active = self.selection = i
break
def show(self, parent=None):
self._too_small = False
pY, pX = self._parent.getmaxyx()
Y, X = self._parent.getbegyx()
if self.maxY > pY or self.maxX > pX -2:
''' display nothing
let the parent do whatever
'''
self._too_small = True
else:
self._win = curses.newwin(
self.maxY, self.maxX,
Y + 1, X + 1
)
for i, n in enumerate(self.items):
col = 5
if i == self.active == self.selection:
col = 9
elif i == self.selection:
col = 6
elif i == self.active:
col = 4
try:
self._win.addstr(i, 0 , n, curses.color_pair(col))
except:
pass
self._win.refresh()
def keypress(self, char):
''' RadioBrowserServers keypress
Returns:
-1: Cancel
0: Done, result is in ....
1: Continue
2: Show help
'''
if self._too_small:
return 1
if char in (
curses.KEY_EXIT, ord('q'), 27,
ord('h'), curses.KEY_LEFT
):
return -1
elif char in (
ord('l'), ord(' '), ord('\n'), ord('\r'),
curses.KEY_RIGHT, curses.KEY_ENTER
):
for i, n in enumerate(self.items):
if i == self.selection:
if 'Random' in n:
self.server = ''
else:
self.server = n.split('(')[1].replace(') ', '')
self.active = i
break
return 0
elif char in (ord('?'), ):
return 2
elif char in (ord('g'), curses.KEY_HOME):
self.selection = 0
self.show()
elif char in (ord('G'), curses.KEY_END):
self.selection = len(self.items) - 1
self.show()
elif char in (curses.KEY_PPAGE, ):
if self.selection == 0:
self.selection = len(self.items) - 1
else:
self.selection -= 5
if self.selection < 0:
self.selection = 0
self.show()
elif char in (curses.KEY_NPAGE, ):
if self.selection == len(self.items) - 1:
self.selection = 0
else:
self.selection += 5
if self.selection >= len(self.items):
self.selection = len(self.items) - 1
self.show()
elif char in (ord('k'), curses.KEY_UP):
self.selection -= 1
if self.selection < 0:
self.selection = len(self.items) - 1
self.show()
elif char in (ord('j'), curses.KEY_DOWN):
self.selection += 1
if self.selection == len(self.items):
self.selection = 0
self.show()
return 1
def probeBrowsers(a_browser_url):
base_url = a_browser_url.split('/')[2]
if not base_url:
base_url = a_browser_url
implementedBrowsers = PyRadioStationsBrowser.__subclasses__()
if logger.isEnabledFor(logging.INFO):
logger.info('Implemented browsers: {}'.format(implementedBrowsers))
for a_browser in implementedBrowsers:
if a_browser.BASE_URL == base_url:
if logger.isEnabledFor(logging.INFO):
logger.info('Supported browser: {}'.format(a_browser))
return a_browser
if logger.isEnabledFor(logging.INFO):
logger.info('No supported browser found for: ' + a_browser_url)
return None
|
the-stack_0_24039 | import re
import yaml
from Tests.test_utils import print_error, run_command
class ScriptValidator(object):
"""ScriptValidator is designed to validate the correctness of the file structure we enter to content repo. And
also try to catch possible Backward compatibility breaks due to the preformed changes.
Attributes:
_is_valid (bool): the attribute which saves the valid/in-valid status of the current file.
file_path (str): the path to the file we are examining at the moment.
change_string (string): the change string for the examined script.
yaml_data (dict): the data of the script in dictionary format.
"""
def __init__(self, file_path, check_git=True):
self._is_valid = True
self.file_path = file_path
if check_git:
self.change_string = run_command("git diff origin/master {0}".format(self.file_path))
with open(file_path, 'r') as file_data:
self.yaml_data = yaml.safe_load(file_data)
def is_backward_compatible(self):
"""Check if the script is backward compatible."""
self.is_arg_changed()
self.is_context_path_changed()
self.is_docker_image_changed()
self.is_there_duplicates_args()
return self._is_valid
def is_there_duplicates_args(self):
"""Check if there are duplicated arguments."""
args = self.yaml_data.get('args', [])
existing_args = []
for arg in args:
arg_name = arg['name']
if arg_name not in existing_args:
existing_args.append(arg_name)
else:
self._is_valid = False
return True
return False
def is_arg_changed(self):
"""Check if the argument has been changed."""
deleted_args = re.findall(r"-([ ]+)?- name: (.*)", self.change_string)
added_args = re.findall(r"\+([ ]+)?- name: (.*)", self.change_string)
deleted_args = [arg[1] for arg in deleted_args]
added_args = [arg[1] for arg in added_args]
for added_arg in added_args:
if added_arg in deleted_args:
deleted_args.remove(added_arg)
if deleted_args:
print_error("Possible backwards compatibility break, You've changed the name of a command or its arg in"
" the file {0} please undo, the line was:{1}".format(self.file_path,
"\n".join(deleted_args)))
self._is_valid = False
return True
return False
def is_context_path_changed(self):
"""Check if the context path as been changed."""
deleted_args = re.findall("-([ ]+)?- contextPath: (.*)", self.change_string)
added_args = re.findall("\+([ ]+)?- contextPath: (.*)", self.change_string)
deleted_args = [arg[1] for arg in deleted_args]
added_args = [arg[1] for arg in added_args]
for added_arg in added_args:
if added_arg in deleted_args:
deleted_args.remove(added_arg)
if deleted_args:
print_error("Possible backwards compatibility break, You've changed the context in the file {0} please "
"undo, the line was:{1}".format(self.file_path, "\n".join(deleted_args)))
self._is_valid = False
return True
return False
def is_docker_image_changed(self):
"""Check if the docker image as been changed."""
is_docker_added = re.search("\+([ ]+)?dockerimage: .*", self.change_string)
is_docker_deleted = re.search("-([ ]+)?dockerimage: .*", self.change_string)
if is_docker_added or is_docker_deleted:
print_error("Possible backwards compatibility break, You've changed the docker for the file {}"
" this is not allowed.".format(self.file_path))
self._is_valid = False
return True
return False
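# Minimal usage sketch (not part of the original module). The path below is a
# hypothetical placeholder; check_git=True assumes the file is tracked in git
# so the diff against origin/master can be computed.
if __name__ == '__main__':
    validator = ScriptValidator('Scripts/example-script.yml', check_git=True)
    if not validator.is_backward_compatible():
        print_error('Backward compatibility break detected in Scripts/example-script.yml')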
|
the-stack_0_24040 | import asyncio
import logging
from aiogram import Bot, types
from aiogram.dispatcher import Dispatcher
from aiogram.utils.executor import start_webhook
API_TOKEN = 'BOT TOKEN HERE'
# webhook settings
WEBHOOK_HOST = 'https://your.domain'
WEBHOOK_PATH = '/path/to/api'
WEBHOOK_URL = f"{WEBHOOK_HOST}{WEBHOOK_PATH}"
# webserver settings
WEBAPP_HOST = 'localhost' # or ip
WEBAPP_PORT = 3001
logging.basicConfig(level=logging.INFO)
loop = asyncio.get_event_loop()
bot = Bot(token=API_TOKEN, loop=loop)
dp = Dispatcher(bot)
@dp.message_handler()
async def echo(message: types.Message):
await bot.send_message(message.chat.id, message.text)
async def on_startup(dp):
await bot.set_webhook(WEBHOOK_URL)
# insert code here to run it after start
#
async def on_shutdown(dp):
# insert code here to run it before shutdown
#
await bot.close()
if __name__ == '__main__':
start_webhook(dispatcher=dp, webhook_path=WEBHOOK_PATH, on_startup=on_startup, on_shutdown=on_shutdown,
skip_updates=True, host=WEBAPP_HOST, port=WEBAPP_PORT)
|
the-stack_0_24041 | # Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Fixes builds in the datastore.
This code is changed each time a one-off migration needs to be run.
"""
from google.appengine.ext import ndb
from components import utils
import bulkproc
import logging
import model
PROC_NAME = 'fix_builds'
bulkproc.register(
PROC_NAME,
lambda keys, _payload: _fix_builds(keys),
keys_only=True,
)
def launch(): # pragma: no cover
bulkproc.start(PROC_NAME)
def _fix_builds(build_keys): # pragma: no cover
res_iter = utils.async_apply(build_keys, _fix_build_async, unordered=True)
# async_apply returns an iterator. We need to traverse it, otherwise nothing
# will happen.
for _ in res_iter:
pass
@ndb.transactional_tasklet
def _fix_build_async(build_key): # pragma: no cover
out_props_key = model.BuildOutputProperties.key_for(build_key)
build, out_props = yield ndb.get_multi_async([build_key, out_props_key])
if not build or not build.is_ended:
return
to_put = []
if not out_props and build.proto.output.HasField('properties'):
to_put.append(
model.BuildOutputProperties(
key=out_props_key,
properties=build.proto.output.properties.SerializeToString(),
)
)
if to_put:
logging.info('fixing %s' % build.key.id())
yield ndb.put_multi_async(to_put)
|
the-stack_0_24043 | from typing import List
from rabin.padding.padding_strategy import PaddingStrategy
class CopyBitsStrategy(PaddingStrategy):
"""
Strategy when "padding_bits" from the end of the number
are appended to the end of the number.
The issue with this is, that it works only with the numbers that
are at least "padding_bits" long.
The "padding_bits" parameter also specifies the robustness
of the algorithm in a way, that more bits used means that the padding
is more robust and the probability of wrong choice of the result is lower.
Probability of choosing wrong result is 1/2^(padding_bits + 1).
The benchmarks show that "padding_bits" should be at least 16.
The original idea taken from the duckbill360 -
https://github.com/duckbill360/Rabin-Public-Key-Cryptosystem/blob/master/Rabin.py
"""
def __init__(self, padding_bits: int = 16):
self._padding_bits = padding_bits
def pad_plaintext(self, plaintext: int) -> int:
# check if the binary string has enough bites
if plaintext.bit_length() < self._padding_bits:
raise ValueError(f'It is not possible to pad numbers '
f'that have less then {self._padding_bits}, '
f'but {plaintext.bit_length()} was given!')
# convert to a binary string (b'0101')
binary_string = bin(plaintext)
# pad the last _padding_bits bits to the end
output = binary_string + binary_string[-self._padding_bits:]
# convert back to integer
return int(output, 2)
def extract_plaintext(self, candidates: List[int]) -> int:
# select candidate
padded_plaintext = self._choose_candidate(candidates)
# convert to a binary string (b'0101')
binary_string = bin(padded_plaintext)
# remove padding
binary_string = binary_string[:-self._padding_bits]
# convert back to integer
return int(binary_string, 2)
def _choose_candidate(self, candidates: List[int]) -> int:
matching_candidates = []
for i in candidates:
binary = bin(i)
# take the last _padding_bits
append = binary[-self._padding_bits:]
# remove the last _padding_bits
binary = binary[:-self._padding_bits]
if append == binary[-self._padding_bits:]:
matching_candidates.append(i)
if len(matching_candidates) != 1:
raise ValueError('It was not possible to determine candidate! '
f'There were {len(matching_candidates)} plaintext candidates!')
return matching_candidates[0]
|
the-stack_0_24044 | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao ([email protected])
# Modified by Dequan Wang and Xingyi Zhou
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import math
import logging
import torch
import torch.nn as nn
# from dcn_v2 import DCN
# from .DCN import ModulatedDeformConvWithOff as DCN
from .DCN import Fake_DCN as DCN
import torch.utils.model_zoo as model_zoo
BN_MOMENTUM = 0.1
logger = logging.getLogger(__name__)
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1,
bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion,
momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
def fill_up_weights(up):
w = up.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
def fill_fc_weights(layers):
for m in layers.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, std=0.001)
# torch.nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')
# torch.nn.init.xavier_normal_(m.weight.data)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class PoseResNet(nn.Module):
def __init__(self, block, layers, heads, head_conv):
self.inplanes = 64
self.heads = heads
self.deconv_with_bias = False
super(PoseResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
# used for deconv layers
self.deconv_layer1 = self._make_deconv_layer(256, 4)
self.deconv_layer2 = self._make_deconv_layer(128, 4)
self.deconv_layer3 = self._make_deconv_layer(64, 4)
self.smooth_layer1 = DeformConv(256, 256)
self.smooth_layer2 = DeformConv(128, 128)
self.smooth_layer3 = DeformConv(64, 64)
self.project_layer1 = DeformConv(256 * block.expansion, 256)
self.project_layer2 = DeformConv(128 * block.expansion, 128)
self.project_layer3 = DeformConv(64 * block.expansion, 64)
for head in self.heads:
classes = self.heads[head]
if head_conv > 0:
fc = nn.Sequential(
nn.Conv2d(64, head_conv,
kernel_size=3, padding=1, bias=True),
nn.ReLU(inplace=True),
nn.Conv2d(head_conv, classes,
kernel_size=1, stride=1,
padding=0, bias=True))
if 'hm' in head:
fc[-1].bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
else:
fc = nn.Conv2d(64, classes,
kernel_size=1, stride=1,
padding=0, bias=True)
if 'hm' in head:
fc.bias.data.fill_(-2.19)
else:
fill_fc_weights(fc)
self.__setattr__(head, fc)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _get_deconv_cfg(self, deconv_kernel):
if deconv_kernel == 4:
padding = 1
output_padding = 0
elif deconv_kernel == 3:
padding = 1
output_padding = 1
elif deconv_kernel == 2:
padding = 0
output_padding = 0
return deconv_kernel, padding, output_padding
def _make_deconv_layer(self, num_filters, num_kernels):
layers = []
kernel, padding, output_padding = \
self._get_deconv_cfg(num_kernels)
planes = num_filters
fc = DCN(self.inplanes, planes,
kernel_size=(3,3), stride=1,
padding=1, dilation=1, deformable_groups=1)
# fc = nn.Conv2d(self.inplanes, planes,
# kernel_size=3, stride=1,
# padding=1, dilation=1, bias=False)
# fill_fc_weights(fc)
up = nn.ConvTranspose2d(
in_channels=planes,
out_channels=planes,
kernel_size=kernel,
stride=2,
padding=padding,
output_padding=output_padding,
bias=self.deconv_with_bias)
fill_up_weights(up)
layers.append(fc)
layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
layers.append(nn.ReLU(inplace=True))
layers.append(up)
layers.append(nn.BatchNorm2d(planes, momentum=BN_MOMENTUM))
layers.append(nn.ReLU(inplace=True))
self.inplanes = planes
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
c1 = self.layer1(x)
c2 = self.layer2(c1)
c3 = self.layer3(c2)
c4 = self.layer4(c3)
p4 = c4
p3 = self.smooth_layer1(self.deconv_layer1(p4) + self.project_layer1(c3))
p2 = self.smooth_layer2(self.deconv_layer2(p3) + self.project_layer2(c2))
p1 = self.smooth_layer3(self.deconv_layer3(p2) + self.project_layer3(c1))
ret = {}
for head in self.heads:
ret[head] = self.__getattr__(head)(p1)
return [ret]
def init_weights(self, num_layers):
if 1:
url = model_urls['resnet{}'.format(num_layers)]
pretrained_state_dict = model_zoo.load_url(url)
print('=> loading pretrained model {}'.format(url))
self.load_state_dict(pretrained_state_dict, strict=False)
print('=> init deconv weights from normal distribution')
class DeformConv(nn.Module):
def __init__(self, chi, cho):
super(DeformConv, self).__init__()
self.actf = nn.Sequential(
nn.BatchNorm2d(cho, momentum=BN_MOMENTUM),
nn.ReLU(inplace=True)
)
self.conv = DCN(chi, cho, kernel_size=(3, 3), stride=1, padding=1, dilation=1, deformable_groups=1)
for name, m in self.actf.named_modules():
if isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
x = self.conv(x)
x = self.actf(x)
return x
resnet_spec = {18: (BasicBlock, [2, 2, 2, 2]),
34: (BasicBlock, [3, 4, 6, 3]),
50: (Bottleneck, [3, 4, 6, 3]),
101: (Bottleneck, [3, 4, 23, 3]),
152: (Bottleneck, [3, 8, 36, 3])}
def get_pose_net(num_layers, heads, head_conv=256, id_head='base'):
block_class, layers = resnet_spec[num_layers]
model = PoseResNet(block_class, layers, heads, head_conv=head_conv)  # PoseResNet.__init__ takes no id_head, so it is not forwarded
model.init_weights(num_layers)
return model
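# Hedged usage sketch (not part of the original file): the head names and
# channel counts are illustrative CenterNet-style choices, and init_weights
# downloads ImageNet-pretrained ResNet weights, so network access is assumed.
if __name__ == '__main__':
    import torch
    example_heads = {'hm': 80, 'wh': 2, 'reg': 2}
    net = get_pose_net(num_layers=18, heads=example_heads, head_conv=64)
    dummy = torch.randn(1, 3, 512, 512)
    outputs = net(dummy)
    for name, tensor in outputs[0].items():
        print(name, tuple(tensor.shape))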
|
the-stack_0_24045 | import sys
from itertools import product
import numpy as np
from sdpsolve.sdp.sdp import SDP
from sdpsolve.utils.matreshape import vec2block
from sdpsolve.solvers.bpsdp import solve_bpsdp
from representability.fermions.constraints.antisymm_sz_constraints import sz_adapted_linear_constraints, d2_e2_mapping
from representability.fermions.constraints.spin_orbital_constraints import spin_orbital_linear_constraints, \
d2_e2_mapping as d2_e2_mapping_spinorbital
from representability.fermions.basis_utils import geminal_spin_basis
from representability.fermions.hamiltonian import spin_orbital_marginal_norm_min
from representability.tensor import Tensor
from representability.multitensor import MultiTensor
def sdp_nrep_sz_reconstruction(corrupted_tpdm_aa, corrupted_tpdm_bb,
corrupted_tpdm_ab, num_alpha, num_beta,
disp=False, inner_iter_type='EXACT', epsilon=1.0E-8,
max_iter=5000):
if np.ndim(corrupted_tpdm_aa) != 2:
raise TypeError("corrupted_tpdm_aa must be a 2-tensor")
if np.ndim(corrupted_tpdm_bb) != 2:
raise TypeError("corrupted_tpdm_bb must be a 2-tensor")
if np.ndim(corrupted_tpdm_ab) != 2:
raise TypeError("corrupted_tpdm_ab must be a 2-tensor")
if num_alpha != num_beta:
raise ValueError("right now we are not supporting differing spin numbers")
spatial_basis_rank = int(np.sqrt(corrupted_tpdm_ab.shape[0]))
# get basis bijection
bij_bas_aa, bij_bas_ab = geminal_spin_basis(spatial_basis_rank)
# build basis look up table
bas_aa = {}
bas_ab = {}
cnt_aa = 0
cnt_ab = 0
# iterate over spatial orbital indices
for p, q in product(range(spatial_basis_rank), repeat=2):
if q > p:
bas_aa[(p, q)] = cnt_aa
cnt_aa += 1
bas_ab[(p, q)] = cnt_ab
cnt_ab += 1
dual_basis = sz_adapted_linear_constraints(spatial_basis_rank, num_alpha, num_beta,
['ck', 'cckk', 'kkcc', 'ckck'])
dual_basis += d2_e2_mapping(spatial_basis_rank, bas_aa, bas_ab,
corrupted_tpdm_aa, corrupted_tpdm_bb, corrupted_tpdm_ab)
c_cckk_me_aa = spin_orbital_marginal_norm_min(corrupted_tpdm_aa.shape[0], tensor_name='cckk_me_aa')
c_cckk_me_bb = spin_orbital_marginal_norm_min(corrupted_tpdm_bb.shape[0], tensor_name='cckk_me_bb')
c_cckk_me_ab = spin_orbital_marginal_norm_min(corrupted_tpdm_ab.shape[0], tensor_name='cckk_me_ab')
copdm_a = Tensor(np.zeros((spatial_basis_rank, spatial_basis_rank)), name='ck_a')
copdm_b = Tensor(np.zeros((spatial_basis_rank, spatial_basis_rank)), name='ck_b')
coqdm_a = Tensor(np.zeros((spatial_basis_rank, spatial_basis_rank)), name='kc_a')
coqdm_b = Tensor(np.zeros((spatial_basis_rank, spatial_basis_rank)), name='kc_b')
ctpdm_aa = Tensor(np.zeros_like(corrupted_tpdm_aa), name='cckk_aa', basis=bij_bas_aa)
ctpdm_bb = Tensor(np.zeros_like(corrupted_tpdm_bb), name='cckk_bb', basis=bij_bas_aa)
ctpdm_ab = Tensor(np.zeros_like(corrupted_tpdm_ab), name='cckk_ab', basis=bij_bas_ab)
ctqdm_aa = Tensor(np.zeros_like(corrupted_tpdm_aa), name='kkcc_aa', basis=bij_bas_aa)
ctqdm_bb = Tensor(np.zeros_like(corrupted_tpdm_bb), name='kkcc_bb', basis=bij_bas_aa)
ctqdm_ab = Tensor(np.zeros_like(corrupted_tpdm_ab), name='kkcc_ab', basis=bij_bas_ab)
cphdm_ab = Tensor(np.zeros((spatial_basis_rank, spatial_basis_rank, spatial_basis_rank, spatial_basis_rank)), name='ckck_ab')
cphdm_ba = Tensor(np.zeros((spatial_basis_rank, spatial_basis_rank, spatial_basis_rank, spatial_basis_rank)), name='ckck_ba')
cphdm_aabb = Tensor(np.zeros((2 * spatial_basis_rank**2, 2 * spatial_basis_rank**2)), name='ckck_aabb')
ctensor = MultiTensor([copdm_a, copdm_b, coqdm_a, coqdm_b, ctpdm_aa, ctpdm_bb, ctpdm_ab, ctqdm_aa, ctqdm_bb,
ctqdm_ab, cphdm_ab, cphdm_ba, cphdm_aabb, c_cckk_me_aa, c_cckk_me_bb, c_cckk_me_ab])
ctensor.dual_basis = dual_basis
A, _, b = ctensor.synthesize_dual_basis()
nc, nv = A.shape
nnz = A.nnz
sdp = SDP()
sdp.nc = nc
sdp.nv = nv
sdp.nnz = nnz
sdp.blockstruct = list(map(lambda x: int(np.sqrt(x.size)), ctensor.tensors))
sdp.nb = len(sdp.blockstruct)
sdp.Amat = A.real
sdp.bvec = b.todense().real
sdp.cvec = ctensor.vectorize_tensors().real
sdp.Initialize()
sdp.epsilon = float(epsilon)
sdp.epsilon_inner = float(epsilon)
sdp.inner_solve = inner_iter_type
sdp.disp = disp
sdp.iter_max = max_iter
solve_bpsdp(sdp)
rdms_solution = vec2block(sdp.blockstruct, sdp.primal)
return rdms_solution[4], rdms_solution[5], rdms_solution[6]
def sdp_nrep_reconstruction(corrupted_tpdm, num_alpha, num_beta):
"""
Reconstruct a 2-RDM that looks like the input corrupted tpdm.
This reconstruction scheme uses the spin-orbital reconstruction code, which is not the optimal-size SDP.
:param corrupted_tpdm: measured 2-RDM from the device
:param num_alpha: number of alpha spin electrons
:param num_beta: number of beta spin electrons
:return: purified 2-RDM
"""
if np.ndim(corrupted_tpdm) != 4:
raise TypeError("corrupted_tpdm must be a 4-tensor")
if num_alpha != num_beta:
raise ValueError("right now we are not supporting differing spin numbers")
sp_dim = corrupted_tpdm.shape[0] # single-particle rank
opdm = np.zeros((sp_dim, sp_dim), dtype=int)
oqdm = np.zeros((sp_dim, sp_dim), dtype=int)
tpdm = np.zeros_like(corrupted_tpdm)
tqdm = np.zeros_like(corrupted_tpdm)
tgdm = np.zeros_like(corrupted_tpdm)
opdm = Tensor(tensor=opdm, name='ck')
oqdm = Tensor(tensor=oqdm, name='kc')
tpdm = Tensor(tensor=tpdm, name='cckk')
tqdm = Tensor(tensor=tqdm, name='kkcc')
tgdm = Tensor(tensor=tgdm, name='ckck')
error_matrix = spin_orbital_marginal_norm_min(sp_dim ** 2, tensor_name='cckk_me')
rdms = MultiTensor([opdm, oqdm, tpdm, tqdm, tgdm, error_matrix])
db = spin_orbital_linear_constraints(sp_dim, num_alpha + num_beta, ['ck', 'cckk', 'kkcc', 'ckck'])
db += d2_e2_mapping_spinorbital(sp_dim, corrupted_tpdm)
rdms.dual_basis = db
A, _, c = rdms.synthesize_dual_basis()
nv = A.shape[1]
nc = A.shape[0]
nnz = A.nnz
blocklist = [sp_dim, sp_dim, sp_dim ** 2, sp_dim ** 2, sp_dim ** 2, 2 * sp_dim ** 2]
nb = len(blocklist)
sdp = SDP()
sdp.nc = nc
sdp.nv = nv
sdp.nnz = nnz
sdp.blockstruct = blocklist
sdp.nb = nb
sdp.Amat = A.real
sdp.bvec = c.todense().real
sdp.cvec = rdms.vectorize_tensors().real
sdp.Initialize()
sdp.epsilon = float(1.0E-8)
sdp.inner_solve = "EXACT"
sdp.disp = True
solve_bpsdp(sdp)
solution_rdms = vec2block(blocklist, sdp.primal)
tpdm_reconstructed = np.zeros_like(corrupted_tpdm)
for p, q, r, s in product(range(sp_dim), repeat=4):
tpdm_reconstructed[p, q, r, s] = solution_rdms[2][p * sp_dim + q, r * sp_dim + s]
return tpdm_reconstructed
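# Hedged usage sketch (not part of the original module): the "measured" 2-RDM
# below is a random placeholder of spin-orbital rank 4; a meaningful run needs
# a physical tpdm and the sdpsolve/representability dependencies installed.
if __name__ == '__main__':
    sp_dim = 4
    noisy_tpdm = np.random.rand(sp_dim, sp_dim, sp_dim, sp_dim)
    purified_tpdm = sdp_nrep_reconstruction(noisy_tpdm, num_alpha=1, num_beta=1)
    print('purified 2-RDM shape:', purified_tpdm.shape)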
|
the-stack_0_24047 | """Moderate Ray Tune run (32 trials, 4 actors).
This training run will start 32 Ray Tune trials, each starting 4 actors.
The cluster comprises 32 nodes.
Test owner: krfricke
Acceptance criteria: Should run through and report final results, as well
as the Ray Tune results table. No trials should error. All trials should
run in parallel.
"""
from collections import Counter
import json
import os
import time
import ray
from ray import tune
from xgboost_ray import RayParams
from _train import train_ray
def train_wrapper(config, ray_params):
train_ray(
path="/data/classification.parquet",
num_workers=4,
num_boost_rounds=100,
num_files=64,
regression=False,
use_gpu=False,
ray_params=ray_params,
xgboost_params=config,
)
if __name__ == "__main__":
search_space = {
"eta": tune.loguniform(1e-4, 1e-1),
"subsample": tune.uniform(0.5, 1.0),
"max_depth": tune.randint(1, 9)
}
ray.init(address="auto")
ray_params = RayParams(
elastic_training=False,
max_actor_restarts=2,
num_actors=4,
cpus_per_actor=1,
gpus_per_actor=0)
start = time.time()
analysis = tune.run(
tune.with_parameters(train_wrapper, ray_params=ray_params),
config=search_space,
num_samples=32,
resources_per_trial=ray_params.get_tune_resources())
taken = time.time() - start
result = {
"time_taken": taken,
"trial_states": dict(
Counter([trial.status for trial in analysis.trials]))
}
test_output_json = os.environ.get("TEST_OUTPUT_JSON",
"/tmp/tune_32x4.json")
with open(test_output_json, "wt") as f:
json.dump(result, f)
print("PASSED.")
|
the-stack_0_24051 | # -*- coding: utf-8 -*-
import logging
import unittest
import os
from .cronjobparser import CronJobParser, CronJob
log = logging.getLogger(__name__)
CRONTAB_TEST = "./testdata/crontab"
CRONTAB_TMP = "./testdata/tmpfile"
class TestCrontabParser(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
try:
os.unlink(CRONTAB_TMP)
except (IOError, OSError):
pass
def test_import_from_file(self):
CP = CronJobParser(CRONTAB_TEST)
config_raw = CP.get()
self.assertEqual(len(config_raw), 8)
config = CP.config
self.assertEqual(len(config), 2)
self.assertTrue("assignments" in config)
self.assertTrue("cronjobs" in config)
self.assertEqual(len(config.get("cronjobs")), 6)
self.assertTrue("PATH" in config.get("assignments"))
self.assertTrue("SHELL" in config.get("assignments"))
def test_from_config(self):
CP = CronJobParser(CRONTAB_TEST)
config = CP.config
self.assertEqual(len(config), 2)
self.assertTrue("assignments" in config)
self.assertTrue("cronjobs" in config)
def test_save(self):
CP = CronJobParser(CRONTAB_TEST)
config = CP.config
self.assertEqual(len(config.get("cronjobs")), 6)
cronj = config.get("cronjobs")[0]
self.assertEqual(cronj.minute, "17")
self.assertEqual(cronj.dow, "*")
# now save it to a file
CP.save(outfile=CRONTAB_TMP)
# Read the file again!
CP2 = CronJobParser(CRONTAB_TMP)
config = CP2.config
self.assertEqual(len(config.get("cronjobs")), 6)
cronj = config.get("cronjobs")[0]
self.assertEqual(cronj.minute, "17")
self.assertEqual(cronj.dow, "*")
# update the file and reread the cronjobs
with open(CRONTAB_TMP, 'a') as f:
f.write('1 2 3 4 5 foo bär')
CP2.read()
self.assertEqual(len(CP2.config.get("cronjobs")), 7)
self.assertEqual(CP2.config.get("cronjobs")[6].command, u'bär')
def test_forward_slash(self):
CP = CronJobParser(infile=None)
CP.cronjobs.append(CronJob.from_time('cmd', 'user', ['*/5', '*', '*', '*', '*']))
config = CP.config
self.assertEqual(len(config.get('cronjobs')), 1)
self.assertEqual(config.get('cronjobs')[0].minute, '*/5', config.get('cronjobs')[0])
CP.save(CRONTAB_TMP)
self.assertTrue(os.path.exists(CRONTAB_TMP))
# check that we have correctly written the crontab
with open(CRONTAB_TMP, 'r') as f:
tmp_data = f.read().splitlines()
self.assertEqual(tmp_data[-1], str(config.get('cronjobs')[0]), tmp_data)
# now re-read the written crontab
CP2 = CronJobParser(infile=CRONTAB_TMP)
config = CP2.config
self.assertEqual(config.get('cronjobs')[0].minute, '*/5', config.get('cronjobs')[0])
def test_cronjob_api(self):
CP = CronJobParser(CRONTAB_TEST)
config = CP.config
self.assertEqual(len(config.get("cronjobs")), 6)
backup_job2 = config['cronjobs'][-1]
self.assertEqual(backup_job2.command, "/usr/bin/privacyidea-backup")
self.assertEqual(backup_job2.time, ("1", "10", "1", "*", "*"))
self.assertEqual(backup_job2.get_time_comment(), "monthly")
self.assertEqual(backup_job2.get_time_summary(), "monthly at time 10:01, "
"day of month: 1, month: "
"*, day of week: *")
backup_copy = CronJob.from_time("/usr/bin/privacyidea-backup",
"privacyidea", ("1", "10", "1"))
self.assertEqual(str(backup_job2), str(backup_copy))
cronjob = CronJob.from_time("foo", "user", ())
assert cronjob.time == ("*",) * 5
self.assertEqual(cronjob.get_time_summary(),
"time *:*, day of month: *, month: *, day of week: *")
self.assertRaises(RuntimeError, CronJob.from_time, "foo", "user",
("1", "2", "3", "4", "5", "6"))
rm_cmd = "find /var/log/privacyidea -name privacyidea.log.* " \
"--exec 'rm' '-f' '{}' ';'"
rm_job = CronJob.from_time(rm_cmd, "root", ('1', '2', '3', '4', '5'))
self.assertEqual(rm_job.command, rm_cmd)
self.assertEqual(rm_job.get_time_comment(), "yearly")
|
the-stack_0_24053 | from bokeh.io import curdoc
from bokeh.layouts import row, widgetbox, column
from bokeh.models import ColumnDataSource, Range1d
from bokeh.models.widgets import Slider, Button, RadioGroup, Dropdown, RadioButtonGroup
from bokeh.plotting import figure
from bokeh.models.widgets import DataTable, DateFormatter, TableColumn
from bokeh.models import ColumnDataSource, CustomJS
from os.path import dirname, join
import numpy as np
import pandas as pd
df = pd.read_hdf("data/mean_stdev.h5")
arteries = ["1-aortic_arch_I", "2-brachiocephalic_trunk", "3-subclavian_R_I", "4-subclavian_R_II",
"5-radial_R", "6-ulnar_R_I", "7-ulnar_R_II", "8-common_interosseous_R", "9-vertebral_R",
"10-common_carotid_R", "11-external_carotid_R", "12-internal_carotid_R", "13-aortic_arch_II",
"14-common_carotid_L", "15-internal_carotid_L", "16-external_carotid_L", "17-aortic_arch_III",
"18-subclavian_L_I", "19-vertebral_L", "20a-subclavian_L_II", "20b-axillary_L", "21-radial_L",
"22-ulnar_L_I", "23-ulnar_L_II", "24-common_interosseous_L", "25-aortic_arch_IV",
"26-posterior_intercostal_T6_R", "27-thoracic_aorta_II", "28-posterior_intercostal_T6_L",
"29-thoracic_aorta_III", "30-posterior_intercostal_T7_R", "31-thoracic_aorta_IV",
"32-posterior_intercostal_T7_L", "33-thoracic_aorta_V", "34-celiac_trunk", "35-common_hepatic",
"36-splenic_I", "37-splenic_II", "38-left_gastric", "39-abdominal_aorta_I", "40-superior_mesenteric",
"41-abdominal_aorta_II", "42-renal_L", "43-abdominal_aorta_III", "44-renal_R",
"45-abdominal_aorta_IV", "46-inferior_mesenteric", "47-abdominal_aorta_V", "48-common_iliac_R",
"49-internal_iliac_R", "50-external_iliac_R", "51-profunda_femoris_R", "52-femoral_R_II",
"53-popliteal_R_II", "54-anterior_tibial_R", "55-common_iliac_L", "56-internal_iliac_L",
"57-external_iliac_L", "58-profunda_femoris_L", "59-femoral_L_II", "60-popliteal_L_II",
"61-anterior_tibial_L", "62-basilar", "63-posterior_cerebral_P1_L", "64-posterior_cerebral_P2_L",
"65-posterior_communicating_L", "66-internal_carotid_II_L", "67-middle_cerebral_L",
"68-anterior_cerebral_I_L", "69-anterior_cerebral_II_L", "70-anterior_communicating",
"71-anterior_cerebral_II_R", "72-posterior_cerebral_P1_R", "73-posterior_cerebral_P2_R",
"74-posterior_communicating_R", "75-internal_carotid_II_R", "76-middle_cerebral_R",
"77-anterior_cerebral_I_R"]
arteries_labels = []
arteries_menu = []
for a in arteries:
aa = a.split("-")
aa[1] = aa[1].capitalize()
lbl = ''
for b in aa:
lbl += b+' '
arteries_menu.append((lbl, a))
# Set up data
a = 0
c = 0
dropdown = Dropdown(label="Select artery",
button_type="warning", menu=arteries_menu)
def change_dropdown_label(attr, old, new):
if dropdown.value.split("-")[0] == "20a":
idx = 19
elif dropdown.value.split("-")[0] == "20b":
idx = 20
elif int(dropdown.value.split("-")[0]) <= 19:
idx = int(dropdown.value.split("-")[0])-1
else:
idx = int(dropdown.value.split("-")[0])
dropdown.label = arteries_menu[idx][0]
dropdown.button_type = "default"
dropdown.on_change('value', change_dropdown_label)
x = np.linspace(0,1,100)
y = np.linspace(0,1,100)
source = ColumnDataSource(data=dict(xs=[x, x, x], ys=[y, y+2, y-2],
colors=["white", "white", "white"]))
# Set up plot
plot = figure(plot_height=500, plot_width=500, title=" ",
tools="crosshair, pan, reset, save, wheel_zoom, box_zoom, hover",
x_range=[0, 1])
plot.multi_line(xs='xs', ys='ys', source=source, color='colors')
plot.xaxis.axis_label = "time (s)"
plot.yaxis.axis_label = " "
table_source = ColumnDataSource(data=dict(time=[0], iavg=[0], istd=[0]))
download_button = Button(label="Download waveform (.csv)", button_type="default")
download_button.callback = CustomJS(args=dict(source=table_source),
code=open(join(dirname(__file__), "./download.js")).read())
table_columns = [
TableColumn(field="time", title="Time (s)"),
TableColumn(field="iavg", title="Mean"),
TableColumn(field="istd", title="SD")
]
data_table = DataTable(source=table_source, columns=table_columns, width=800)
ages = ["20", "30", "40", "50", "60", "70"]
ages_lbl = ["20+", "30+", "40+", "50+", "60+", "70+"]
radio_group_age = RadioButtonGroup(labels=ages_lbl, active=0)
radio_group = RadioButtonGroup(labels=["SI units", "Clinical units"], active=1)
radio_group_q = RadioButtonGroup(labels=["Flow", "Pressure", "Velocity"], active=1)
def plot_wave():
# Get the current slider values
a = dropdown.value
# b = int(locatn.value)
# c = int(r_age.value)
ci = radio_group_age.active
c = int(ages[ci])
units = radio_group.active
q = radio_group_q.active
# Generate the new curve
if q == 0:
iavg = df[(df["q"] == "Q") & (df["Artery"] == a) & (df["Age"] == c)]["inlet_mean"].values
istd = df[(df["q"] == "Q") & (df["Artery"] == a) & (df["Age"] == c)]["inlet_std"].values
elif q == 1:
iavg = df[(df["q"] == "P") & (df["Artery"] == a) & (df["Age"] == c)]["inlet_mean"].values
istd = df[(df["q"] == "P") & (df["Artery"] == a) & (df["Age"] == c)]["inlet_std"].values
elif q == 2:
iavg = df[(df["q"] == "u") & (df["Artery"] == a) & (df["Age"] == c)]["inlet_mean"].values
istd = df[(df["q"] == "u") & (df["Artery"] == a) & (df["Age"] == c)]["inlet_std"].values
if units == 1:
if q == 0:
plot.yaxis.axis_label = "Flow Q (ml/s)"
elif q == 1:
plot.yaxis.axis_label = "Pressure P (mmHg)"
elif q == 2:
iavg *= 100
istd *= 100
plot.yaxis.axis_label = "Velocity P (cm/s)"
else:
if q == 0:
iavg *= 1e-6
istd *= 1e-6
plot.yaxis.axis_label = "Flow Q (m^3/s)"
elif q == 1:
iavg *= 133.332
istd *= 133.332
plot.yaxis.axis_label = "Pressure P (kPa)"
elif q == 2:
plot.yaxis.axis_label = "Pressure P (m/s)"
x = np.linspace(0, 1, len(iavg))
source.data = dict(xs=[x, x, x], ys=[iavg-istd, iavg+istd, iavg],
colors=["silver", "silver", "black"])
qs = ["Volumetric flow rate", "Transmural pressure", "Blood velocity"]
if a.split("-")[0] == "20a":
idx = 19
elif a.split("-")[0] == "20b":
idx = 20
elif int(a.split("-")[0]) <= 19:
idx = int(a.split("-")[0])-1
else:
idx = int(a.split("-")[0])
plot.title.text = "{0} - {1}".format(arteries_menu[idx][0], qs[q])
table_source.data = dict(time=list(x), iavg=list(iavg), istd=list(istd))
button_plot = Button(label="Plot", button_type="success")
button_plot.on_click(plot_wave)
inputs = widgetbox(dropdown, radio_group_age, radio_group, radio_group_q, button_plot,
download_button)
curdoc().add_root(column(row(inputs, plot, width=800), data_table))
curdoc().title = "openBF-db" |
the-stack_0_24054 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import synthtool as s
import synthtool.gcp as gcp
import logging
import subprocess
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
versions = ['v1beta1']
for version in versions:
library = gapic.node_library('automl', version)
s.copy(library, excludes=['src/index.js', 'README.md', 'package.json'])
common_templates = gcp.CommonTemplates()
templates = common_templates.node_library()
s.copy(templates)
# [START fix-dead-link]
s.replace('**/doc/google/protobuf/doc_timestamp.js',
'https:\/\/cloud\.google\.com[\s\*]*http:\/\/(.*)[\s\*]*\)',
r"https://\1)")
s.replace('**/doc/google/protobuf/doc_timestamp.js',
'toISOString\]',
'toISOString)')
# [END fix-dead-link]
# Node.js specific cleanup
subprocess.run(['npm', 'install'])
subprocess.run(['npm', 'run', 'fix'])
|
the-stack_0_24055 | '''
###################################
Modified from Mike's predict_acc.py
###################################
'''
import os
import sys
import random
import pickle
import numpy as np
import pandas as pd
from keras.utils import to_categorical
from keras.models import load_model
base_data_path = os.path.join('/tmp2/b03902110/newphase1', 'data')
filename = os.path.join(base_data_path, 'X_train.npy')
X_train = np.load(filename)
filename = os.path.join(base_data_path, 'X_test.npy')
X_test = np.load(filename)
X_all = np.concatenate((X_train, X_test))
def getUnData():
fileX = os.path.join(base_data_path, 'X_unverified.npy')
fileY = os.path.join(base_data_path, 'y_unverified.npy')
filefname = os.path.join(base_data_path, 'fname_unverified.npy')
X_un = np.load(fileX)
y_un = np.load(fileY)
fname_un = np.load(filefname)
return X_un, y_un, fname_un
def getValData(idx):
filename = os.path.join(base_data_path, 'X/X{}.npy'.format(idx+1))
X_val = np.load(filename)
filename = os.path.join(base_data_path, 'y/y{}.npy'.format(idx+1))
y_val = np.load(filename)
return X_val, y_val
def normalize(X_norm):
mean = np.mean(X_all, axis=0)
std = np.std(X_all, axis=0)
X_norm = (X_norm - mean) / std
return X_norm
if __name__ == '__main__':
base_path = '/tmp2/b03902110/finalphase1'
X_un, y_un, fname_un = getUnData()
X_un = normalize(X_un)
for i in range(10):
base_model_path = os.path.join(base_path, 'cnn_model')
model_name = 'model{}'.format(i)
filename = os.path.join(base_model_path, model_name)
npy_predict = os.path.join(base_path, 'npy_predict_un')
if not os.path.exists(npy_predict):
os.makedirs(npy_predict)
csv_predict = os.path.join(base_path, 'csv_predict_un')
if not os.path.exists(csv_predict):
os.makedirs(csv_predict)
model = load_model(filename)
print('Evaluating X_val...')
X_val, y_val = getValData(i)
X_val = normalize(X_val)
score = model.evaluate(X_val, y_val)
print('model{} validation acc: {}'.format(i, score))
print('Predicting X_un...')
result = model.predict(X_un)
np.save(os.path.join(npy_predict, 'mow_cnn2d_unverified_{}.npy'.format(i+1)), result)
df = pd.DataFrame(result)
df.insert(0, 'fname', fname_un)
df.to_csv(os.path.join(csv_predict, 'mow_cnn2d_unverified_{}.csv'.format(i+1)), index=False, header=True)
print('Evaluating X_un...')
score = model.evaluate(X_un, y_un)
print('model{} unverified acc: {}'.format(i, score))
|
the-stack_0_24056 | from PyQt5 import QtWidgets
from imageprocess import Ui_MainWindow
# from PyQt5.QtWidgets import QFileDialog
from imageedit import imageedit_self
from fourier import fourier_self
from addnoise import noise_self
from allfilter import filter_self
from histogram import histogram_self
from imageenhance import image_enhance_self
from threshold import threshold_self
from morphology import morphology_self
from featuredetect import feature_self
from bottombutton import button_self
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
class mywindow(QtWidgets.QMainWindow, Ui_MainWindow):
def __init__(self):
super(mywindow, self).__init__()
self.setupUi(self)
self.open.triggered.connect(self.read_file) # Open
self.save.triggered.connect(self.save_file) # Save
# Edit
self.zoomin.triggered.connect(self.zoomin_file) # Zoom in
self.zoomout.triggered.connect(self.zoomout_file) # Zoom out
self.gray.triggered.connect(self.gray_file) # Grayscale
self.light.triggered.connect(self.light_file) # Brightness
self.rotate.triggered.connect(self.rotate_file) # Rotate
self.screenshots.triggered.connect(self.screenshots_file) # Screenshot
# Transforms
self.FFT.triggered.connect(self.fft_file) # Fourier transform
self.cos.triggered.connect(self.cos_file) # Discrete cosine transform
self.Radon.triggered.connect(self.radon_file) # Radon transform
# Noise
self.gauss.triggered.connect(self.gauss_file) # Gaussian noise
self.sault.triggered.connect(self.sault_file) # Salt-and-pepper noise
self.spot.triggered.connect(self.spot_file) # Speckle noise
self.poisson.triggered.connect(self.poisson_file) # Poisson noise
# Filtering
self.highpass.triggered.connect(self.highpass_file) # High-pass filter
self.lowpass.triggered.connect(self.lowpass_file) # Low-pass filter
self.linearsmooth.triggered.connect(self.linearsmooth_file) # Smoothing filter (linear)
self.nonlinear.triggered.connect(self.nonlinear_file) # Smoothing filter (non-linear)
self.linearsharpen.triggered.connect(self.linearsharpen_file) # Sharpening filter (linear)
self.nonlinearsharp.triggered.connect(self.nonlinearsharp_file) # Sharpening filter (non-linear)
# Histogram statistics
self.Rhistogram.triggered.connect(self.Rhistogram_file) # R histogram
self.Ghistogram.triggered.connect(self.Ghistogram_file) # G histogram
self.Bhistogram.triggered.connect(self.Bhistogram_file) # B histogram
# Image enhancement
self.pseenhance.triggered.connect(self.pseenhance_file) # Pseudo-color enhancement
self.realenhance.triggered.connect(self.realenhance_file) # True-color enhancement
self.histogramequal.triggered.connect(self.histogramequal_file) # Histogram equalization
self.NTSC.triggered.connect(self.NTSC_file) # NTSC color model
self.YCbCr.triggered.connect(self.YCbCr_file) # YCbCr color model
self.HSV.triggered.connect(self.HSV_file) # HSV color model
# Threshold segmentation
self.divide.triggered.connect(self.divide_file) # Threshold segmentation
# Morphological processing
self.morphology.triggered.connect(self.morphology_file) # Morphological processing
# Feature extraction
self.feature.triggered.connect(self.feature_file) # Feature extraction
# Image classification and recognition
# self.imageclassify.triggered.connect(self.imageclassify_file)# Image classification and recognition
# Button functions
# Browse
self.Scan.clicked.connect(self.scan_file)
# Previous image
self.Back.clicked.connect(self.pre_file)
# Next image
self.Next.clicked.connect(self.next_file)
# Add watermark
self.Mark.clicked.connect(self.mark_file) # Add watermark
# Zoom in
self.Magnify.clicked.connect(self.manify_file)
# Rotate 90° clockwise
self.R90CW.clicked.connect(self.r90cw_file)
# Rotate 90° counter-clockwise
self.R90CCW.clicked.connect(self.r90ccw_file)
def read_file(self):
# 选取文件
filename, filetype = QFileDialog.getOpenFileName(self, "打开文件", "imagetest", "All Files(*);;Text Files(*.png)")
print(filename, filetype)
self.lineEdit.setText(filename)
self.label_pic.setPixmap(QPixmap(filename))
def save_file(self):
# 获取文件路径
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
# 用全局变量保存所有需要保存的变量在内存中的值。
file_name = QFileDialog.getSaveFileName(self, "文件保存", "imagetest/save", "All Files (*);;Text Files (*.png)")
print(file_name[0])
btn = button_self()
btn.file_save(file_path, file_name[0])
def zoomin_file(self): # 放大
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
imageedit_self.imagemagnification(file_path)
def zoomout_file(self): # 缩小
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
imageedit_self.imagereduction(file_path)
def gray_file(self): # 灰度
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
imageedit_self.imagegray(file_path)
def light_file(self): # 亮度
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
imageedit_self.imagebrightness(file_path, 1.3, 3)
def rotate_file(self): # 旋转
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
imageedit_self.imagerotate(file_path)
def screenshots_file(self): # 截图
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
imageedit_self.imagegrab(file_path)
# 变换
def fft_file(self): # 傅里叶变换
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
fourier_self.fourier_transform(file_path)
def cos_file(self): # 离散余弦变换
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
fourier_self.Dct_image(file_path)
def radon_file(self): # Radon变换
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
fourier = fourier_self()
fourier.image_radon(file_path)
# 噪声
def gauss_file(self): # 高斯噪声
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
noise_self.addGaussianNoise(file_path, 0.01) # 添加10%的高斯噪声
def sault_file(self): # 椒盐噪声
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
noise_self.saltpepper(file_path, 0.01) # 添加10%的椒盐噪声
def spot_file(self): # 斑点噪声
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
noise_self.speckle_img(file_path) # 添加斑点噪声
def poisson_file(self): # 泊松噪声
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
noise_self.poisson_img(file_path) # 添加泊松噪声
# 滤波
def highpass_file(self): # 高通滤波
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
filter_self.high_pass_filter(file_path)
def lowpass_file(self): # 低通滤波
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
filter_self.low_pass_filter(file_path)
def linearsmooth_file(self): # 平滑滤波(线性)
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
filter_self.Linear_Smooth_Filter(file_path)
def nonlinear_file(self): # 平滑滤波(非线性)
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
filter1 = filter_self()
filter1.NonLinear_Smooth_Filter(file_path) #
def linearsharpen_file(self): # 锐化滤波(线性)
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
filter1 = filter_self()
filter1.Sharpen_Filter(file_path)
def nonlinearsharp_file(self): # 锐化滤波(非线性)
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
filter1 = filter_self()
filter1.NonLinear_Smooth_Filter(file_path) #
# 直方图统计
def Rhistogram_file(self): # R直方图
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
histogram_self.R_histogram(file_path) #
def Ghistogram_file(self): # G直方图
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
histogram_self.G_histogram(file_path) #
def Bhistogram_file(self): # B直方图
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
histogram_self.B_histogram(file_path) #
# 图像增强
def pseenhance_file(self): # 伪彩色增强
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
image_enhance = image_enhance_self()
image_enhance.Color(file_path) #
def realenhance_file(self): # 真彩色增强
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
image_enhance = image_enhance_self()
image_enhance.Color(file_path) #
def histogramequal_file(self): # 直方图均衡
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
image_enhance_self.colorhistogram(file_path) #
def NTSC_file(self): # NTSC颜色模型
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
image_enhance_self.colorhistogram(file_path) #
def YCbCr_file(self): # YCbCr颜色模型
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
image_enhance_self.ycrcbimage(file_path) #
def HSV_file(self): # HSV颜色模型
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
image_enhance_self.hsvimage(file_path) #
# 阈值分割
def divide_file(self): # 阈值分割
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
threshold_self.threshold_image(file_path) #
# 形态学处理
def morphology_file(self): # 形态学处理
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
morphology_self.morphology(file_path)
# 特征提取
def feature_file(self): # 特征提取
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
feature_self.feature_detection(file_path) #
# 按钮功能
# 浏览
def scan_file(self):
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
btn = button_self()
btn.scan_pic(file_path) #
# 上一张
def pre_file(self):
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
btn = button_self()
pre_path = btn.pre_file(file_path) #
self.lineEdit.setText('')
self.lineEdit.setText(pre_path)
self.label_pic.setPixmap(QPixmap(pre_path))
# 下一张
def next_file(self):
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
btn = button_self()
next_path = btn.next_file(file_path) #
self.lineEdit.setText('')
self.lineEdit.setText(next_path)
self.label_pic.setPixmap(QPixmap(next_path))
# 添加水印
def mark_file(self): #
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
btn = button_self()
btn.mark_pic(file_path) #
# 图片放大
def manify_file(self):
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
btn = button_self()
btn.zoomout_pic(file_path)
# 顺时针旋转90°
def r90cw_file(self):
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
btn = button_self()
btn.cw_pic(file_path)
# 顺时针旋转90°
def r90ccw_file(self):
file_path = self.lineEdit.text()
if file_path == '':
self.showMessageBox()
else:
btn = button_self()
btn.rcw_pic(file_path)
def showMessageBox(self):
res_3 = QMessageBox.warning(self, "警告", "请选择文件,再执行该操作!", QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
ui = mywindow()
ui.show()
sys.exit(app.exec_())
|
the-stack_0_24058 | import os
class Empresa():
def __init__(self,nom="",ruc=0,dire="",tele=0,ciud="",tipEmpr=""):
self.nombre=nom
self.ruc=ruc
self.direccion=dire
self.telefono=tele
self.ciudad=ciud
self.tipoEmpresa=tipEmpr
def datosEmpresa(self):#3
self.nombre=input("Ingresar nombre de la empresa: ")
self.ruc=int(input("Ingresar ruc de la empresa: "))
self.direccion=input("Ingresar la direccion de la empresa: ")
self.telefono=int(input("Ingresar el numero de telefono de la empresa: "))
self.ciudad=input("Ingresar ciudad donde esta la empresa: ")
self.tipoEmpresa=input("Ingresar tipo de empresa publica o privada: ")
def mostrarEmpresa(self):
print("")
print("Empresa")
print("La empresa de nombre {}\n De RUC #{} \n Está ubicada en {}\n Se puede comunicar al #{}\n Está empresa esta en la ciudad de {}\n Es una entidad {}".format(self.nombre,self.ruc,self.direccion, self.telefono,self.ciudad, self.tipoEmpresa))
class Empleado(Empresa):
def __init__(self,nom="",cedu=0,dire="",tele=0,email="",estado="",profe=""):
self.nombre=nom
self.cedula=cedu
self.direccion=dire
self.telefono=tele
self.correo=email
self.estadocivil=estado
self.profesion=profe
def empleado(self):
self.nombre=input("Ingresar nombre del empleado: ")
self.cedula=int(input("Ingresar numero de cedula del empleado: "))
self.direccion=input("Ingresar la direccion del empleado: ")
self.telefono=int(input("Ingresar numero de contacto del empleado: "))
self.correo=input("Ingresar correo personal del empleado: ")
def empleadoObrero(self):
self.estadocivil=input("Ingresar estado civil del empleado: ")
def empleadoOficina(self):
self.profesion=input("Ingresar profesion del empleado: ")
def mostrarempleado(self):
print("El empleado: {} con # de C.I. {} \n Con direccion {}, y numero de contacto{}\n Y correo {}".format(self.nombre,self.cedula,self.direccion,self.telefono,self.correo))
class Departamento(Empleado):
def __init__(self,dep=""):
self.departamento=dep
def departa(self):
self.departamento=input("Ingresar el departamento al que pertenece el empleado: ")
def mostrarDeparta(self):
print("El empleado pertenece al departamento de: {}".format(self.departamento))
class Pagos(Empleado):
def __init__(self, desper=0,valhora=0,hotraba=0,extra=0,suel=0,hrecar=0,hextra=0,pres=0,mcou=0,valho=0,sobtiem=0,comofi=0,antobre=0,iemple=0,cuopres=0,tot=0,liquid=0,cuota=0,anti=0,comi=0,fNomina="",fIngreso="",iess=0):
self.permisos=desper
self.valorhora=valhora
self.horastrabajadas=hotraba
self.valextra=extra
self.sueldo= suel
self.horasRecargo= hrecar
self.horasExtraordinarias=hextra
self.prestamo= pres
self.mesCuota= mcou
self.valor_hora= valho
self.sobretiempo=sobtiem
self.comEmpOficina = comofi
self.antiEmpObrero = antobre
self.iessEmpleado = iemple
self.cuotaPrestamo=cuopres
self.totdes = tot
self.liquidoRecibir = liquid
self.mesCuota=cuota
self.antiguedad=anti
self.comision=comi
self.fechaNomina=fNomina
self.fechaIngreso=fIngreso
self.iess=iess
def pagoNormal(self):
self.sueldo=float(input("Ingresar sueldo del trabajador: $ "))
self.prestamo=float(input("Ingresar monto del prestamo que ha generado el empleado: $ "))
self.mesCuota=int(input("Ingresar meses a diferir el prestamo: "))
self.comision=float(input("Ingresar valor de la comsion: "))
self.antiguedad=int(input("Ingresar antiguedad: "))
self.iess=float(input("Ingresar valor del iees recordar que debe ser porcentuado Ejemplo si quiere decir 20% debe ingresar 0.20"))
def pagoExtra(self):
self.horasRecargo=int(input("Ingresar horas de recargo: "))
self.horasExtraordinarias=int(input("Ingresar horas extraordinarias: "))
self.fechaNomina=float(input("Ingresar fecha de nomida (formato año-mes-dia): "))
self.fechaIngreso=float(input("Ingresar fecha de ingreso (formato año-mes-dia): "))
def calculoSueldo(self):
self.valor_hora=self.sueldo/240
self.sobretiempo= self.valor_hora * (self.horasRecargo*0.50+self.horasExtraordinarias*2)
self.comEmpOficina = self.comision*self.sueldo
self.antiEmpObrero = self.antiguedad*(self.fechaNomina - self.fechaIngreso)/365*self.sueldo
self.iessEmpleado = self.iess*(self.sueldo+self.sobretiempo)
self.cuotaPrestamo=self.prestamo/self.mesCuota
self.toting = self.sueldo+self.sobretiempo+ self.comEmpOficina + self.antiEmpObrero
self.totdes = self.iessEmpleado + self.prestamo
self.liquidoRecibir = self.toting - self.totdes
def mostrarSueldo(self):
print("SUELDO BASE")
print("El empleado tiene un sueldo de {}".format(self.sueldo))
print("")
print("SOBRETIEMPO")
print("El valor de sobretiempo es de {}, con {} horas extras trabajadas".format(self.sobretiempo,self.ho))
emp=Empresa()
emp.datosEmpresa()
os.system ("cls")
emple=Empleado()
emple.empleado()
os.system ("cls")
emple.empleadoObrero()
emple.empleadoOficina()
os.system ("cls")
depa=Departamento()
depa.departa()
pag=Pagos()
pag.pagoNormal()
pag.pagoExtra()
pag.calculoSueldo()
os.system ("cls")
emp.mostrarEmpresa()
print("")
emple.mostrarempleado()
print("")
pag.mostrarSueldo() |
the-stack_0_24060 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from base import GAETestCase
from datetime import datetime, date
from decimal import Decimal
from category_app.category_model import Category
from routes.categorys.edit import index, save
from mommygae import mommy
from tekton.gae.middleware.redirect import RedirectResponse
class IndexTests(GAETestCase):
def test_success(self):
category = mommy.save_one(Category)
template_response = index(category.key.id())
self.assert_can_render(template_response)
class EditTests(GAETestCase):
def test_success(self):
category = mommy.save_one(Category)
old_properties = category.to_dict()
redirect_response = save(category.key.id(), name='name_string', slug='slug_string')
self.assertIsInstance(redirect_response, RedirectResponse)
edited_category = category.key.get()
self.assertEquals('name_string', edited_category.name)
self.assertEquals('slug_string', edited_category.slug)
self.assertNotEqual(old_properties, edited_category.to_dict())
def test_error(self):
category = mommy.save_one(Category)
old_properties = category.to_dict()
template_response = save(category.key.id())
errors = template_response.context['errors']
self.assertSetEqual(set(['name', 'slug']), set(errors.keys()))
self.assertEqual(old_properties, category.key.get().to_dict())
self.assert_can_render(template_response)
|
the-stack_0_24061 | import Distance
def test(p1 ,p2, p):
if p1[0] == p2[0] and p1[1] != p2[1]:
'''
vertical segment: decide by comparing x, then check whether p falls in the segment's y-range
'''
if p[0] == p1[0] :
#print("----------------")
vy = sorted([p1[1], p2[1]])
r2 = range(vy[0] - 1, vy[1] + 1)
if (p[1] in r2):
#print("1---> 0 ")
return 0
else:
#print("1---> 2 ")
return 2
elif p[0] > p1[0]:
return 1
else:
return -1
elif p1[1] == p2[1] and p1[0] != p2[0]:
'''
horizontal segment: decide by comparing y, then check whether p falls in the segment's x-range
'''
if p[1] == p1[1]:
#print("----------------")
vx = sorted([p1[0], p2[0]])
r1 = range(vx[0] - 1, vx[1] + 1)
if (p[0] in r1):
#print("2 ---> 0 ")
return 0
else:
#print("2 ---> 2 ")
return 2
elif p[1] > p1[1]:
return 1
else:
return -1
else:
'''
general segment: build the line equation w1 * x + w2 * y + b = 0 and use the
sign of w1 * p[0] + w2 * p[1] + b to decide which side of the line p lies on
'''
# compute slope
m = (p2[1] - p1[1]) / (p2[0] - p1[0])
# compute w1
w2 = 1
# compute w2
w1 = - m
# compute b
b = -(p1[1] - m * p1[0])
if w2 * p[1] + w1 * p[0] + b > 0.000000000000001:
return 1
elif w2 * p[1] + w1 * p[0] + b < -0.000000000000001:
return -1
elif (w2 * p[1] + w1 * p[0] + b) > -0.000000000000001 and (w2 * p[1] + w1 * p[0] + b) < 0.000000000000001:
#print("----------------")
vx = sorted([p1[0], p2[0]])
vy = sorted([p1[1], p2[1]])
r1 = range(vx[0] - 1, vx[1] + 1)
r2 = range(vy[0] - 1, vy[1] + 1)
if (p[0] in r1) and (p[1] in r2):
#print("3 ---> 0 ")
return 0
else:
#print("3 ---> 2 ")
return 2
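# Small illustrative check (not in the original file): the return convention of
# test() is 0 for a point on the segment, 1/-1 for the two sides of the line,
# and 2 for a collinear point outside the segment's bounding range.
def _demo_side_test():
    p1, p2 = [0, 0], [10, 0]         # a horizontal segment
    print(test(p1, p2, [5, 0]))      # expected 0  (on the segment)
    print(test(p1, p2, [5, 3]))      # expected 1  (above the line)
    print(test(p1, p2, [5, -3]))     # expected -1 (below the line)
    print(test(p1, p2, [20, 0]))     # expected 2  (collinear, outside)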
def GenerateEdges(vertex):
'''
generate a list of all edges of an object in a form [point1, point2]
'''
combinlist = []
N = len(vertex)
for i in range(N):
if i < N - 1:
j = i + 1
else:
j = 0
combin = [vertex[i], vertex[j]]
combinlist.append(combin)
return combinlist
def AdjacencyCheck(coordinates, centerx):
#print(points)
# create a group of edges for object one!
Group1 = GenerateEdges(coordinates[0])
#print(Group1)
# create a group of edges for object two!
Group2 = GenerateEdges(coordinates[1])
#print(Group2)
c2 = centerx[1]
c1 = centerx[0]
#print(centerx)
# append the center of each object to the list of vertices.
'''
I have done that because i want to check centers of each objects in case when two objects completely overlapped.
'''
coordinates[0].append(centerx[0])
coordinates[1].append(centerx[1])
#print("points0", points[0])
#print("points1", points[1])
# initialize the flags to zero
flag0 = 0 # flag0 is for indicate whether the two objects are touching
flag1 = 0 # flag1 is for indicating whether the two objects are overlapping
# it also shows how many vertices are overlapped
flag2 = 0 # flag2 is to indicate whether the two objects are adjacent
# These matrices help me to store and indicate which point is bounded or touching the line.
Matrix1 = [[0 for x in range(len(Group2))] for x in range(len(coordinates[0]))]
Matrix2 = [[0 for x in range(len(Group1))] for x in range(len(coordinates[1]))]
for idx, x in enumerate(coordinates[0]):
#print(x)
total = 0
for idy, y in enumerate(Group2):
#print(y)
c2_sign = test(y[0], y[1], c2)
x_sign = test(y[0], y[1], x)
total += (x_sign * c2_sign)
if x_sign == 0:
Matrix1[idx][idy] = 1
if total == len(Group2):
flag1 += 1
else:
flag1 += 0
for idx, x in enumerate(coordinates[1]):
total = 0
#print(x)
for idy, y in enumerate(Group1):
#print(y)
c1_sign = test(y[0], y[1], c1)
x_sign = test(y[0], y[1], x)
total += (x_sign * c1_sign)
if x_sign == 0:
Matrix2[idx][idy] = 1
if total == len(Group1):
flag1 += 1
else:
flag1 += 0
#print(Matrix1)
#print(Matrix2)
idex1 = [sum(Matrix1[i]) for i in range(len(Matrix1))]
idex2 = [sum(Matrix2[i]) for i in range(len(Matrix2))]
# Index of Non-Zero elements in Python list
# using list comprehension + enumerate()
res1 = [idx for idx, val in enumerate(idex1) if val != 0]
res2 = [idx for idx, val in enumerate(idex2) if val != 0]
if len(res1) == 1 and len(res2) == 1:
flag0 = 1
print('touching at the same vertex {}, and {}'.format(coordinates[0][res1[0]], coordinates[1][res2[0]]))
elif len(res1) == 1 or len(res2) == 1:
flag0 = -1
if len(res1) == 1:
e1 = Matrix1[res1[0]]
id1 = e1.index(max(e1))
            print('touching at one vertex {} and an Edge {}'.format(coordinates[0][res1[0]], Group2[id1]))
else:
e1 = Matrix2[res2[0]]
id1 = e1.index(max(e1))
            print('touching at one vertex {} and an Edge {}'.format(coordinates[1][res2[0]], Group1[id1]))
elif len(res1) == 2 and len(res2) == 2:
flag2 = 1
print('Adjacent at an Edge with two vertices {}, and {}'.format(coordinates[0][res1[0]], coordinates[0][res1[1]]))
if flag1 >= 1:
print("overlapped with {} vertex !!".format(flag1))
coordinates[0].remove(centerx[0])
coordinates[1].remove(centerx[1])
return flag0, flag1, flag2
# if __name__ == "__main__":
# points1 = [[[100,100],[200,300],[300,100]],[[450,450],[300,400],[400,200]]]
# #d = Distance(c1, c2)
# points2 = [[[80,90], [170, 80], [90, 130]], [[70,130], [90,130], [110, 180], [70, 180]]]
# # a triangle and sequare adjacent
# points3 = [[[140,40],[180,100],[100,100]],[[100,100],[180,100],[180,160],[100,180]]]
# # two triangles overlapped
# points4 = [[[200,60],[280,30],[250,100]],[[240,60],[310,50],[290,110]]]
# # pentagon and rectangle overlapped
# points5 = [[[110,180],[180,220],[150,290],[100,290],[70,250]],[[150,180],[230,180],[230,260],[150,260]]]
# # two object touching in a vertex and an edge
# points6 = [[[40,10],[40,70],[10,40]],[[70,10],[70,70],[40,40]]]
# # two objects overlapped with more than two vertex.
# points = [[[40,370],[110,370],[110,430],[40,430]],[[90,310],[90,390],[50,390]]]
# obj1 = points[0]
# obj2 = points[1]
# cx1 = sum([obj1[i][0] for i in range(len(obj1))]) / len(obj1)
# cy1 = sum([obj1[i][1] for i in range(len(obj1))]) / len(obj1)
# cx2 = sum([obj2[i][0] for i in range(len(obj2))]) / len(obj2)
# cy2 = sum([obj2[i][1] for i in range(len(obj2))]) / len(obj2)
# centerx = [[cx1, cy1], [cx2, cy2]]
# #print(centerx)
# flag0, flag1, flag2 = AdjacencyCheck(points, centerx)
# print("Touching flag", flag0)
# print("Overlapping flag", flag1)
# print("Adjacent flag", flag2)
|
the-stack_0_24063 | # -*- coding: utf-8 -*-
'''
/***************************************************************************
PagLuxembourg
A QGIS plugin
Gestion de Plans d'Aménagement Général du Grand-Duché de Luxembourg
-------------------
begin : 2015-08-25
git sha : $Format:%H$
copyright : (C) 2015 by arx iT
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
'''
from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication
from PyQt4.QtGui import QAction, QIcon, QPushButton
# Initialize Qt resources from file resources.py
import resources
# Import the code for the dialog
import os.path
# Widgets
from widgets.create_project.create_project import *
from widgets.import_data.import_data import *
from widgets.import_manager.import_manager import *
from widgets.export_gml.export_gml import *
from widgets.stylize.stylize import *
from widgets.data_checker.data_checker import *
from widgets.topoclean.topoclean import *
from widgets.topology.topology import *
from widgets.about.about import *
import editor.simple_filename
import editor.precise_range
# Schema
from PagLuxembourg.schema import *
from PagLuxembourg.project import *
# Global variables
plugin_dir = os.path.dirname(__file__)
xsd_schema = PAGSchema()
qgis_interface = None
current_project = Project()
class PAGLuxembourg(object):
'''
QGIS Plugin Implementation.
'''
def __init__(self, iface):
'''Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
'''
# Save reference to the QGIS interface
global qgis_interface
qgis_interface = iface
self.iface = iface
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
plugin_dir,
'i18n',
'{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Register custom editors widgets
# Declare instance attributes
self.actions = []
self.pag_actions = [] #PAG actions, disabled if the project is not PAG
self.menu = self.tr(u'&PAG Luxembourg')
# Toolbar initialization
self.toolbar = self.iface.addToolBar(u'PagLuxembourg')
self.toolbar.setObjectName(u'PagLuxembourg')
# QGIS interface hooks
self.iface.projectRead.connect(current_project.open)
self.iface.newProjectCreated.connect(current_project.open)
current_project.ready.connect(self.updateGui)
# Load current project
current_project.open()
# noinspection PyMethodMayBeStatic
def tr(self, message):
'''Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
'''
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('PAGLuxembourg', message)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None):
'''Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
'''
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
if callback is not None:
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
self.toolbar.addAction(action)
if add_to_menu:
self.iface.addPluginToMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
'''
Create the menu entries and toolbar icons inside the QGIS GUI.
'''
# New project
self.create_project_widget=CreateProject()
self.add_action(
':/plugins/PagLuxembourg/widgets/create_project/icon.png',
text=self.tr(u'New project'),
callback=self.create_project_widget.run,
status_tip=self.tr(u'Creates a new PAG project'),
parent=self.iface.mainWindow())
# Import data
self.import_data_widget = ImportData()
self.pag_actions.append(self.add_action(
':/plugins/PagLuxembourg/widgets/import_data/icon.png',
text=self.tr(u'Import data'),
callback=self.import_data_widget.run,
status_tip=self.tr(u'Import data from files (GML, SHP, DXF)'),
parent=self.iface.mainWindow()))
# Import manager
self.import_manager_widget = ImportManager()
self.pag_actions.append(self.add_action(
':/plugins/PagLuxembourg/widgets/import_manager/icon.png',
text=self.tr(u'Import manager'),
callback=self.import_manager_widget.run,
status_tip=self.tr(u'Open the import manager'),
parent=self.iface.mainWindow()))
# Export GML
self.export_gml_widget = ExportGML()
self.pag_actions.append(self.add_action(
':/plugins/PagLuxembourg/widgets/export_gml/icon.png',
text=self.tr(u'Export GML'),
callback=self.export_gml_widget.run,
status_tip=self.tr(u'Export the current project to a GML file'),
parent=self.iface.mainWindow()))
# Apply styles
self.stylize_project_widget = StylizeProject()
self.pag_actions.append(self.add_action(
':/plugins/PagLuxembourg/widgets/stylize/icon.png',
text=self.tr(u'Apply styles'),
callback=self.stylize_project_widget.run,
status_tip=self.tr(u'Apply predefined styles to the project'),
parent=self.iface.mainWindow()))
# Topo clean tool
'''self.topoclean_widget = TopoClean()
self.pag_actions.append(self.add_action(
':/plugins/PagLuxembourg/widgets/topoclean/icon.png',
text=self.tr(u'Clean topology'),
callback=self.topoclean_widget.run,
status_tip=self.tr(u'Clean the topology of a layer'),
parent=self.iface.mainWindow()))'''
# Geometry checker
found = False
for action in self.iface.vectorMenu().actions():
if action.text().replace("&","")==QCoreApplication.translate("QgsGeometryCheckerPlugin","G&eometry Tools").replace("&",""):
for subaction in action.menu().actions():
if subaction.text().replace("&","")==QCoreApplication.translate("QgsGeometryCheckerPlugin","Check Geometries").replace("&",""):
found = True
self.topoclean_widget = TopoClean(subaction)
self.pag_actions.append(self.add_action(
':/plugins/PagLuxembourg/widgets/topoclean/icon.png',
text=self.tr(u'Check geometry'),
callback=self.topoclean_widget.run,
status_tip=self.tr(u'Check geometries and fix errors'),
parent=self.iface.mainWindow()))
# Topology checker plugin is not enabled, ask the user to install it
if not found:
self.iface.initializationCompleted.connect(self._showMissingGeometryCheckerPluginMessage)
# Topology checker
found = False
for action in self.iface.vectorToolBar().actions():
if action.parent().objectName()==u'qgis_plugin_topolplugin':
found = True
self.topology_widget = TopologyChecker(action)
self.pag_actions.append(self.add_action(
':/plugins/PagLuxembourg/widgets/topology/icon.png',
text=self.tr(u'Check topology'),
callback=self.topology_widget.run,
status_tip=self.tr(u'Check layers topology according to predefined rules'),
parent=self.iface.mainWindow()))
# Topology checker plugin is not enabled, ask the user to install it
if not found:
self.iface.initializationCompleted.connect(self._showMissingTopolPluginMessage)
# Data checker
self.data_checker_widget = DataChecker()
self.pag_actions.append(self.add_action(
':/plugins/PagLuxembourg/widgets/data_checker/icon.png',
text=self.tr(u'Check data'),
callback=self.data_checker_widget.run,
status_tip=self.tr(u'Check project data for errors'),
parent=self.iface.mainWindow()))
# About
self.about_widget = About()
self.add_action(
':/plugins/PagLuxembourg/widgets/about/icon.png',
text=self.tr(u'About'),
callback=self.about_widget.run,
status_tip=self.tr(u'About the PAG plugin'),
parent=self.iface.mainWindow())
# Update buttons availability
self.updateGui()
def updateGui(self):
'''
Updates the plugin GUI
Disable buttons
'''
enabled = current_project.isPagProject()
#enabled = True
for action in self.pag_actions:
action.setEnabled(enabled)
def _showMissingTopolPluginMessage(self):
'''
Display a message to prompt the user to install the topology checker plugin
'''
self._showMissingPluginMessage(u'Topology Checker')
def _showMissingGeometryCheckerPluginMessage(self):
'''
        Display a message to prompt the user to install the geometry checker plugin
'''
self._showMissingPluginMessage(u'Geometry Checker')
def _showMissingPluginMessage(self, plugin):
'''
        Display a message to prompt the user to install the given plugin
'''
widget = self.iface.messageBar().createMessage(self.tr(u'PAG Luxembourg'),self.tr(u'The "') + plugin + self.tr(u'" plugin is required by the "PAG Luxembourg" plugin, please install it and restart QGIS.'))
button = QPushButton(widget)
button.setText(self.tr(u'Show plugin manager'),)
button.pressed.connect(self.iface.actionManagePlugins().trigger)
widget.layout().addWidget(button)
self.iface.messageBar().pushWidget(widget, QgsMessageBar.CRITICAL)
def unload(self):
'''
Removes the plugin menu item and icon from QGIS GUI.
'''
for action in self.actions:
self.iface.removePluginMenu(
self.tr(u'&PAG Luxembourg'),
action)
self.iface.removeToolBarIcon(action)
# remove the toolbar
del self.toolbar
# Disconnect Signals
self.iface.projectRead.disconnect(current_project.open)
self.iface.newProjectCreated.disconnect(current_project.open)
current_project.ready.disconnect(self.updateGui) |
the-stack_0_24064 | from pathlib import Path
import luigi
import matplotlib.pyplot as plt
import xarray as xr
from .....bulk_statistics import cross_correlation_with_height
from ...data.base import XArrayTarget
from ...data.extraction import ExtractField3D
from ...data.masking import MakeMask
from ...data.tracking_2d.cloud_base import ExtractBelowCloudEnvironment
class JointDistProfile(luigi.Task):
dk = luigi.IntParameter()
z_max = luigi.FloatParameter(significant=False, default=700.0)
v1 = luigi.Parameter()
v2 = luigi.Parameter()
base_name = luigi.Parameter()
mask_method = luigi.Parameter(default=None)
mask_method_extra_args = luigi.Parameter(default="")
plot_limits = luigi.ListParameter(default=None)
data_only = luigi.BoolParameter(default=False)
cloud_age_max = luigi.FloatParameter(default=200.0)
cumulative_contours = luigi.Parameter(default="10,90")
add_mean_ref = luigi.BoolParameter(default=False)
add_cloudbase_peak_ref = luigi.Parameter(default=False)
add_legend = luigi.Parameter(default=True)
figsize = luigi.ListParameter(default=[4.0, 3.0])
def requires(self):
reqs = dict(
full_domain=[
ExtractField3D(field_name=self.v1, base_name=self.base_name),
ExtractField3D(field_name=self.v2, base_name=self.base_name),
],
)
if self.add_cloudbase_peak_ref:
reqs["cloudbase"] = [
ExtractBelowCloudEnvironment(
base_name=self.base_name,
field_name=self.v1,
cloud_age_max=self.cloud_age_max,
ensure_tracked=self.add_cloudbase_peak_ref == "tracked_only",
),
ExtractBelowCloudEnvironment(
base_name=self.base_name,
field_name=self.v2,
cloud_age_max=self.cloud_age_max,
ensure_tracked=self.add_cloudbase_peak_ref == "tracked_only",
),
]
if self.mask_method is not None:
reqs["mask"] = MakeMask(
method_name=self.mask_method,
method_extra_args=self.mask_method_extra_args,
base_name=self.base_name,
)
return reqs
def output(self):
if self.mask_method is not None:
mask_name = MakeMask.make_mask_name(
base_name=self.base_name,
method_name=self.mask_method,
method_extra_args=self.mask_method_extra_args,
)
out_fn = "{}.cross_correlation.{}.{}.masked_by.{}.png".format(
self.base_name, self.v1, self.v2, mask_name
)
else:
out_fn = "{}.cross_correlation.{}.{}.png".format(
self.base_name, self.v1, self.v2
)
if self.data_only:
out_fn = out_fn.replace(".png", ".nc")
p_out = Path("data") / self.base_name / out_fn
return XArrayTarget(str(p_out))
else:
out_fn = out_fn.replace(
".png",
".{}__contour_levels.png".format(
self.cumulative_contours.replace(",", "__")
),
)
return luigi.LocalTarget(out_fn)
def run(self):
ds_3d = xr.merge([xr.open_dataarray(r.fn) for r in self.input()["full_domain"]])
if "cloudbase" in self.input():
ds_cb = xr.merge(
[xr.open_dataarray(r.fn) for r in self.input()["cloudbase"]]
)
else:
ds_cb = None
mask = None
if "mask" in self.input():
mask = self.input()["mask"].open()
ds_3d = ds_3d.where(mask)
ds_3d = ds_3d.sel(zt=slice(0, self.z_max))
ds_3d_levels = ds_3d.isel(zt=slice(None, None, self.dk))
if self.data_only:
if "mask" in self.input():
ds_3d_levels.attrs["mask_desc"] = mask.long_name
ds_3d_levels.to_netcdf(self.output().fn)
else:
self.make_plot(
ds_3d=ds_3d, ds_cb=ds_cb, ds_3d_levels=ds_3d_levels, mask=mask
)
plt.savefig(self.output().fn, bbox_inches="tight")
def make_plot(self, ds_3d, ds_cb, ds_3d_levels, mask):
fig_w, fig_h = self.figsize
fig, ax = plt.subplots(figsize=(fig_w, fig_h))
if self.add_mean_ref:
ax.axvline(x=ds_3d[self.v1].mean(), color="grey", alpha=0.4)
ax.axhline(y=ds_3d[self.v2].mean(), color="grey", alpha=0.4)
normed_levels = [int(v) for v in self.cumulative_contours.split(",")]
ax, _ = cross_correlation_with_height.main(
ds_3d=ds_3d_levels,
ds_cb=ds_cb,
normed_levels=normed_levels,
ax=ax,
add_cb_peak_ref_line=self.add_cloudbase_peak_ref,
add_legend=self.add_legend,
)
title = ax.get_title()
title = "{}\n{}".format(self.base_name, title)
if "mask" in self.input():
title += "\nmasked by {}".format(mask.long_name)
ax.set_title(title)
if self.plot_limits:
x_min, x_max, y_min, y_max = self.plot_limits
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
return ax
|
the-stack_0_24065 | #!/usr/bin/env python
import numpy as np
class Variable:
def __init__(self, data):
self.data = data
self.grad = None
self.creator = None
def set_creator(self, func):
self.creator = func
def backward(self):
        f = self.creator  # 1. Get the function that created this variable
        if f is not None:
            x = f.input  # 2. Get that function's input
            x.grad = f.backward(self.grad)  # 3. Call the function's backward method
            x.backward()  # Recursively call backward on the preceding variable
class Function:
def __call__(self, input):
x = input.data
y = self.forward(x)
output = Variable(y)
output.set_creator(self)
self.input = input
self.output = output
return output
def forward(self, x):
raise NotImplementedError()
def backward(self, gy):
raise NotImplementedError()
class Square(Function):
def forward(self, x):
y = x ** 2
return y
def backward(self, gy):
x = self.input.data
gx = 2 * x * gy
return gx
class Exp(Function):
def forward(self, x):
y = np.exp(x)
return y
def backward(self, gy):
x = self.input.data
gx = np.exp(x) * gy
return gx
def numerical_diff(f, x, eps=1e-4):
x0 = Variable(x.data - eps)
x1 = Variable(x.data + eps)
y0 = f(x0)
y1 = f(x1)
return (y1.data - y0.data) / (2 * eps)
def main():
A = Square()
B = Exp()
C = Square()
# forward
x = Variable(np.array(0.5))
a = A(x)
b = B(a)
y = C(b)
    # Walk the computation graph backwards from the output
assert y.creator == C
assert y.creator.input == b
assert y.creator.input.creator == B
assert y.creator.input.creator.input == a
assert y.creator.input.creator.input.creator == A
assert y.creator.input.creator.input.creator.input == x
    # Backpropagation
y.grad = np.array(1.0)
y.backward()
print(x.grad)
if __name__ == '__main__':
main()
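# Hedged example (not part of the original file): numerical_diff is defined above
# but never used; this sketch uses it as a gradient check against backprop for the
# same composite function y = (exp(x**2))**2 that main() builds.
def gradient_check_example():
    def f(x):
        A, B, C = Square(), Exp(), Square()
        return C(B(A(x)))
    x = Variable(np.array(0.5))
    y = f(x)
    y.grad = np.array(1.0)
    y.backward()                     # analytic gradient via backpropagation
    num_grad = numerical_diff(f, x)  # central-difference approximation
    assert np.allclose(x.grad, num_grad, atol=1e-4)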
|
the-stack_0_24066 | import pandas as pd
import sys
import utils
import cate_encoding
import config
def convert(ori, des, feats):
df_ori = utils.load_df(ori)
for f in feats:
tmp = utils.load_df(config.feat+'m3_' +f)
print(f)
df_ori = pd.concat([df_ori,tmp.drop(['session_id','impressions'],axis=1)],axis=1)
df_ori = utils.reduce_mem(df_ori)
df_ori.columns = df_ori.columns.astype(str)
utils.save_df(df_ori,des)
tr_cols = ['tr_last_item_diff.ftr',
'tr_item_uid_last_act.ftr',
'tr_sid_impr_rank.ftr',
'tr_user_feat.ftr',
'tr_imprlist_feat.ftr',
'tr_imprlist_feat2.ftr'
]
te_cols = [ c.replace('tr_','te_') for c in tr_cols ]
print(tr_cols)
print(te_cols)
#te_cols = ['te_item_sid_act.ftr','te_item_sid_clk_impr_debug.ftr','te_item_act_pv.ftr','te_ctr.ftr','te_item_last_act.ftr']
ori = 13
des = sys.argv[0][4:-3]
convert(config.feat+'m3_tr_%d.ftr' % ori,config.feat+'m3_tr_%s.ftr' % des, tr_cols)
convert(config.feat+'m3_te_%d.ftr' % ori,config.feat+'m3_te_%s.ftr' % des, te_cols)
|
the-stack_0_24071 | import time
from time import sleep
from threading import Thread
import requests
def operation_step_response(content, name, result, close_operation):
'''
Setup operation step response
'''
operation_name = content['operation']['request']['name']
operation_id = content['operation']['request']['id']
step_response = {
'version': '7.0',
'operation': {
'response': {
'timestamp': int(round(time.time() * 1000)),
'name': operation_name,
'id': operation_id,
'variableList': [],
'steps': [
{
'name': name,
'title': name,
'description': name,
'result': result
}
]
}
}
}
if close_operation:
step_response['operation']['response']['resultCode'] = result
step_response['operation']['response']['resultDescription'] = result
return step_response
def multi_step_response(content, device_id, publish_operation_step_response):
'''
Emulating multi step response
'''
# Wait 2 seconds before start the file download operation
sleep(2)
print('Multi-step response process')
parameters = content['operation']['request']['parameters']
download_status = None
for parameter in parameters:
if parameter['name'] == 'deploymentElements':
deployment_elements = parameter['value']['array']
for deployment_element in deployment_elements:
download_url = deployment_element['downloadUrl']
file_path = deployment_element['path']
publish_operation_step_response(
content, device_id, 'DOWNLOADFILE', 'SUCCESSFUL', False)
sleep(2)
print('Downloading {0}...'.format(download_url))
response = requests.get(
download_url, headers=conf.HEADERS, stream=True)
print('Status code received {}'.format(response.status_code))
if response.status_code == 200:
print('Writing file to disk...')
with open(file_path, 'wb') as downloading_file:
for chunk in response:
downloading_file.write(chunk)
publish_operation_step_response(
content, device_id, 'ENDINSTALL', 'SUCCESSFUL', False)
download_status = 'SUCCESSFUL'
print('...done')
else:
publish_operation_step_response(
content, device_id, 'DOWNLOADFILE', 'ERROR', False)
download_status = 'ERROR'
print('Something went wrong downloading file')
sleep(2)
publish_operation_step_response(
content, device_id, 'ENDUPDATE', download_status, True)
def reboot(content, device_id):
'''
REBOOT_EQUIPMENT response emulation
'''
print('Simulating the reboot of the equipment')
operation_id = content['operation']['request']['id']
return {
'version': '7.0',
'operation': {
'response': {
'deviceId': device_id,
'timestamp': int(round(time.time() * 1000)),
'name': 'REBOOT_EQUIPMENT',
'id': operation_id,
'resultCode': 'SUCCESSFUL',
'resultDescription': 'Success',
'steps': [
{
'name': 'REBOOT_EQUIPMENT',
'timestamp': int(round(time.time() * 1000)),
'result': 'SUCCESSFUL',
'description': 'Hardware reboot Ok',
'response': [
{
'name': 'responseParamName',
'value': 'responseParamValue'
}
]
}
]
}
}
}
def refresh_info(content, device_id):
'''
REFRESH_INFO response emulation
'''
print('Simulating the REFRESH_INFO of the equipment')
operation_id = content['operation']['request']['id']
return {
'version': '7.0',
'operation': {
'response': {
'deviceId': device_id,
'timestamp': int(round(time.time() * 1000)),
'name': 'REFRESH_INFO',
'id': operation_id,
'resultCode': 'SUCCESSFUL',
'resultDescription': 'Success',
'steps': [
{
'name': 'REFRESH_INFO',
'timestamp': int(round(time.time() * 1000)),
'result': 'SUCCESSFUL',
'description': 'Refresh Info Ok',
'response': [
{
'name': 'ccare.bps',
'value': 200
}
]
}
]
}
}
}
def update(content, device_id, publish_operation_step_response):
'''
Update response emulation
'''
thread = Thread(target=multi_step_response,
args=(content, device_id, publish_operation_step_response,))
thread.start()
operation_id = content['operation']['request']['id']
return {
'version': '7.0',
'operation': {
'response': {
'timestamp': int(round(time.time() * 1000)),
'name': 'UPDATE',
'id': operation_id,
'variableList': [],
'steps': [
{
'name': 'BEGINUPDATE',
'title': 'Begin Update',
'description': '',
'result': 'SUCCESSFUL'
}
]
}
}
}
def set_device_parameters(content, device_id):
'''
SET_DEVICE_PARAMETERS response emulation
'''
print('Simulating the SET_DEVICE_PARAMETERS operation')
operation_id = content['operation']['request']['id']
return {
'version': '7.0',
'operation': {
'response': {
'deviceId': device_id,
'timestamp': int(round(time.time() * 1000)),
'name': 'SET_DEVICE_PARAMETERS',
'id': operation_id,
'resultCode': 'SUCCESSFUL',
'resultDescription': 'Success',
'steps': [
{
'name': 'SET_DEVICE_PARAMETERS',
'timestamp': int(round(time.time() * 1000)),
'result': 'SUCCESSFUL',
'description': 'Parameters set ok',
'response': [
{
'name': 'responseParamName',
'value': 'responseParamValue'
}
]
}
]
}
}
}
|
the-stack_0_24073 | #! /usr/bin/env python3
"""Brute-force test script: test libpqxx against many compilers etc.
This script makes no changes in the source tree; all builds happen in
temporary directories.
To make this possible, you may need to run "make distclean" in the
source tree. The configure script will refuse to configure otherwise.
"""
# Without this, pocketlint does not yet understand the print function.
from __future__ import print_function
from abc import (
ABCMeta,
abstractmethod,
)
from argparse import ArgumentParser
from contextlib import contextmanager
from datetime import datetime
from functools import partial
import json
from multiprocessing import (
JoinableQueue,
Process,
Queue,
)
from multiprocessing.pool import (
Pool,
)
from os import (
cpu_count,
getcwd,
)
import os.path
from queue import Empty
from shutil import rmtree
from subprocess import (
CalledProcessError,
check_call,
check_output,
DEVNULL,
)
from sys import (
stderr,
stdout,
)
from tempfile import mkdtemp
from textwrap import dedent
from traceback import print_exc
CPUS = cpu_count()
GCC_VERSIONS = list(range(8, 12))
GCC = ['g++-%d' % ver for ver in GCC_VERSIONS]
CLANG_VERSIONS = list(range(7, 12))
CLANG = ['clang++-6.0'] + ['clang++-%d' % ver for ver in CLANG_VERSIONS]
CXX = GCC + CLANG
STDLIB = (
'',
'-stdlib=libc++',
)
OPT = ('-O0', '-O3')
LINK = {
'static': ['--enable-static', '--disable-shared'],
'dynamic': ['--disable-static', '--enable-shared'],
}
DEBUG = {
'plain': [],
'audit': ['--enable-audit'],
'maintainer': ['--enable-maintainer-mode'],
'full': ['--enable-audit', '--enable-maintainer-mode'],
}
# CMake "generators." Maps a value for cmake's -G option to a command line to
# run.
#
# I prefer Ninja if available, because it's fast. But hey, the default will
# work.
#
# Maps the name of the generator (as used with cmake's -G option) to the
# actual command line needed to do the build.
CMAKE_GENERATORS = {
'Ninja': ['ninja'],
'Unix Makefiles': ['make', '-j%d' % CPUS],
}
class Fail(Exception):
"""A known, well-handled exception. Doesn't need a traceback."""
class Skip(Exception):
""""We're not doing this build. It's not an error though."""
def run(cmd, output, cwd=None):
"""Run a command, write output to file-like object."""
command_line = ' '.join(cmd)
output.write("%s\n\n" % command_line)
check_call(cmd, stdout=output, stderr=output, cwd=cwd)
def report(output, message):
"""Report a message to output, and standard output."""
print(message, flush=True)
output.write('\n\n')
output.write(message)
output.write('\n')
def file_contains(path, text):
"""Does the file at path contain text?"""
with open(path) as stream:
for line in stream:
if text in line:
return True
return False
@contextmanager
def tmp_dir():
"""Create a temporary directory, and clean it up again."""
tmp = mkdtemp()
try:
yield tmp
finally:
rmtree(tmp)
def write_check_code(work_dir):
"""Write a simple C++ program so we can tesst whether we can compile it.
Returns the file's full path.
"""
path = os.path.join(work_dir, "check.cxx")
with open(path, 'w') as source:
source.write(dedent("""\
#include <iostream>
int main()
{
std::cout << "Hello world." << std::endl;
}
"""))
return path
def check_compiler(work_dir, cxx, stdlib, check, verbose=False):
"""Is the given compiler combo available?"""
err_file = os.path.join(work_dir, 'stderr.log')
if verbose:
err_output = open(err_file, 'w')
else:
err_output = DEVNULL
try:
command = [cxx, check]
if stdlib != '':
command.append(stdlib)
check_call(command, cwd=work_dir, stderr=err_output)
except (OSError, CalledProcessError):
if verbose:
with open(err_file) as errors:
stdout.write(errors.read())
print("Can't build with '%s %s'. Skipping." % (cxx, stdlib))
return False
else:
return True
# TODO: Use Pool.
def check_compilers(compilers, stdlibs, verbose=False):
"""Check which compiler configurations are viable."""
with tmp_dir() as work_dir:
check = write_check_code(work_dir)
return [
(cxx, stdlib)
for stdlib in stdlibs
for cxx in compilers
if check_compiler(
work_dir, cxx, stdlib, check=check, verbose=verbose)
]
def find_cmake_command():
"""Figure out a CMake generator we can use, or None."""
try:
caps = check_output(['cmake', '-E', 'capabilities'])
    except FileNotFoundError:
return None
names = {generator['name'] for generator in json.loads(caps)['generators']}
for gen, cmd in CMAKE_GENERATORS.items():
if gen in names:
return gen
return None
class Config(metaclass=ABCMeta):
    """Configuration for a build.
    These classes must be suitable for pickling, so we can send their objects to
    worker processes.
    """
@abstractmethod
def name(self):
"""Return an identifier for this build configuration."""
def make_log_name(self):
"""Compose log file name for this build."""
return "build-%s.out" % self.name()
class Build(metaclass=ABCMeta):
    """A pending or ongoing build, in its own directory.
    Each step returns True for Success, or False for failure.
    These classes must be suitable for pickling, so we can send their objects to
    worker processes.
    """
def __init__(self, logs_dir, config=None):
self.config = config
self.log = os.path.join(logs_dir, config.make_log_name())
# Start a fresh log file.
with open(self.log, 'w') as log:
log.write("Starting %s.\n" % datetime.utcnow())
self.work_dir = mkdtemp()
def clean_up(self):
"""Delete the build tree."""
rmtree(self.work_dir)
@abstractmethod
def configure(self, log):
"""Prepare for a build."""
@abstractmethod
def build(self, log):
"""Build the code, including the tests. Don't run tests though."""
def test(self, log):
"""Run tests."""
run(
[os.path.join(os.path.curdir, 'test', 'runner')], log,
cwd=self.work_dir)
def logging(self, function):
"""Call function, pass open write handle for `self.log`."""
# TODO: Should probably be a decorator.
with open(self.log, 'a') as log:
try:
function(log)
except Exception as error:
log.write("%s\n" % error)
raise
def do_configure(self):
"""Call `configure`, writing output to `self.log`."""
self.logging(self.configure)
def do_build(self):
"""Call `build`, writing output to `self.log`."""
self.logging(self.build)
def do_test(self):
"""Call `test`, writing output to `self.log`."""
self.logging(self.test)
class AutotoolsConfig(Config):
"""A combination of build options for the "configure" script."""
def __init__(self, cxx, opt, stdlib, link, link_opts, debug, debug_opts):
self.cxx = cxx
self.opt = opt
self.stdlib = stdlib
self.link = link
self.link_opts = link_opts
self.debug = debug
self.debug_opts = debug_opts
def name(self):
return '_'.join([
self.cxx, self.opt, self.stdlib, self.link, self.debug])
class AutotoolsBuild(Build):
"""Build using the "configure" script."""
def configure(self, log):
configure = [
os.path.join(getcwd(), "configure"),
"CXX=%s" % self.config.cxx,
]
if self.config.stdlib == '':
configure += [
"CXXFLAGS=%s" % self.config.opt,
]
else:
configure += [
"CXXFLAGS=%s %s" % (self.config.opt, self.config.stdlib),
"LDFLAGS=%s" % self.config.stdlib,
]
configure += [
"--disable-documentation",
] + self.config.link_opts + self.config.debug_opts
run(configure, log, cwd=self.work_dir)
def build(self, log):
run(['make', '-j%d' % CPUS], log, cwd=self.work_dir)
# Passing "TESTS=" like this will suppress the actual running of
# the tests. We run them in the "test" stage.
run(['make', '-j%d' % CPUS, 'check', 'TESTS='], log, cwd=self.work_dir)
class CMakeConfig(Config):
"""Configuration for a CMake build."""
def __init__(self, generator):
self.generator = generator
self.builder = CMAKE_GENERATORS[generator]
def name(self):
return "cmake"
class CMakeBuild(Build):
"""Build using CMake.
Ignores the config for now.
"""
def configure(self, log):
source_dir = getcwd()
generator = self.config.generator
run(
['cmake', '-G', generator, source_dir], output=log,
cwd=self.work_dir)
def build(self, log):
run(self.config.builder, log, cwd=self.work_dir)
def parse_args():
"""Parse command-line arguments."""
parser = ArgumentParser(description=__doc__)
parser.add_argument('--verbose', '-v', action='store_true')
parser.add_argument(
'--compilers', '-c', default=','.join(CXX),
help="Compilers, separated by commas. Default is %(default)s.")
parser.add_argument(
'--optimize', '-O', default=','.join(OPT),
help=(
"Alternative optimisation options, separated by commas. "
"Default is %(default)s."))
parser.add_argument(
'--stdlibs', '-L', default=','.join(STDLIB),
help=(
"Comma-separated options for choosing standard library. "
"Defaults to %(default)s."))
parser.add_argument(
'--logs', '-l', default='.', metavar='DIRECTORY',
help="Write build logs to DIRECTORY.")
parser.add_argument(
'--jobs', '-j', default=CPUS, metavar='CPUS',
help=(
"When running 'make', run up to CPUS concurrent processes. "
"Defaults to %(default)s."))
parser.add_argument(
'--minimal', '-m', action='store_true',
help="Make it as short a run as possible. For testing this script.")
return parser.parse_args()
def soft_get(queue, block=True):
"""Get an item off `queue`, or `None` if the queue is empty."""
try:
return queue.get(block)
except Empty:
return None
def read_queue(queue, block=True):
"""Read entries off `queue`, terminating when it gets a `None`.
Also terminates when the queue is empty.
"""
entry = soft_get(queue, block)
while entry is not None:
yield entry
entry = soft_get(queue, block)
def service_builds(in_queue, fail_queue, out_queue):
"""Worker process for "build" stage: process one job at a time.
Sends successful builds to `out_queue`, and failed builds to `fail_queue`.
Terminates when it receives a `None`, at which point it will send a `None`
into `out_queue` in turn.
"""
for build in read_queue(in_queue):
try:
build.do_build()
except Exception as error:
fail_queue.put((build, "%s" % error))
else:
out_queue.put(build)
in_queue.task_done()
# Mark the end of the queue.
out_queue.put(None)
def service_tests(in_queue, fail_queue, out_queue):
"""Worker process for "test" stage: test one build at a time.
Sends successful builds to `out_queue`, and failed builds to `fail_queue`.
Terminates when it receives a final `None`. Does not send out a final
`None` of its own.
"""
for build in read_queue(in_queue):
try:
build.do_test()
except Exception as error:
fail_queue.put((build, "%s" % error))
else:
out_queue.put(build)
in_queue.task_done()
def report_failures(queue, message):
"""Report failures from a failure queue. Return total number."""
failures = 0
for build, error in read_queue(queue, block=False):
print("%s: %s - %s" % (message, build.config.name(), error))
failures += 1
return failures
def count_entries(queue):
"""Get and discard all entries from `queue`, return the total count."""
total = 0
for _ in read_queue(queue, block=False):
total += 1
return total
def gather_builds(args):
"""Produce the list of builds we want to perform."""
if args.verbose:
print("\nChecking available compilers.")
compiler_candidates = args.compilers.split(',')
compilers = check_compilers(
compiler_candidates, args.stdlibs.split(','),
verbose=args.verbose)
if list(compilers) == []:
raise Fail(
"Did not find any viable compilers. Tried: %s."
% ', '.join(compiler_candidates))
opt_levels = args.optimize.split(',')
link_types = LINK.items()
debug_mixes = DEBUG.items()
if args.minimal:
compilers = compilers[:1]
opt_levels = opt_levels[:1]
link_types = list(link_types)[:1]
debug_mixes = list(debug_mixes)[:1]
builds = [
AutotoolsBuild(
args.logs,
AutotoolsConfig(
opt=opt, link=link, link_opts=link_opts, debug=debug,
debug_opts=debug_opts, cxx=cxx, stdlib=stdlib))
for opt in sorted(opt_levels)
for link, link_opts in sorted(link_types)
for debug, debug_opts in sorted(debug_mixes)
for cxx, stdlib in compilers
]
cmake = find_cmake_command()
if cmake is not None:
builds.append(CMakeBuild(args.logs, CMakeConfig(cmake)))
return builds
def enqueue(queue, build, *args):
"""Put `build` on `queue`.
    Ignores additional arguments, so that it can be used as a callback for
`Pool`.
We do this instead of a lambda in order to get the closure right. We want
the build for the current iteration, not the last one that was executed
before the lambda runs.
"""
queue.put(build)
def enqueue_error(queue, build, error):
"""Put the pair of `build` and `error` on `queue`."""
queue.put((build, error))
def main(args):
"""Do it all."""
if not os.path.isdir(args.logs):
raise Fail("Logs location '%s' is not a directory." % args.logs)
builds = gather_builds(args)
if args.verbose:
print("Lined up %d builds." % len(builds))
# The "configure" step is single-threaded. We can run many at the same
# time, even when we're also running a "build" step at the same time.
# This means we may run a lot more processes than we have CPUs, but there's
# no law against that. There's also I/O time to be covered.
configure_pool = Pool()
# Builds which have failed the "configure" stage, with their errors. This
# queue must never stall, so that we can let results pile up here while the
# work continues.
configure_fails = Queue(len(builds))
# Waiting list for the "build" stage. It contains Build objects,
# terminated by a final None to signify that there are no more builds to be
# done.
build_queue = JoinableQueue(10)
# Builds that have failed the "build" stage.
build_fails = Queue(len(builds))
# Waiting list for the "test" stage. It contains Build objects, terminated
# by a final None.
test_queue = JoinableQueue(10)
# The "build" step tries to utilise all CPUs, and it may use a fair bit of
# memory. Run only one of these at a time, in a single worker process.
build_worker = Process(
target=service_builds, args=(build_queue, build_fails, test_queue))
build_worker.start()
# Builds that have failed the "test" stage.
test_fails = Queue(len(builds))
# Completed builds. This must never stall.
done_queue = JoinableQueue(len(builds))
# The "test" step can not run concurrently (yet). So, run tests serially
# in a single worker process. It takes its jobs directly from the "build"
# worker.
test_worker = Process(
target=service_tests, args=(test_queue, test_fails, done_queue))
test_worker.start()
# Feed all builds into the "configure" pool. Each build which passes this
# stage goes into the "build" queue.
for build in builds:
configure_pool.apply_async(
build.do_configure, callback=partial(enqueue, build_queue, build),
error_callback=partial(enqueue_error, configure_fails, build))
if args.verbose:
print("All jobs are underway.")
configure_pool.close()
configure_pool.join()
# TODO: Async reporting for faster feedback.
configure_fail_count = report_failures(configure_fails, "CONFIGURE FAIL")
if args.verbose:
print("Configure stage done.")
# Mark the end of the build queue for the build worker.
build_queue.put(None)
build_worker.join()
# TODO: Async reporting for faster feedback.
build_fail_count = report_failures(build_fails, "BUILD FAIL")
if args.verbose:
print("Build step done.")
# Mark the end of the test queue for the test worker.
test_queue.put(None)
test_worker.join()
# TODO: Async reporting for faster feedback.
# TODO: Collate failures into meaningful output, e.g. "shared library fails."
test_fail_count = report_failures(test_fails, "TEST FAIL")
if args.verbose:
print("Test step done.")
# All done. Clean up.
for build in builds:
build.clean_up()
ok_count = count_entries(done_queue)
if ok_count == len(builds):
print("All tests OK.")
else:
print(
"Failures during configure: %d - build: %d - test: %d. OK: %d."
% (
configure_fail_count,
build_fail_count,
test_fail_count,
ok_count,
))
if __name__ == '__main__':
try:
exit(main(parse_args()))
except Fail as failure:
stderr.write("%s\n" % failure)
exit(2)
|
the-stack_0_24077 | #!/usr/bin/env python3
# Copyright (C) 2020 Konstantin Tokarev <[email protected]>
# Copyright (C) 2020 Rajagopalan Gangadharan <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import os
import argparse
import pathlib
import platform
import sys
import subprocess
def run_command(command):
print("Executing:", command)
exit_code = os.system(command)
print("Exit code:", exit_code)
if exit_code:
sys.exit(1)
def run_command_output(command):
    print("Executing:", command)
    output = os.popen(command)
    return output.read()
class ConanProfile:
def __init__(self, profile_name):
self.name = profile_name
def create(self):
run_command("conan profile new {0} --detect --force".format(self.name))
def get(self, setting):
return subprocess.check_output(f"conan profile get settings.{setting} {self.name}", shell=True).rstrip().decode('ascii')
def update(self, setting, value):
run_command("conan profile update settings.{0}={1} {2}".format(setting, value, self.name))
def set_env_variable(var, value, override):
if var in os.environ and len(os.environ[var]) != 0 and not os.environ[var].isspace():
old_value = os.environ[var]
if old_value != value:
if override:
print(f"Warning: overriding environment variable '{var}': '{old_value}' -> '{value}'")
os.environ[var] = value
else:
print(f"Note: using environment variable '{var}' = '{old_value}'. Undefine it to use '{value}' instead")
else:
os.environ[var] = value
def set_compiler_environment(cc, cxx, override):
set_env_variable("CC", cc, override)
set_env_variable("CXX", cxx, override)
def get_cc_cxx(compiler):
compiler_preset = {
"msvc": ["cl", "cl"],
"clang": ["clang", "clang++"],
"gcc": ["gcc", "g++"]
}
return compiler_preset[compiler]
def create_profile(compiler, arch):
if not compiler:
if platform.system() == "Windows":
compiler = "msvc"
elif platform.system() == "Darwin":
compiler = "clang"
elif platform.system() == "Linux":
compiler = "gcc"
try:
cc, cxx = get_cc_cxx(compiler)
except KeyError:
sys.exit(f"Error: Unsupported compiler '{compiler}' specified")
profile = ConanProfile('qtwebkit_{0}_{1}'.format(compiler, arch)) # e.g. qtwebkit_msvc_x86
if compiler == "msvc":
profile.create()
set_compiler_environment(cc, cxx, override=True)
else:
set_compiler_environment(cc, cxx, override=True)
profile.create()
if arch == 'default':
arch = profile.get('arch_build')
profile.update('arch', arch)
profile.update('arch_build', arch)
if platform.system() == "Windows" and compiler == "gcc":
profile.update('compiler.threads', 'posix')
if arch == 'x86':
profile.update('compiler.exception', 'dwarf2')
if arch == 'x86_64':
profile.update('compiler.exception', 'seh')
if args.build_type == "Debug":
profile.update('compiler.runtime', 'MTd')
else:
profile.update('compiler.runtime', 'MT')
return profile.name
def set_environment_for_profile(profile_name):
profile = ConanProfile(profile_name)
compiler = profile.get('compiler')
if compiler == "Visual Studio":
compiler = "msvc"
try:
cc, cxx = get_cc_cxx(compiler)
except KeyError:
sys.exit(f"Error: Unsupported compiler '{compiler}' specified in profile '{profile_name}'")
set_compiler_environment(cc, cxx, override=False)
def get_qt_version(qt_path):
qmake_path = os.path.join(qt_path, "bin", "qmake")
    qt_version = run_command_output(f"{qmake_path} -query QT_VERSION").rstrip()
if not qt_version:
if qt_path:
sys.exit(f"Can't find working qmake in {qt_path}")
else:
sys.exit("Can't find working qmake in PATH")
return qt_version
parser = argparse.ArgumentParser(description='Build QtWebKit with Conan. For installation of build product into Qt, use --install option')
parser.add_argument("--qt", help="Root directory of Qt Installation", type=str, metavar="QTDIR")
parser.add_argument(
"--cmakeargs", help="Space separated values that should be passed as CMake arguments", default="", type=str)
parser.add_argument("--ninjaargs", help="Ninja arguments",
default="", type=str)
parser.add_argument(
"--build_directory", help="Name of build dirtectory (defaults to build)", default="build", type=str)
parser.add_argument("--compiler", help="Specify compiler for build (msvc, gcc, clang)", default=None, choices=['gcc', 'msvc', 'clang'], type=str)
parser.add_argument("--configure", help="Execute the configuration step. When specified, build won't run unless --build is specified", action="store_true")
parser.add_argument("--build", help="Execute the build step. When specified, configure won't run unless --configure is specified", action="store_true")
parser.add_argument("--install", help="Execute the install step. When specified, configure and build steps WILL run without changes", action="store_true")
parser.add_argument("--profile", help="Name of conan profile provided by user. Note: compiler and profile options are mutually exclusive", type=str)
parser.add_argument("--arch", help="32 bit or 64 bit build, leave blank for autodetect", default="default", choices=['x86', 'x86_64'])
parser.add_argument("--build_type", help="Name of CMake build configuration to use", default="Release", choices=['', 'Release', 'Debug'])
parser.add_argument("--install_prefix", help="Set installation prefix to the given path (defaults to Qt directory)", default=None)
parser.add_argument("--ignore-qt-bundled-deps",
help="Don't try to match versions of dependencies bundled with Qt and use latest versions of them", action="store_true")
args = parser.parse_args()
# Always print commands run by conan internally
os.environ["CONAN_PRINT_RUN_COMMANDS"] = "1"
src_directory = str(pathlib.Path(__file__).resolve().parents[2])
if os.path.isabs(args.build_directory):
build_directory = args.build_directory
else:
build_directory = os.path.join(src_directory, args.build_directory)
conanfile_path = os.path.join(src_directory, "Tools", "qt", "conanfile.py")
print("Path of build directory:" + build_directory)
run_command("conan remote add -f bincrafters https://bincrafters.jfrog.io/artifactory/api/conan/public-conan")
#run_command("conan remote add -f qtproject https://qtpkgtest.jfrog.io/artifactory/api/conan/coin-ci-provisioning --insert")
if args.profile and args.compiler:
sys.exit("Error: --compiler and --profile cannot be specified at the same time")
if not args.profile:
profile_name = create_profile(args.compiler, args.arch)
else:
profile_name = args.profile
set_environment_for_profile(profile_name)
build_vars = f'-o qt="{args.qt}" -o cmakeargs="{args.cmakeargs}" -o build_type="{args.build_type}"'
if args.qt and not args.ignore_qt_bundled_deps:
qt_version = get_qt_version(args.qt)
build_vars += f' -o qt_version="{qt_version}"'
if args.install_prefix:
build_vars += ' -o install_prefix="{}"'.format(args.install_prefix)
elif args.qt:
build_vars += ' -o install_prefix="{}"'.format(args.qt)
if args.ninjaargs:
os.environ["NINJAFLAGS"] = args.ninjaargs
if not args.configure and not args.build:
# If we have neither --configure nor --build, we should do both configure and build (but install only if requested)
args.configure = True
args.build = True
if args.configure:
run_command('conan install {0} -if "{1}" --build=missing --profile={2} {3}'.format(conanfile_path, build_directory, profile_name, build_vars))
configure_flag = "--configure" if args.configure else ""
build_flag = "--build" if args.build else ""
install_flag = "--install" if args.install else ""
run_command('conan build {0} {1} {2} -sf "{3}" -bf "{4}" "{5}"'.format(configure_flag, build_flag, install_flag, src_directory, build_directory, conanfile_path))
|
the-stack_0_24078 |
R1={"type":"convective","Area":15,"h_conv":10}
R2={"type":"conductive","length":0.004,"Area":1.2,"k":0.78}
R3={"type":"conductive","length":0.01,"Area":1.2,"k":0.09}
R4={"type":"conductive","length":0.004,"Area":1.2,"k":0.78}
R5={"type":"convective","Area":15,"h_conv":40}
R_network= [R1,R2,R3,R4,R5]
def Rcalc_Series(R_network):
    """Compute the total thermal resistance of a series network and return it."""
    R_tot = 0.0
    for R in R_network:
        print(str(R))
        if R["type"] == "conductive":
            # Conduction through a plane layer: R = L / (k * A)
            print("\n this resistance is conductive")
            print("\n In this resistance L = " + str(R["length"]) + " m, A = " + str(R["Area"]) + " m2, k = " + str(R["k"]) + " W/(m*K) \n")
            R_resistance = R["length"] / (R["k"] * R["Area"])
            print("\n The Resistance's value is " + str(R_resistance) + " degC/W \n")
        if R["type"] == "convective":
            # Convection at a surface: R = 1 / (h * A)
            print("\n this resistance is convective")
            print("\n In this resistance A = " + str(R["Area"]) + " m2, h = " + str(R["h_conv"]) + " W/(m2*K) \n")
            R_resistance = 1.0 / (R["h_conv"] * R["Area"])
            print("\n The Resistance's value is " + str(R_resistance) + " degC/W \n")
        R_tot = R_tot + R_resistance
    print("\n We have calculated the resistance of all of the layers \n")
    print("\n The Overall Resistance is " + str(R_tot) + " degC/W \n")
    return R_tot
Rcalc_Series(R_network)
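# Hedged follow-up (not in the original file): with the total series resistance
# known, the steady-state heat transfer rate follows from Q = dT / R_tot.
# The temperatures below are assumed values chosen only for illustration.
def heat_flow(T_hot, T_cold, R_total):
    """Return the heat transfer rate in W across a series resistance network."""
    return (T_hot - T_cold) / R_total
# Example usage (Rcalc_Series returns R_tot as defined above):
# Q = heat_flow(20.0, -10.0, Rcalc_Series(R_network))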
|
the-stack_0_24079 | ####### import requierments
from os import getcwd, system, chdir
####### end imports #######
####### global vars #######
IS_UNITTEST = False # True if you're testing
RUN_AS_GUI = False # True if you want to use eel to control django
VERBOSE = True # true if you want the returning values of the functions
####### end globals #######
# base class of management provider
class manager:
# just to intialize this class
def __init__(self, startServerPort, lanInterface:str):
self.port =startServerPort
self.interface = lanInterface # for example : 0.0.0.0 is an interface
# gives you a list of functions, not variables
def getFunctionList(self):
# get functions of this class
# some the functions all built in and we don't want them
# so we don't use functions that start with _(underline) character
funcList = [] # empty list. then we will append it
for x in dir(self):
if not x.startswith("_") and x != "getFunctionList":
func = getattr(self, x)
if callable(func):
funcList.append(x)
return funcList
# makes the instances callable
# gets the function name and arguments
# passes arguments in function and calls
def __call__(self, functionName:str, args:list=[]):
try:
function = getattr(self, functionName)
return function(*args)
        except AttributeError:
print("bad option ", functionName, "doesn't exist")
class djangoCommands(manager):
def __init__(self, port, inter):
super().__init__(port, inter)
def startServer(self):
system(f"start cmd.exe /c python manage.py runserver {self.interface}:{self.port}")
return "server is running"
def databaseShell(self):
system("start cmd.exe /c python manage.py dbshell")
return "the database shell is now running"
def shell(self):
system("start cmd.exe /c python manage.py shell")
return "the shell is running"
def makeMigrations(self):
system("python manage.py makemigrations")
return "migrations has been made"
def migrate(self):
system("python manage.py migrate")
return "migrations has been done"
def createSuperUser(self):
system("python manage.py createsuperuser")
return "super user has been created"
def startApp(self, name):
system("python manage.py startapp {}".format(name))
return "your app has been created"
def unittest():
    # `inst` was undefined in the original; use the concrete manager subclass instead
    i = djangoCommands(8000, "0.0.0.0")
    result = i("s", ["15", "15"])
    print("unit test")
    print(result)
def main():
# make an instance of manager class with 0.0.0.0 interface and port 8000
# we'll use them later to runserver
djangoManagement = djangoCommands(8000, "0.0.0.0")
# print functions list
functions = djangoManagement("getFunctionList")
print("\n\n")
for i, x in enumerate(functions): print(f"{i}- [{x}]")
print("\n\n")
# get function name to call from the user
userinput = input("enter a number to be executed: ").split(";")
function = functions[int(userinput[0])]
# running function after giving its name from user
results = djangoManagement(function, userinput[1:])
print(results if VERBOSE else "", end="")
def GUI():
djangocommands = djangoCommands(8000, "0.0.0.0")
# this is gui mode allowing you to control the things with
# a graphical user interface
# here we will need the eel framework
    try:
        import eel
    except ImportError:
        print("eel framework doesn't exist so we can't run the programme as GUI")
        return
# eel needs to be initialized with a folder to access files
eel.init("GUI")
# internal functions exposed to eel to be used by javascript
# this function runs django commands
@eel.expose
def execute_django_command(command):
system(f"start cmd.exe /c python manage.py {command}")
return "executed"
@eel.expose
def run_server():
djangocommands.startServer()
return "server started"
@eel.expose
def make_migrations():
djangocommands.makeMigrations()
return "migrations has been made"
@eel.expose
def migrate():
djangocommands.migrate()
return "migrations has been done"
# opens index.html in the window
eel.start("index.html", position=(100,100), size=(700,500), mode="edge", port=8081)
if __name__ == '__main__':
while True:
if IS_UNITTEST:
unittest();input("enter to continue ...");exit()
elif RUN_AS_GUI:
GUI()
else:
main()
|
the-stack_0_24080 | # Training procedure for CIFAR-10 using ResNet 110.
# ResNet model from https://github.com/BIGBALLON/cifar-10-cnn/blob/master/4_Residual_Network/ResNet_keras.py
import keras
import numpy as np
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.layers.normalization import BatchNormalization
from keras.layers import Conv2D, Dense, Input, add, Activation, GlobalAveragePooling2D
from keras.callbacks import LearningRateScheduler, TensorBoard, ModelCheckpoint
from keras.models import Model
from keras import optimizers, regularizers
from sklearn.model_selection import train_test_split
import pickle
# Constants
stack_n = 18
num_classes = 10
img_rows, img_cols = 32, 32
img_channels = 3
batch_size = 128
epochs = 200
iterations = 45000 // batch_size
weight_decay = 0.0001
seed = 333
def scheduler(epoch):
if epoch < 80:
return 0.1
if epoch < 150:
return 0.01
return 0.001
def residual_network(img_input,classes_num=10,stack_n=5):
    def residual_block(block_input, out_channel, increase=False):
        if increase:
            stride = (2, 2)
        else:
            stride = (1, 1)
        pre_bn = BatchNormalization()(block_input)
        pre_relu = Activation('relu')(pre_bn)
        conv_1 = Conv2D(out_channel, kernel_size=(3, 3), strides=stride, padding='same',
                        kernel_initializer="he_normal",
                        kernel_regularizer=regularizers.l2(weight_decay))(pre_relu)
        bn_1 = BatchNormalization()(conv_1)
        relu1 = Activation('relu')(bn_1)
        conv_2 = Conv2D(out_channel, kernel_size=(3, 3), strides=(1, 1), padding='same',
                        kernel_initializer="he_normal",
                        kernel_regularizer=regularizers.l2(weight_decay))(relu1)
        if increase:
            projection = Conv2D(out_channel,
                                kernel_size=(1, 1),
                                strides=(2, 2),
                                padding='same',
                                kernel_initializer="he_normal",
                                kernel_regularizer=regularizers.l2(weight_decay))(block_input)
            block = add([conv_2, projection])
        else:
            block = add([block_input, conv_2])
        return block
# build model
# total layers = stack_n * 3 * 2 + 2
# stack_n = 5 by default, total layers = 32
# input: 32x32x3 output: 32x32x16
x = Conv2D(filters=16,kernel_size=(3,3),strides=(1,1),padding='same',
kernel_initializer="he_normal",
kernel_regularizer=regularizers.l2(weight_decay))(img_input)
# input: 32x32x16 output: 32x32x16
for _ in range(stack_n):
x = residual_block(x,16,False)
# input: 32x32x16 output: 16x16x32
x = residual_block(x,32,True)
for _ in range(1,stack_n):
x = residual_block(x,32,False)
# input: 16x16x32 output: 8x8x64
x = residual_block(x,64,True)
for _ in range(1,stack_n):
x = residual_block(x,64,False)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = GlobalAveragePooling2D()(x)
# input: 64 output: 10
x = Dense(classes_num,activation='softmax',
kernel_initializer="he_normal",
kernel_regularizer=regularizers.l2(weight_decay))(x)
return x
if __name__ == '__main__':
# load data
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
x_train45, x_val, y_train45, y_val = train_test_split(x_train, y_train, test_size=0.1, random_state=seed) # random_state = seed
    # Normalize data with the per-pixel mean and standard deviation
img_mean = x_train45.mean(axis=0) # per-pixel mean
img_std = x_train45.std(axis=0)
x_train45 = (x_train45-img_mean)/img_std
x_val = (x_val-img_mean)/img_std
x_test = (x_test-img_mean)/img_std
# build network
img_input = Input(shape=(img_rows,img_cols,img_channels))
output = residual_network(img_input,num_classes,stack_n)
resnet = Model(img_input, output)
print(resnet.summary())
# set optimizer
sgd = optimizers.SGD(lr=.1, momentum=0.9, nesterov=True, clipnorm=1.)
resnet.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
# set callback
cbks = [LearningRateScheduler(scheduler)]
# set data augmentation
print('Using real-time data augmentation.')
datagen = ImageDataGenerator(horizontal_flip=True,
width_shift_range=0.125,
height_shift_range=0.125,
fill_mode='constant',cval=0.)
datagen.fit(x_train45)
# start training
hist = resnet.fit_generator(datagen.flow(x_train45, y_train45,batch_size=batch_size),
steps_per_epoch=iterations,
epochs=epochs,
callbacks=cbks,
validation_data=(x_val, y_val))
resnet.save('resnet_110_45kclip.h5')
print("Get test accuracy:")
loss, accuracy = resnet.evaluate(x_test, y_test, verbose=0)
print("Test: accuracy1 = %f ; loss1 = %f" % (accuracy, loss))
print("Pickle models history")
with open('hist_110_cifar10_v2_45kclip.p', 'wb') as f:
        pickle.dump(hist.history, f)
|
the-stack_0_24081 | import glob
import os
import h5py
import keras.layers as layers
import numpy as np
import tensorflow as tf
from keras import backend, optimizers, regularizers
from keras.models import Model
import joblib
import optuna
from optuna.integration import KerasPruningCallback
from optuna.visualization import *
from utils import format_data, slicer, split
from utils_keras import loss_norm_error
# Model name
PREFIX = "model_pred-d_{}-"
SUFFIX = "{}.h5"
def objective(trial):
# Open data file
f = h5py.File(DT_FL, "r")
dt = f[DT_DST]
# Format data for LSTM training
x_data, y_data = format_data(dt, wd=WD, get_y=True)
x_data = np.squeeze(x_data)
# Split data and get slices
idxs = split(x_data.shape[0], N_TRAIN, N_VALID)
slc_trn, slc_vld, slc_tst = slicer(x_data.shape, idxs)
# Get data
x_train = x_data[slc_trn[0]]
y_train = y_data[slc_trn[0]] - x_train
x_val = x_data[slc_vld[0]]
y_val = y_data[slc_vld[0]] - x_val
# Limits and options
    # Layer width bounds (one [min, max] pair per dense layer)
# n_lstm = [[4, 128], [4, 128], [4, 128]]
n_lstm = [[4, 196], [4, 196], [4, 196]]
# Regularizer
l2_lm = [1e-7, 1e-3]
# Activation functions
act_opts = ["relu", "elu", "tanh", "linear"]
# Latent space cfg
lt_sz = [5, 150]
lt_dv = [0.3, 0.7]
# Learning rate
lm_lr = [1e-5, 1]
# Clear tensorflow session
tf.keras.backend.clear_session()
# Input
inputs = layers.Input(shape=x_train.shape[1:])
p = inputs
# Dense layers
# n_lyr_dense = trial.suggest_int("n_lyr_dense", 0, 2)
n_lyr_dense = trial.suggest_int("n_lyr_dense", 1, 3)
for i in range(n_lyr_dense):
# For the current layer
        # Get the number of units for this dense layer
l = trial.suggest_int("n{}_dense".format(i), n_lstm[i][0], n_lstm[i][1])
# Get the activation function
act = trial.suggest_categorical("d{}_activation".format(i), act_opts)
# Regularization value
l2 = trial.suggest_loguniform("d{}_l2".format(i), l2_lm[0], l2_lm[1])
l2_reg = regularizers.l2(l=l2)
# Set layer
p = layers.Dense(
l,
activation=act,
# kernel_regularizer=l2_reg,
name="{}_dense".format(i + 1),
)(p)
# Dropout
dp = trial.suggest_uniform("d{}_dropout".format(i), 0, 1)
p = layers.Dropout(dp, name="{}_dropout_dense".format(i + 1))(p)
bn = trial.suggest_categorical("d{}_batchnorm".format(i), [0, 1])
if bn == 1:
p = layers.BatchNormalization(name="{}_bnorm_dense".format(i + 1))(p)
out = layers.Dense(y_data.shape[1], activation="linear")(p)
pred = Model(inputs, out, name="auto_encoder_add")
# opt_opts = ["adam", "nadam", "adamax", "RMSprop"]
# opt = trial.suggest_categorical("optimizer", opt_opts)
opt = "adam"
if opt == "adam":
k_optf = optimizers.Adam
elif opt == "nadam":
k_optf = optimizers.Nadam
elif opt == "adamax":
k_optf = optimizers.Adamax
elif opt == "RMSprop":
k_optf = optimizers.RMSprop
lr = trial.suggest_loguniform("lr", lm_lr[0], lm_lr[1])
if lr > 0:
k_opt = k_optf(learning_rate=lr)
else:
k_opt = k_optf()
pred.compile(optimizer=k_opt, loss="mse", metrics=["mse", loss_norm_error])
batch_size = int(trial.suggest_uniform("batch_sz", 2, 32))
pred.summary()
hist = pred.fit(
x_train,
y_train,
epochs=100,
batch_size=batch_size,
shuffle=True,
validation_data=(x_val, y_val),
callbacks=[KerasPruningCallback(trial, "val_mse")],
verbose=1,
)
txt = PREFIX + SUFFIX
pred.save(txt.format(RUN_VERSION, trial.number))
return hist.history["val_mse"][-1]
def clean_models(study):
# Get best model
bst = study.best_trial.number
# Rename best model
txt = PREFIX + SUFFIX
nw_name = PREFIX.format(RUN_VERSION)[:-1] + ".h5"
os.rename(txt.format(RUN_VERSION, bst), nw_name)
# Remove the other models
rm_mdls = glob.glob(PREFIX.format(RUN_VERSION) + "*")
for mdl in rm_mdls:
os.remove(mdl)
pass
def main():
    # Use Optuna to perform a hyperparameter optimisation
study = optuna.create_study(
direction="minimize", pruner=optuna.pruners.MedianPruner()
)
# Start the optimisation process
study.optimize(objective, n_trials=100, timeout=1600)
# Keep only the best model
clean_models(study)
# Save Optuna study
joblib.dump(study, study_nm.format(RUN_VERSION))
if __name__ == "__main__":
# Study naming
study_nm = "study_d_v{}.pkl"
# File to be used
DT_FL = "data_compact.h5"
# Dataset to be used
DT_DST = "model_ae-smp_4_scaled"
# Split train test and validation datasets
N_TRAIN = 0.8
N_VALID = 0.1
# Window size to be used to predict the next sample
WD = 2
# Current search run
RUN_VERSION = 1
main()
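    # Rough sketch for inspecting the finished search afterwards (assumes
    # RUN_VERSION = 1, so the study was dumped to "study_d_v1.pkl"):
    #   study = joblib.load("study_d_v1.pkl")
    #   print(study.best_trial.params)
    #   optuna.visualization.plot_optimization_history(study)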
|
the-stack_0_24082 | import django
django.setup()
from crawler.models import *
import networkx as nx
from tqdm import tqdm
def build_collaboration_graph(docs, authors):
#docs = Document.objects.all()
#authors = Author.objects.all()
scores = {}
triplets =[]
for d in tqdm(docs):
auts = d.authors.all()
for i, a1 in enumerate(auts):
for a2 in auts[i+1:]:
if a1.id == a2.id: continue
idx = '{},{}'.format(a1.id, a2.id)
if idx in scores:
scores[idx] += 1
else:
scores[idx] = 1
print('There are {} relationships in this set'.format(len(scores)))
for idx in scores:
idxx = [int(i) for i in idx.split(',')]
i1, i2 = idxx
triplets.append((i1,i2,scores[idx]))
return triplets, scores
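# Minimal usage sketch (assumes the Document/Author tables above are populated);
# each triplet is (author_id_1, author_id_2, co-authorship count) and maps directly
# onto a weighted networkx graph:
#
#   triplets, scores = build_collaboration_graph(Document.objects.all(),
#                                                Author.objects.all())
#   G = nx.Graph()
#   G.add_weighted_edges_from(triplets)
#   print(G.number_of_nodes(), "authors,", G.number_of_edges(), "collaborations")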
|
the-stack_0_24083 | import argparse
import os
import subprocess
import sys
parser = argparse.ArgumentParser()
parser.add_argument('-o', dest='out')
parser.add_argument('-s', dest='stamp')
parser.add_argument('-t', dest='target_cpu')
args = parser.parse_args()
def gen_list(out, name, obj_dirs):
out.write(name + " = [\n")
for base_dir in obj_dirs:
for dir, subdirs, files in os.walk(os.path.join('obj', base_dir)):
for f in files:
if f.endswith('.obj') or f.endswith('.o'):
out.write('"' + os.path.abspath(os.path.join(dir, f)) + '",\n')
out.write("]\n")
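# Illustrative output shape (the path below is made up): each call appends a
# GN-style list such as
#   obj_base = [
#   "/abs/build/obj/base/foo.obj",
#   ]
# to the file handle passed in as `out`.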
with open(args.out, 'w') as out:
additional_libchromiumcontent = []
if sys.platform in ['win32', 'cygwin'] and args.target_cpu == "x64":
additional_libchromiumcontent = [
"../win_clang_x64/obj/third_party/libyuv",
]
gen_list(
out,
"obj_libchromiumcontent",
[
"build",
"chrome/browser/ui/libgtkui",
"content",
"crypto",
"dbus",
"device",
"gin",
"google_apis",
"gpu",
"ipc",
"jingle",
"mojo",
"pdf",
"printing",
"sandbox",
"sdch",
"sql/sql",
"storage",
"third_party/adobe",
"third_party/boringssl",
"third_party/brotli/common",
"third_party/brotli/dec",
"third_party/ced/ced",
"third_party/crc32c", # for "third_party/leveldatabase"
"third_party/decklink",
"third_party/expat",
"third_party/flac",
"third_party/harfbuzz-ng",
"third_party/iaccessible2",
"third_party/iccjpeg",
"third_party/isimpledom",
"third_party/leveldatabase",
"third_party/libdrm",
"third_party/libXNVCtrl",
"third_party/libjingle",
"third_party/libjpeg_turbo",
"third_party/libpng",
"third_party/libsrtp",
"third_party/libusb",
"third_party/libvpx",
"third_party/libwebm",
"third_party/libwebp",
"third_party/libxml",
"third_party/libxslt",
"third_party/libyuv",
"third_party/mesa",
"third_party/modp_b64",
"third_party/mozilla",
"third_party/openh264",
"third_party/openmax_dl",
"third_party/opus",
"third_party/ots",
"third_party/protobuf/protobuf_lite",
"third_party/qcms",
"third_party/re2",
"third_party/sfntly",
"third_party/smhasher",
"third_party/snappy",
"third_party/sqlite",
"third_party/sudden_motion_sensor",
"third_party/usrsctp",
"third_party/woff2",
"third_party/zlib",
"tools",
"ui",
"url",
] + additional_libchromiumcontent)
gen_list(
out,
"obj_libcxx",
[
"buildtools/third_party/libc++",
"buildtools/third_party/libc++abi",
])
gen_list(
out,
"obj_base",
[
"base",
])
gen_list(
out,
"obj_cc",
[
"cc/animation",
"cc/base",
"cc/blink",
"cc/cc",
"cc/debug",
"cc/ipc",
"cc/paint",
"cc/proto",
"cc/surfaces",
])
gen_list(
out,
"obj_components",
[
"components/autofill/core/common",
"components/bitmap_uploader",
"components/cdm",
"components/cookie_config",
"components/crash/core/common",
"components/device_event_log",
"components/discardable_memory",
"components/display_compositor",
"components/filesystem",
"components/leveldb",
"components/link_header_util",
"components/memory_coordinator",
"components/metrics/public/interfaces",
"components/metrics/single_sample_metrics",
"components/mime_util",
"components/mus/clipboard",
"components/mus/common",
"components/mus/gles2",
"components/mus/gpu",
"components/mus/input_devices",
"components/mus/public",
"components/network_session_configurator/browser",
"components/network_session_configurator/common",
"components/os_crypt",
"components/password_manager/core/common",
"components/payments",
"components/prefs",
"components/rappor",
"components/scheduler/common",
"components/scheduler/scheduler",
"components/security_state",
"components/tracing/proto",
"components/tracing/startup_tracing",
"components/tracing/tracing",
"components/url_formatter",
"components/variations",
"components/vector_icons",
"components/viz/client",
"components/viz/common",
"components/viz/hit_test",
"components/viz/host",
"components/viz/service/service",
"components/webcrypto",
"components/webmessaging",
])
gen_list(
out,
"obj_ppapi",
[
"ppapi/cpp/objects",
"ppapi/cpp/private",
"ppapi/host",
"ppapi/proxy",
"ppapi/shared_impl",
"ppapi/thunk",
])
gen_list(
out,
"obj_media",
[
"media",
])
gen_list(
out,
"obj_net",
[
"net/base",
"net/constants",
"net/extras",
"net/http_server",
"net/net",
"net/net_with_v8",
])
gen_list(
out,
"obj_services",
[
"services/catalog",
"services/data_decoder",
"services/device",
"services/file",
"services/metrics/public",
"services/network/public",
"services/resource_coordinator",
"services/service_manager/background",
"services/service_manager/embedder",
"services/service_manager/public/cpp/cpp",
"services/service_manager/public/cpp/cpp_types",
"services/service_manager/public/cpp/standalone_service/standalone_service",
"services/service_manager/public/interfaces",
"services/service_manager/runner",
"services/service_manager/sandbox",
"services/service_manager/service_manager",
"services/service_manager/standalone",
"services/shape_detection",
"services/shell/public",
"services/shell/runner",
"services/shell/shell",
"services/tracing/public",
"services/ui/public",
"services/ui/gpu",
"services/user",
"services/video_capture",
"services/viz/privileged/interfaces",
"services/viz/public/interfaces",
])
gen_list(
out,
"obj_skia",
[
"skia",
])
gen_list(
out,
"obj_angle",
[
"third_party/angle/angle_common",
"third_party/angle/angle_gpu_info_util",
"third_party/angle/angle_image_util",
"third_party/angle/libANGLE",
"third_party/angle/libEGL",
"third_party/angle/libGLESv2",
"third_party/angle/preprocessor",
"third_party/angle/src/third_party/libXNVCtrl",
"third_party/angle/src/vulkan_support/glslang",
"third_party/angle/src/vulkan_support/vulkan_loader",
"third_party/angle/translator",
"third_party/angle/translator_lib",
])
gen_list(
out,
"obj_pdfium",
[
"third_party/freetype",
"third_party/pdfium",
])
gen_list(
out,
"obj_webkit",
[
"third_party/WebKit/common",
"third_party/WebKit/public",
"third_party/WebKit/Source/controller",
"third_party/WebKit/Source/platform/heap",
"third_party/WebKit/Source/platform/blink_common",
"third_party/WebKit/Source/platform/instrumentation",
"third_party/WebKit/Source/platform/loader",
"third_party/WebKit/Source/platform/media",
"third_party/WebKit/Source/platform/mojo",
"third_party/WebKit/Source/platform/platform",
"third_party/WebKit/Source/platform/scheduler",
"third_party/WebKit/Source/platform/wtf",
"third_party/WebKit/Source/web",
])
gen_list(
out,
"obj_webkitcore",
[
"third_party/WebKit/Source/core",
])
gen_list(
out,
"obj_webkitbindings",
[
"third_party/WebKit/Source/bindings",
])
gen_list(
out,
"obj_webkitmodules",
[
"third_party/WebKit/Source/modules",
])
gen_list(
out,
"obj_webrtc",
[
"third_party/webrtc",
"third_party/webrtc_overrides",
])
gen_list(
out,
"obj_v8",
[
"v8/src/inspector",
"v8/v8_external_snapshot",
"v8/v8_libbase",
"v8/v8_libplatform",
"v8/v8_libsampler",
"third_party/icu",
])
gen_list(
out,
"obj_v8base",
[
"v8/v8_base",
])
open(args.stamp, 'w')
|
the-stack_0_24084 | # Copyright (c) 2019-2020, NVIDIA CORPORATION.
import datetime as dt
import re
import numpy as np
import pandas as pd
import pyarrow as pa
import cudf
from cudf import _lib as libcudf
from cudf._lib.nvtx import annotate
from cudf._lib.scalar import Scalar, as_scalar
from cudf.core.column import column, string
from cudf.utils.dtypes import is_scalar, np_to_pa_dtype
from cudf.utils.utils import buffers_from_pyarrow
# nanoseconds per time_unit
_numpy_to_pandas_conversion = {
"ns": 1,
"us": 1000,
"ms": 1000000,
"s": 1000000000,
"m": 60000000000,
"h": 3600000000000,
"D": 86400000000000,
}
_dtype_to_format_conversion = {
"datetime64[ns]": "%Y-%m-%d %H:%M:%S.%9f",
"datetime64[us]": "%Y-%m-%d %H:%M:%S.%6f",
"datetime64[ms]": "%Y-%m-%d %H:%M:%S.%3f",
"datetime64[s]": "%Y-%m-%d %H:%M:%S",
}
class DatetimeColumn(column.ColumnBase):
def __init__(
self, data, dtype, mask=None, size=None, offset=0, null_count=None
):
"""
Parameters
----------
data : Buffer
The datetime values
dtype : np.dtype
The data type
mask : Buffer; optional
The validity mask
"""
dtype = np.dtype(dtype)
if data.size % dtype.itemsize:
raise ValueError("Buffer size must be divisible by element size")
if size is None:
size = data.size // dtype.itemsize
size = size - offset
super().__init__(
data,
size=size,
dtype=dtype,
mask=mask,
offset=offset,
null_count=null_count,
)
if not (self.dtype.type is np.datetime64):
raise TypeError(f"{self.dtype} is not a supported datetime type")
self._time_unit, _ = np.datetime_data(self.dtype)
def __contains__(self, item):
try:
item = np.datetime64(item, self._time_unit)
except ValueError:
# If item cannot be converted to datetime type
# np.datetime64 raises ValueError, hence `item`
# cannot exist in `self`.
return False
return item.astype("int64") in self.as_numerical
@property
def time_unit(self):
return self._time_unit
@property
def year(self):
return self.get_dt_field("year")
@property
def month(self):
return self.get_dt_field("month")
@property
def day(self):
return self.get_dt_field("day")
@property
def hour(self):
return self.get_dt_field("hour")
@property
def minute(self):
return self.get_dt_field("minute")
@property
def second(self):
return self.get_dt_field("second")
@property
def weekday(self):
return self.get_dt_field("weekday")
def get_dt_field(self, field):
return libcudf.datetime.extract_datetime_component(self, field)
def normalize_binop_value(self, other):
if isinstance(other, dt.datetime):
other = np.datetime64(other)
elif isinstance(other, dt.timedelta):
other = np.timedelta64(other)
elif isinstance(other, pd.Timestamp):
other = other.to_datetime64()
elif isinstance(other, pd.Timedelta):
other = other.to_timedelta64()
if isinstance(other, np.datetime64):
if np.isnat(other):
return as_scalar(val=None, dtype=self.dtype)
other = other.astype(self.dtype)
return as_scalar(other)
elif isinstance(other, np.timedelta64):
other_time_unit = cudf.utils.dtypes.get_time_unit(other)
if other_time_unit not in ("s", "ms", "ns", "us"):
other = other.astype("timedelta64[s]")
if np.isnat(other):
return as_scalar(val=None, dtype=other.dtype)
return as_scalar(other)
else:
raise TypeError("cannot normalize {}".format(type(other)))
@property
def as_numerical(self):
return column.build_column(
data=self.base_data,
dtype=np.int64,
mask=self.base_mask,
offset=self.offset,
size=self.size,
)
def as_datetime_column(self, dtype, **kwargs):
dtype = np.dtype(dtype)
if dtype == self.dtype:
return self
return libcudf.unary.cast(self, dtype=dtype)
def as_timedelta_column(self, dtype, **kwargs):
raise TypeError(
f"cannot astype a datetimelike from [{self.dtype}] to [{dtype}]"
)
def as_numerical_column(self, dtype, **kwargs):
return self.as_numerical.astype(dtype)
def as_string_column(self, dtype, **kwargs):
if not kwargs.get("format"):
fmt = _dtype_to_format_conversion.get(
self.dtype.name, "%Y-%m-%d %H:%M:%S"
)
kwargs["format"] = fmt
if len(self) > 0:
return string._numeric_to_str_typecast_functions[
np.dtype(self.dtype)
](self, **kwargs)
else:
return column.column_empty(0, dtype="object", masked=False)
def to_arrow(self):
mask = None
if self.nullable:
mask = pa.py_buffer(self.mask_array_view.copy_to_host())
data = pa.py_buffer(self.as_numerical.data_array_view.copy_to_host())
pa_dtype = np_to_pa_dtype(self.dtype)
return pa.Array.from_buffers(
type=pa_dtype,
length=len(self),
buffers=[mask, data],
null_count=self.null_count,
)
def default_na_value(self):
"""Returns the default NA value for this column
"""
return np.datetime64("nat", self.time_unit)
def binary_operator(self, op, rhs, reflect=False):
lhs, rhs = self, rhs
if op in ("eq", "ne", "lt", "gt", "le", "ge"):
out_dtype = np.bool
elif op == "add" and pd.api.types.is_timedelta64_dtype(rhs.dtype):
out_dtype = cudf.core.column.timedelta._timedelta_binary_op_add(
rhs, lhs
)
elif op == "sub" and pd.api.types.is_timedelta64_dtype(rhs.dtype):
out_dtype = cudf.core.column.timedelta._timedelta_binary_op_sub(
rhs if reflect else lhs, lhs if reflect else rhs
)
elif op == "sub" and pd.api.types.is_datetime64_dtype(rhs.dtype):
units = ["s", "ms", "us", "ns"]
lhs_time_unit = cudf.utils.dtypes.get_time_unit(lhs)
lhs_unit = units.index(lhs_time_unit)
rhs_time_unit = cudf.utils.dtypes.get_time_unit(rhs)
rhs_unit = units.index(rhs_time_unit)
out_dtype = np.dtype(
f"timedelta64[{units[max(lhs_unit, rhs_unit)]}]"
)
else:
raise TypeError(
f"Series of dtype {self.dtype} cannot perform "
f" the operation {op}"
)
if reflect:
lhs, rhs = rhs, lhs
return binop(lhs, rhs, op=op, out_dtype=out_dtype)
def fillna(self, fill_value):
if is_scalar(fill_value):
if not isinstance(fill_value, Scalar):
fill_value = np.datetime64(fill_value, self.time_unit)
else:
fill_value = column.as_column(fill_value, nan_as_null=False)
result = libcudf.replace.replace_nulls(self, fill_value)
if isinstance(fill_value, np.datetime64) and np.isnat(fill_value):
# If the value we are filling is np.datetime64("NAT")
# we set the same mask as current column.
# However where there are "<NA>" in the
# columns, their corresponding locations
# in base_data will contain min(int64) values.
return column.build_column(
data=result.base_data,
dtype=result.dtype,
mask=self.base_mask,
size=result.size,
offset=result.offset,
children=result.base_children,
)
return result
def find_first_value(self, value, closest=False):
"""
Returns offset of first value that matches
"""
value = pd.to_datetime(value)
value = column.as_column(value, dtype=self.dtype).as_numerical[0]
return self.as_numerical.find_first_value(value, closest=closest)
def find_last_value(self, value, closest=False):
"""
Returns offset of last value that matches
"""
value = pd.to_datetime(value)
value = column.as_column(value, dtype=self.dtype).as_numerical[0]
return self.as_numerical.find_last_value(value, closest=closest)
@property
def is_unique(self):
return self.as_numerical.is_unique
@classmethod
def from_arrow(cls, array, dtype=None):
if dtype is None:
dtype = np.dtype("M8[{}]".format(array.type.unit))
pa_size, pa_offset, pamask, padata, _ = buffers_from_pyarrow(array)
return DatetimeColumn(
data=padata,
mask=pamask,
dtype=dtype,
size=pa_size,
offset=pa_offset,
)
def can_cast_safely(self, to_dtype):
if np.issubdtype(to_dtype, np.datetime64):
to_res, _ = np.datetime_data(to_dtype)
self_res, _ = np.datetime_data(self.dtype)
max_int = np.iinfo(np.dtype("int64")).max
max_dist = np.timedelta64(
self.max().astype(np.dtype("int64"), copy=False), self_res
)
min_dist = np.timedelta64(
self.min().astype(np.dtype("int64"), copy=False), self_res
)
self_delta_dtype = np.timedelta64(0, self_res).dtype
if max_dist <= np.timedelta64(max_int, to_res).astype(
self_delta_dtype
) and min_dist <= np.timedelta64(max_int, to_res).astype(
self_delta_dtype
):
return True
else:
return False
elif to_dtype == np.dtype("int64") or to_dtype == np.dtype("O"):
# can safely cast to representation, or string
return True
else:
return False
@annotate("BINARY_OP", color="orange", domain="cudf_python")
def binop(lhs, rhs, op, out_dtype):
out = libcudf.binaryop.binaryop(lhs, rhs, op, out_dtype)
return out
def infer_format(element, **kwargs):
"""
    Infers datetime format from a string; also takes care of `ms` and `ns` resolutions
"""
fmt = pd.core.tools.datetimes._guess_datetime_format(element, **kwargs)
if fmt is not None:
return fmt
element_parts = element.split(".")
if len(element_parts) != 2:
raise ValueError("Given date string not likely a datetime.")
    # It is possible that the element is of the following format:
    # '00:00:03.333333 2016-01-01'
second_part = re.split(r"(\D+)", element_parts[1], maxsplit=1)
subsecond_fmt = ".%" + str(len(second_part[0])) + "f"
first_part = pd.core.tools.datetimes._guess_datetime_format(
element_parts[0], **kwargs
)
# For the case where first_part is '00:00:03'
if first_part is None:
tmp = "1970-01-01 " + element_parts[0]
first_part = pd.core.tools.datetimes._guess_datetime_format(
tmp, **kwargs
).split(" ", 1)[1]
if first_part is None:
raise ValueError("Unable to infer the timestamp format from the data")
if len(second_part) > 1:
second_part = pd.core.tools.datetimes._guess_datetime_format(
"".join(second_part[1:]), **kwargs
)
else:
second_part = ""
try:
fmt = first_part + subsecond_fmt + second_part
except Exception:
raise ValueError("Unable to infer the timestamp format from the data")
return fmt
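# Rough worked example (values are illustrative, not from the original source): when
# pandas alone cannot guess a format for an element such as "2019-01-31 12:00:03.001",
# the fallback in infer_format splits on the ".", builds a width-matched subsecond
# directive (".%3f" for three fractional digits), and prepends the format guessed for
# the date part, yielding "%Y-%m-%d %H:%M:%S.%3f".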
|
the-stack_0_24085 | #!/usr/bin/env python
import ast
import os
import re
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
SOURCE_DIR = "galaxy"
_version_re = re.compile(r"__version__\s+=\s+(.*)")
project_short_name = os.path.basename(os.path.dirname(os.path.realpath(__file__)))
with open(f"{SOURCE_DIR}/project_galaxy_{project_short_name}.py") as f:
init_contents = f.read()
def get_var(var_name):
pattern = re.compile(rf"{var_name}\s+=\s+(.*)")
match = pattern.search(init_contents).group(1)
return str(ast.literal_eval(match))
version = get_var("__version__")
PROJECT_NAME = get_var("PROJECT_NAME")
PROJECT_URL = get_var("PROJECT_URL")
PROJECT_AUTHOR = get_var("PROJECT_AUTHOR")
PROJECT_EMAIL = get_var("PROJECT_EMAIL")
PROJECT_DESCRIPTION = get_var("PROJECT_DESCRIPTION")
TEST_DIR = "tests"
PACKAGES = [
"galaxy",
"galaxy.job_execution",
"galaxy.job_execution.actions",
"galaxy.job_execution.ports",
"galaxy.metadata",
]
ENTRY_POINTS = """
[console_scripts]
galaxy-set-metadata=galaxy.metadata.set_metadata:set_metadata
"""
PACKAGE_DATA = {
# Be sure to update MANIFEST.in for source dist.
"galaxy": [],
}
PACKAGE_DIR = {
SOURCE_DIR: SOURCE_DIR,
}
readme = open("README.rst").read()
history = open("HISTORY.rst").read().replace(".. :changelog:", "")
if os.path.exists("requirements.txt"):
requirements = open("requirements.txt").read().split("\n")
else:
# In tox, it will cover them anyway.
requirements = []
test_requirements = open("test-requirements.txt").read().split("\n")
setup(
name=PROJECT_NAME,
version=version,
description=PROJECT_DESCRIPTION,
long_description=readme + "\n\n" + history,
long_description_content_type="text/x-rst",
author=PROJECT_AUTHOR,
author_email=PROJECT_EMAIL,
url=PROJECT_URL,
packages=PACKAGES,
entry_points=ENTRY_POINTS,
package_data=PACKAGE_DATA,
package_dir=PACKAGE_DIR,
include_package_data=True,
install_requires=requirements,
license="AFL",
zip_safe=False,
keywords="galaxy",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Environment :: Console",
"License :: OSI Approved :: Academic Free License (AFL)",
"Operating System :: POSIX",
"Topic :: Software Development",
"Topic :: Software Development :: Code Generators",
"Topic :: Software Development :: Testing",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
test_suite=TEST_DIR,
tests_require=test_requirements,
)
|
the-stack_0_24086 | from struct import pack, unpack
import hashlib
import sys
import traceback
from qtum_electrum.bitcoin import TYPE_ADDRESS, int_to_hex, var_int, TYPE_SCRIPT
from qtum_electrum.bip32 import serialize_xpub
from qtum_electrum.i18n import _
from qtum_electrum.keystore import Hardware_KeyStore
from qtum_electrum.transaction import Transaction
from qtum_electrum.wallet import Standard_Wallet
from qtum_electrum.util import print_error, bfh, bh2u, versiontuple, UserFacingException
from qtum_electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch
try:
import hid
from btchip.btchipComm import HIDDongleHIDAPI, DongleWait
from btchip.btchip import btchip
from btchip.btchipUtils import compress_public_key, format_transaction, get_regular_input_script, \
get_p2sh_input_script
from btchip.bitcoinTransaction import bitcoinTransaction
from btchip.btchipFirmwareWizard import checkFirmware, updateFirmware
from btchip.btchipException import BTChipException
BTCHIP = True
BTCHIP_DEBUG = False
except ImportError:
BTCHIP = False
MSG_NEEDS_FW_UPDATE_GENERIC = _('Firmware version too old. Please update at') + \
' https://www.ledgerwallet.com'
MSG_NEEDS_FW_UPDATE_SEGWIT = _('Firmware version (or "Qtum" app) too old for Segwit support. Please update at') + \
' https://www.ledgerwallet.com'
MULTI_OUTPUT_SUPPORT = '1.1.4'
SEGWIT_SUPPORT = '1.1.10'
SEGWIT_SUPPORT_SPECIAL = '1.0.4'
def test_pin_unlocked(func):
"""Function decorator to test the Ledger for being unlocked, and if not,
raise a human-readable exception.
"""
def catch_exception(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except BTChipException as e:
if e.sw == 0x6982:
raise UserFacingException(_('Your Ledger is locked. Please unlock it.'))
else:
raise
return catch_exception
class Ledger_Client():
def __init__(self, hidDevice):
self.dongleObject = btchip(hidDevice)
self.preflightDone = False
def is_pairable(self):
return True
def close(self):
self.dongleObject.dongle.close()
def timeout(self, cutoff):
pass
def is_initialized(self):
return True
def label(self):
return ""
def i4b(self, x):
return pack('>I', x)
def has_usable_connection_with_device(self):
try:
self.dongleObject.getFirmwareVersion()
except BaseException:
return False
return True
@test_pin_unlocked
def get_xpub(self, bip32_path, xtype):
self.checkDevice()
# bip32_path is of the form 44'/0'/1'
# S-L-O-W - we don't handle the fingerprint directly, so compute
# it manually from the previous node
# This only happens once so it's bearable
#self.get_client() # prompt for the PIN before displaying the dialog if necessary
#self.handler.show_message("Computing master public key")
if xtype in ['p2wpkh', 'p2wsh'] and not self.supports_native_segwit():
raise UserFacingException(MSG_NEEDS_FW_UPDATE_SEGWIT)
if xtype in ['p2wpkh-p2sh', 'p2wsh-p2sh'] and not self.supports_segwit():
raise UserFacingException(MSG_NEEDS_FW_UPDATE_SEGWIT)
splitPath = bip32_path.split('/')
if splitPath[0] == 'm':
splitPath = splitPath[1:]
bip32_path = bip32_path[2:]
fingerprint = 0
if len(splitPath) > 1:
prevPath = "/".join(splitPath[0:len(splitPath) - 1])
try:
nodeData = self.dongleObject.getWalletPublicKey(prevPath)
except BTChipException as e:
if e.sw == 0x6f04:
raise Exception('error 6f04, Please update your firmware or try Bitcoin mode')
raise e
publicKey = compress_public_key(nodeData['publicKey'])
h = hashlib.new('ripemd160')
h.update(hashlib.sha256(publicKey).digest())
fingerprint = unpack(">I", h.digest()[0:4])[0]
nodeData = self.dongleObject.getWalletPublicKey(bip32_path)
publicKey = compress_public_key(nodeData['publicKey'])
depth = len(splitPath)
lastChild = splitPath[len(splitPath) - 1].split('\'')
childnum = int(lastChild[0]) if len(lastChild) == 1 else 0x80000000 | int(lastChild[0])
xpub = serialize_xpub(xtype, nodeData['chainCode'], publicKey, depth, self.i4b(fingerprint), self.i4b(childnum))
return xpub
def has_detached_pin_support(self, client):
try:
client.getVerifyPinRemainingAttempts()
return True
except BTChipException as e:
if e.sw == 0x6d00:
return False
print_error('has_detached_pin_support exception', e, e.sw)
raise e
def is_pin_validated(self, client):
try:
# Invalid SET OPERATION MODE to verify the PIN status
client.dongle.exchange(bytearray([0xe0, 0x26, 0x00, 0x00, 0x01, 0xAB]))
except BTChipException as e:
print_error('is_pin_validated exception', e, e.sw)
if (e.sw == 0x6982):
return False
if (e.sw == 0x6A80):
return True
raise e
def supports_multi_output(self):
return self.multiOutputSupported
def supports_segwit(self):
return self.segwitSupported
def supports_native_segwit(self):
return self.nativeSegwitSupported
def perform_hw1_preflight(self):
try:
firmwareInfo = self.dongleObject.getFirmwareVersion()
firmware = firmwareInfo['version']
self.multiOutputSupported = versiontuple(firmware) >= versiontuple(MULTI_OUTPUT_SUPPORT)
self.nativeSegwitSupported = versiontuple(firmware) >= versiontuple(SEGWIT_SUPPORT)
self.segwitSupported = self.nativeSegwitSupported or (firmwareInfo['specialVersion'] == 0x20 and versiontuple(firmware) >= versiontuple(SEGWIT_SUPPORT_SPECIAL))
if not checkFirmware(firmwareInfo):
self.dongleObject.dongle.close()
raise UserFacingException(MSG_NEEDS_FW_UPDATE_GENERIC)
try:
self.dongleObject.getOperationMode()
except BTChipException as e:
print_error('perform_hw1_preflight ex1', e)
if (e.sw == 0x6985):
self.dongleObject.dongle.close()
self.handler.get_setup()
# Acquire the new client on the next run
else:
raise e
if self.has_detached_pin_support(self.dongleObject) and not self.is_pin_validated(self.dongleObject) and (self.handler is not None):
remaining_attempts = self.dongleObject.getVerifyPinRemainingAttempts()
if remaining_attempts != 1:
msg = "Enter your Ledger PIN - remaining attempts : " + str(remaining_attempts)
else:
msg = "Enter your Ledger PIN - WARNING : LAST ATTEMPT. If the PIN is not correct, the dongle will be wiped."
confirmed, p, pin = self.password_dialog(msg)
if not confirmed:
raise UserFacingException('Aborted by user - please unplug the dongle and plug it again before retrying')
pin = pin.encode()
self.dongleObject.verifyPin(pin)
except BTChipException as e:
if (e.sw == 0x6faa):
raise UserFacingException("Dongle is temporarily locked - please unplug it and replug it again")
if ((e.sw & 0xFFF0) == 0x63c0):
raise UserFacingException("Invalid PIN - please unplug the dongle and plug it again before retrying")
if e.sw == 0x6f00 and e.message == 'Invalid channel':
# based on docs 0x6f00 might be a more general error, hence we also compare message to be sure
raise UserFacingException("Invalid channel.\n"
"Please make sure that 'Browser support' is disabled on your device.")
raise e
def checkDevice(self):
if not self.preflightDone:
try:
self.perform_hw1_preflight()
except BTChipException as e:
print_error('checkDevice', e)
if (e.sw == 0x6d00 or e.sw == 0x6700):
raise UserFacingException(_("Device not in Qtum mode")) from e
raise e
self.preflightDone = True
def password_dialog(self, msg=None):
response = self.handler.get_word(msg)
if response is None:
return False, None, None
return True, response, response
class Ledger_KeyStore(Hardware_KeyStore):
hw_type = 'ledger'
device = 'Ledger'
def __init__(self, d):
Hardware_KeyStore.__init__(self, d)
# Errors and other user interaction is done through the wallet's
# handler. The handler is per-window and preserved across
# device reconnects
self.force_watching_only = False
self.signing = False
self.cfg = d.get('cfg', {'mode': 0, 'pair': ''})
def dump(self):
obj = Hardware_KeyStore.dump(self)
obj['cfg'] = self.cfg
return obj
def get_derivation(self):
return self.derivation
def get_client(self):
return self.plugin.get_client(self).dongleObject
def get_client_electrum(self):
return self.plugin.get_client(self)
def give_error(self, message, clear_client=False):
print_error('give_error', message)
if not self.signing:
self.handler.show_error(message)
else:
self.signing = False
if clear_client:
self.client = None
raise UserFacingException(message)
def set_and_unset_signing(func):
"""Function decorator to set and unset self.signing."""
def wrapper(self, *args, **kwargs):
try:
self.signing = True
return func(self, *args, **kwargs)
finally:
self.signing = False
return wrapper
def address_id_stripped(self, address):
# Strip the leading "m/"
change, index = self.get_address_index(address)
derivation = self.derivation
address_path = "%s/%d/%d" % (derivation, change, index)
return address_path[2:]
def decrypt_message(self, pubkey, message, password):
raise UserFacingException(_('Encryption and decryption are currently not supported for {}').format(self.device))
@test_pin_unlocked
@set_and_unset_signing
def sign_message(self, sequence, message, password):
message = message.encode('utf8')
message_hash = hashlib.sha256(message).hexdigest().upper()
# prompt for the PIN before displaying the dialog if necessary
client = self.get_client()
address_path = self.get_derivation()[2:] + "/%d/%d" % sequence
self.handler.show_message("Signing message ...\r\nMessage hash: " + message_hash)
try:
info = self.get_client().signMessagePrepare(address_path, message)
pin = ""
if info['confirmationNeeded']:
pin = self.handler.get_auth(info) # does the authenticate dialog and returns pin
if not pin:
raise UserWarning(_('Cancelled by user'))
pin = str(pin).encode()
signature = self.get_client().signMessageSign(pin)
except BTChipException as e:
print_error('ledger sign_message', e)
if e.sw == 0x6a80:
self.give_error("Unfortunately, this message cannot be signed by the Ledger wallet. Only alphanumerical messages shorter than 140 characters are supported. Please remove any extra characters (tab, carriage return) and retry.")
elif e.sw == 0x6985: # cancelled by user
return b''
elif e.sw == 0x6982:
raise # pin lock. decorator will catch it
else:
self.give_error(e, True)
except UserWarning:
self.handler.show_error(_('Cancelled by user'))
return b''
except Exception as e:
self.give_error(e, True)
finally:
self.handler.finished()
# Parse the ASN.1 signature
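        # (DER layout assumed here: 0x30 <total-len> 0x02 <r-len> <r> 0x02 <s-len> <s>;
        #  the leading 0x00 pad byte is dropped below when r or s is 33 bytes long)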
rLength = signature[3]
r = signature[4: 4 + rLength]
sLength = signature[4 + rLength + 1]
s = signature[4 + rLength + 2:]
if rLength == 33:
r = r[1:]
if sLength == 33:
s = s[1:]
# And convert it
return bytes([27 + 4 + (signature[0] & 0x01)]) + r + s
@test_pin_unlocked
@set_and_unset_signing
def sign_transaction(self, tx, password):
if tx.is_complete():
return
client = self.get_client()
inputs = []
inputsPaths = []
pubKeys = []
chipInputs = []
redeemScripts = []
signatures = []
changePath = ""
output = None
p2shTransaction = False
segwitTransaction = False
pin = ""
self.get_client() # prompt for the PIN before displaying the dialog if necessary
# Fetch inputs of the transaction to sign
derivations = self.get_tx_derivations(tx)
for txin in tx.inputs():
if txin['type'] == 'coinbase':
self.give_error("Coinbase not supported") # should never happen
if txin['type'] in ['p2sh']:
p2shTransaction = True
if txin['type'] in ['p2wpkh-p2sh', 'p2wsh-p2sh']:
if not self.get_client_electrum().supports_segwit():
self.give_error(MSG_NEEDS_FW_UPDATE_SEGWIT)
segwitTransaction = True
if txin['type'] in ['p2wpkh', 'p2wsh']:
if not self.get_client_electrum().supports_native_segwit():
self.give_error(MSG_NEEDS_FW_UPDATE_SEGWIT)
segwitTransaction = True
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
for i, x_pubkey in enumerate(x_pubkeys):
if x_pubkey in derivations:
signingPos = i
s = derivations.get(x_pubkey)
hwAddress = "%s/%d/%d" % (self.get_derivation()[2:], s[0], s[1])
break
else:
self.give_error("No matching x_key for sign_transaction") # should never happen
redeemScript = Transaction.get_preimage_script(txin)
txin_prev_tx = txin.get('prev_tx')
if txin_prev_tx is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
txin_prev_tx_raw = txin_prev_tx.raw if txin_prev_tx else None
inputs.append([txin_prev_tx_raw,
txin['prevout_n'],
redeemScript,
txin['prevout_hash'],
signingPos,
txin.get('sequence', 0xffffffff - 1),
txin.get('value')])
inputsPaths.append(hwAddress)
pubKeys.append(pubkeys)
# Sanity check
if p2shTransaction:
for txin in tx.inputs():
if txin['type'] != 'p2sh':
self.give_error("P2SH / regular input mixed in same transaction not supported") # should never happen
txOutput = var_int(len(tx.outputs()))
for o in tx.outputs():
output_type, addr, amount = o.type, o.address, o.value
txOutput += int_to_hex(amount, 8)
script = tx.pay_script(output_type, addr)
txOutput += var_int(len(script) // 2)
txOutput += script
txOutput = bfh(txOutput)
# Recognize outputs
# - only one output and one change is authorized (for hw.1 and nano)
# - at most one output can bypass confirmation (~change) (for all)
if not p2shTransaction:
if not self.get_client_electrum().supports_multi_output():
if len(tx.outputs()) > 2:
self.give_error("Transaction with more than 2 outputs not supported")
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
# assert _type == TYPE_ADDRESS # qtum diff
info = tx.output_info.get(o.address)
if (info is not None) and (len(tx.outputs()) > 1) \
and not has_change:
index = info.address_index
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if on_change_branch == any_output_on_change_branch:
changePath = self.get_derivation()[2:] + "/%d/%d"%index
has_change = True
else:
output = o.address
else:
output = o.address
self.handler.show_message(_("Confirm Transaction on your Ledger device..."))
try:
# Get trusted inputs from the original transactions
for utxo in inputs:
sequence = int_to_hex(utxo[5], 4)
if segwitTransaction:
tmp = bfh(utxo[3])[::-1]
tmp += bfh(int_to_hex(utxo[1], 4))
tmp += bfh(int_to_hex(utxo[6], 8)) # txin['value']
chipInputs.append({'value': tmp, 'witness': True, 'sequence': sequence})
redeemScripts.append(bfh(utxo[2]))
elif not p2shTransaction:
txtmp = bitcoinTransaction(bfh(utxo[0]))
trustedInput = self.get_client().getTrustedInput(txtmp, utxo[1])
trustedInput['sequence'] = sequence
chipInputs.append(trustedInput)
redeemScripts.append(txtmp.outputs[utxo[1]].script)
else:
tmp = bfh(utxo[3])[::-1]
tmp += bfh(int_to_hex(utxo[1], 4))
chipInputs.append({'value': tmp, 'sequence': sequence})
redeemScripts.append(bfh(utxo[2]))
# Sign all inputs
firstTransaction = True
inputIndex = 0
rawTx = tx.serialize_to_network()
self.get_client().enableAlternate2fa(False)
if segwitTransaction:
self.get_client().startUntrustedTransaction(True, inputIndex,
chipInputs, redeemScripts[inputIndex], version=tx.version)
# we don't set meaningful outputAddress, amount and fees
# as we only care about the alternateEncoding==True branch
outputData = self.get_client().finalizeInput(b'', 0, 0, changePath, bfh(rawTx))
outputData['outputData'] = txOutput
if outputData['confirmationNeeded']:
outputData['address'] = output
self.handler.finished()
pin = self.handler.get_auth(outputData) # does the authenticate dialog and returns pin
if not pin:
raise UserWarning()
if pin != 'paired':
self.handler.show_message(_("Confirmed. Signing Transaction..."))
while inputIndex < len(inputs):
singleInput = [chipInputs[inputIndex]]
self.get_client().startUntrustedTransaction(False, 0,
singleInput, redeemScripts[inputIndex], version=tx.version)
inputSignature = self.get_client().untrustedHashSign(inputsPaths[inputIndex], pin,
lockTime=tx.locktime)
inputSignature[0] = 0x30 # force for 1.4.9+
signatures.append(inputSignature)
inputIndex = inputIndex + 1
else:
while inputIndex < len(inputs):
self.get_client().startUntrustedTransaction(firstTransaction, inputIndex,
chipInputs, redeemScripts[inputIndex], version=tx.version)
# we don't set meaningful outputAddress, amount and fees
# as we only care about the alternateEncoding==True branch
outputData = self.get_client().finalizeInput(b'', 0, 0, changePath, bfh(rawTx))
outputData['outputData'] = txOutput
if outputData['confirmationNeeded']:
outputData['address'] = output
self.handler.finished()
pin = self.handler.get_auth(outputData) # does the authenticate dialog and returns pin
if not pin:
raise UserWarning()
if pin != 'paired':
self.handler.show_message(_("Confirmed. Signing Transaction..."))
else:
# Sign input with the provided PIN
inputSignature = self.get_client().untrustedHashSign(inputsPaths[inputIndex], pin,
lockTime=tx.locktime)
inputSignature[0] = 0x30 # force for 1.4.9+
signatures.append(inputSignature)
inputIndex = inputIndex + 1
if pin != 'paired':
firstTransaction = False
except UserWarning:
self.handler.show_error(_('Cancelled by user'))
return
except BTChipException as e:
if e.sw in (0x6985, 0x6d00): # cancelled by user
return
elif e.sw == 0x6982:
raise # pin lock. decorator will catch it
else:
traceback.print_exc(file=sys.stderr)
self.give_error(e, True)
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.give_error(e, True)
finally:
self.handler.finished()
for i, txin in enumerate(tx.inputs()):
signingPos = inputs[i][4]
tx.add_signature_to_txin(i, signingPos, bh2u(signatures[i]))
tx.raw = tx.serialize()
@test_pin_unlocked
@set_and_unset_signing
def show_address(self, sequence, txin_type):
client = self.get_client()
address_path = self.get_derivation()[2:] + "/%d/%d" % sequence
self.handler.show_message(_("Showing address ..."))
segwit = Transaction.is_segwit_inputtype(txin_type)
segwitNative = txin_type == 'p2wpkh'
try:
client.getWalletPublicKey(address_path, showOnScreen=True, segwit=segwit, segwitNative=segwitNative)
except BTChipException as e:
if e.sw == 0x6985: # cancelled by user
pass
elif e.sw == 0x6982:
raise # pin lock. decorator will catch it
elif e.sw == 0x6b00: # hw.1 raises this
self.handler.show_error('{}\n{}\n{}'.format(
_('Error showing address') + ':',
e,
_('Your device might not have support for this functionality.')))
else:
traceback.print_exc(file=sys.stderr)
self.handler.show_error(e)
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.handler.show_error(e)
finally:
self.handler.finished()
class LedgerPlugin(HW_PluginBase):
libraries_available = BTCHIP
keystore_class = Ledger_KeyStore
client = None
DEVICE_IDS = [
(0x2581, 0x1807), # HW.1 legacy btchip
(0x2581, 0x2b7c), # HW.1 transitional production
(0x2581, 0x3b7c), # HW.1 ledger production
(0x2581, 0x4b7c), # HW.1 ledger test
(0x2c97, 0x0000), # Blue
(0x2c97, 0x0001), # Nano-S
(0x2c97, 0x0004), # Nano-X
(0x2c97, 0x0005), # RFU
(0x2c97, 0x0006), # RFU
(0x2c97, 0x0007), # RFU
(0x2c97, 0x0008), # RFU
(0x2c97, 0x0009), # RFU
(0x2c97, 0x000a) # RFU
]
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
def __init__(self, parent, config, name):
self.segwit = config.get("segwit")
HW_PluginBase.__init__(self, parent, config, name)
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def get_btchip_device(self, device):
ledger = False
if device.product_key[0] == 0x2581 and device.product_key[1] == 0x3b7c:
ledger = True
if device.product_key[0] == 0x2581 and device.product_key[1] == 0x4b7c:
ledger = True
if device.product_key[0] == 0x2c97:
if device.interface_number == 0 or device.usage_page == 0xffa0:
ledger = True
else:
return None # non-compatible interface of a nano s or blue
dev = hid.device()
dev.open_path(device.path)
dev.set_nonblocking(True)
return HIDDongleHIDAPI(dev, ledger, BTCHIP_DEBUG)
def create_client(self, device, handler):
if handler:
self.handler = handler
client = self.get_btchip_device(device)
if client is not None:
client = Ledger_Client(client)
return client
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
client.handler = self.create_handler(wizard)
client.get_xpub("m/44'/88'", 'standard') # TODO replace by direct derivation once Nano S > 1.1
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = self.create_handler(wizard)
client.checkDevice()
xpub = client.get_xpub(derivation, xtype)
return xpub
def get_client(self, keystore, force_pair=True):
# All client interaction should not be in the main GUI thread
# assert self.main_thread != threading.current_thread()
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
# if client:
# client.used()
if client is not None:
client.checkDevice()
return client
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
if type(wallet) is not Standard_Wallet:
keystore.handler.show_error(_('This function is only available for standard wallets when using {}.').format(self.device))
return
sequence = wallet.get_address_index(address)
txin_type = wallet.get_txin_type(address)
keystore.show_address(sequence, txin_type)
|
the-stack_0_24087 | #!/usr/bin/env python
"""
run_dless2_rebecca.py
[--log_file PATH]
[--verbose]
"""
################################################################################
#
# test
#
#
# Copyright (c) 7/13/2010 Rebecca Chodroff
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#################################################################################
import sys, os, re, shutil
# add self to search path for testing
if __name__ == '__main__':
exe_path = os.path.split(os.path.abspath(sys.argv[0]))[0]
module_name = os.path.split(sys.argv[0])[1]
module_name = os.path.splitext(module_name)[0];
else:
module_name = __name__
# Use import path from <<../python_modules>>
if __name__ == '__main__':
sys.path.append(os.path.abspath(os.path.join(exe_path,"..", "python_modules")))
sys.path.insert(0, "/net/cpp-group/Leo/inprogress/oss_projects/ruffus/installation")
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# options
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
if __name__ == '__main__':
from optparse import OptionParser
import StringIO
parser = OptionParser(version="%prog 1.0", usage = "\n\n %progs [options]")
parser.add_option("--targets", dest="targets",
metavar="INTEGERS",
type="string",
help="List of comma separated targets.")
parser.add_option("--working_dir", dest="working_dir",
metavar="PATH",
type="string",
help="Working directory.")
parser.add_option("--starting_dir", dest="starting_dir",
metavar="PATH",
type="string",
help="Starting directory.")
#
# general options: verbosity / logging
#
parser.add_option("-v", "--verbose", dest = "verbose",
action="count", default=0,
help="Print more verbose messages for each additional verbose level.")
parser.add_option("-L", "--log_file", dest="log_file",
metavar="FILE",
type="string",
help="Name and path of log file")
parser.add_option("--skip_parameter_logging", dest="skip_parameter_logging",
action="store_true", default=False,
help="Do not print program parameters to log.")
parser.add_option("--debug", dest="debug",
action="count", default=0,
help="Set default program parameters in debugging mode.")
#
# pipeline
#
parser.add_option("-t", "--target_tasks", dest="target_tasks",
action="append",
default = list(),
metavar="JOBNAME",
type="string",
help="Target task(s) of pipeline.")
parser.add_option("-j", "--jobs", dest="jobs",
default=1,
metavar="N",
type="int",
help="Allow N jobs (commands) to run simultaneously.")
parser.add_option("-n", "--just_print", dest="just_print",
action="store_true", default=False,
help="Don't actually run any commands; just print the pipeline.")
parser.add_option("--flowchart", dest="flowchart",
metavar="FILE",
type="string",
help="Don't actually run any commands; just print the pipeline "
"as a flowchart.")
parser.add_option("--colour_scheme_index", dest="colour_scheme_index",
metavar="INTEGER",
type="int",
help="Index of colour scheme for flow chart.")
#
# Less common pipeline options
#
parser.add_option("--key_legend_in_graph", dest="key_legend_in_graph",
action="store_true", default=False,
help="Print out legend and key for dependency graph.")
parser.add_option("--forced_tasks", dest="forced_tasks",
action="append",
default = list(),
metavar="JOBNAME",
type="string",
help="Pipeline task(s) which will be included even if they are up to date.")
# get help string
f =StringIO.StringIO()
parser.print_help(f)
helpstr = f.getvalue()
original_args = " ".join(sys.argv)
(options, remaining_args) = parser.parse_args()
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
# #
# Debug: Change these #
# #
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
if options.debug:
options.log_file = os.path.join("run_dless2.log")
if not options.verbose:
options.verbose = 1
if not options.targets:
options.targets = "87"
if not options.working_dir:
options.working_dir = "DLESS"
if not options.starting_dir:
options.starting_dir = "DUMMY"
#vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
# #
# Debug: Change these #
# #
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# mandatory options
#
mandatory_options = ["targets", "starting_dir", "working_dir"]
def check_mandatory_options (options, mandatory_options, helpstr):
"""
        Check if the specified mandatory options have been defined
"""
missing_options = []
for o in mandatory_options:
if not getattr(options, o):
missing_options.append("--" + o)
if not len(missing_options):
return
raise Exception("Missing mandatory parameter%s: %s.\n\n%s\n\n" %
("s" if len(missing_options) > 1 else "",
", ".join(missing_options),
helpstr))
check_mandatory_options (options, mandatory_options, helpstr)
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# imports
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
from ruffus import *
from ruffus.ruffus_exceptions import JobSignalledBreak
#from json import dumps
#from collections import defaultdict
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Constants
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
#from read_project_parameter_file import get_keys
#parameters = get_keys('PARAMETERS')
#
#reference_species = parameters['REFERENCE_SPECIES']
#species = parameters['SPECIES']
#working_dir = parameters['WORKING_DIR_ROOT']
#tree = parameters['TREE']
#ref_sequences = parameters['REFERENCE_SEQUENCES']
#working_dir= parameters['WORKING_DIR_ROOT']
#tba_alignments = parameters['TBA_ALIGNMENTS_DIR']
#tba_projected_alignments = parameters['TBA_PROJECTED_ALIGNMENTS_DIR']
#fasta_alignments = parameters['FASTA_ALIGNMENTS_DIR']
#repeats_dir = parameters['REPEATS_DIR']
#neutral_mods_dir = parameters['NEUTRAL_MODELS_DIR']
#indel_hist_dir = parameters['INDEL_HISTORY_DIR']
#indel_mods_dir = parameters['INDEL_MODELS_DIR']
#
#python_code = parameters['PYTHON_CODE_DIR']
#find_ars = parameters['FIND_ARS']
#maf_project = parameters['MAF_PROJECT_BIN']
#phylo_fit = parameters['PHYLOFIT_BINARY']
#msa_view = parameters['MSA_VIEW_BINARY']
#indel_history = parameters['INDEL_HISTORY_BINARY']
#tree_doctor = parameters['TREE_DOCTOR_BINARY']
#indel_fit = parameters['INDEL_FIT_BINARY']
#dless = parameters['DLESS_BINARY']
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Functions
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
#def get_all_alignment_file_names():
# alignment_file_names = os.listdir(tba_alignments)
# return alignment_file_names
#
#def get_all_targets():
# alignment_file_names = get_all_alignment_file_names()
# targets = []
# for alignment_file_name in alignment_file_names:
# targets.append(alignment_file_name.split('.')[0])
# return targets
#
#
#def find_ARs(target):
# coor_file = os.path.join(ref_sequences, target + '_' + reference_species)
# coor = open(coor_file, 'r').readline()
# species, chr, start, strand, other = coor.split(':',4)
# RM_out = os.path.join(repeats_dir, target + '_' + reference_species + '.out')
# out_file = os.path.join(repeats_dir, target + '_' + reference_species + '.ar')
# os.system('perl ' + find_ars + ' ' + RM_out + ' >' + out_file)
# return chr, start, strand, out_file
#
#def write_gff(target):
# chr, start, strand, out_file = find_ARs(target)
# file = open(out_file, 'r')
# line = file.readline()
# lines = file.readlines()
# repeats_file = os.path.join(repeats_dir, target + '_' + reference_species + '.gff')
# repeats = open(repeats_file, 'w')
# for line in lines:
# line = line.strip()
# beg, end, feature = line.split()
# b = str(int(beg) + int(start))
# e = str(int(end) + int(start))
# entry = '\t'.join([chr, 'RepeatMasker', 'AR', b, e,'.', '.','.',feature])
# repeats.write(entry + '\n')
# repeats.close()
# return repeats_file
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Logger
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
if __name__ == '__main__':
import logging
import logging.handlers
MESSAGE = 15
logging.addLevelName(MESSAGE, "MESSAGE")
def setup_std_logging (logger, log_file, verbose):
"""
set up logging using programme options
"""
class debug_filter(logging.Filter):
"""
Ignore INFO messages
"""
def filter(self, record):
return logging.INFO != record.levelno
class NullHandler(logging.Handler):
"""
for when there is no logging
"""
def emit(self, record):
pass
        # We are interested in all messages
logger.setLevel(logging.DEBUG)
has_handler = False
# log to file if that is specified
if log_file:
handler = logging.FileHandler(log_file, delay=False)
handler.setFormatter(logging.Formatter("%(asctime)s - %(name)s - %(levelname)6s - %(message)s"))
handler.setLevel(MESSAGE)
logger.addHandler(handler)
has_handler = True
# log to stderr if verbose
if verbose:
stderrhandler = logging.StreamHandler(sys.stderr)
stderrhandler.setFormatter(logging.Formatter(" %(message)s"))
stderrhandler.setLevel(logging.DEBUG)
if log_file:
stderrhandler.addFilter(debug_filter())
logger.addHandler(stderrhandler)
has_handler = True
# no logging
if not has_handler:
logger.addHandler(NullHandler())
#
# set up log
#
logger = logging.getLogger(module_name)
setup_std_logging(logger, options.log_file, options.verbose)
#
# Allow logging across Ruffus pipeline
#
def get_logger (logger_name, args):
return logger
from ruffus.proxy_logger import *
(logger_proxy,
logging_mutex) = make_shared_logger_and_proxy (get_logger,
module_name,
{})
#
# log programme parameters
#
if not options.skip_parameter_logging:
programme_name = os.path.split(sys.argv[0])[1]
logger.info("%s %s" % (programme_name, original_args))
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Pipeline
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
#
# convert targets into individual strings
#   and identify files in starting_dir
#
options.targets = re.split(", *", options.targets)
if not len(options.targets):
raise Exception ("Please specify the targets as a common separated list for --targets")
starting_files = [os.path.join(options.starting_dir, t + ".maf") for t in options.targets]
repeats_files = [os.path.join(options.starting_dir, t + ".repeats") for t in options.targets]
#
# regex to split path out and substitute working directory
#
@follows(mkdir(options.working_dir, "test"))
@transform(starting_files, regex(r"(.+/)([^/]+\.maf)"), os.path.join(options.working_dir, r"\2"))
def copy_maf_into_working_directory(input_file, output_file):
"""
Make copy in working directory
"""
shutil.copyfile(input_file, output_file)
@follows(mkdir(options.working_dir))
@transform(repeats_files, regex(r"(.+/)([^/]+\.repeats)"), os.path.join(options.working_dir, r"\2"))
def copy_repeats_into_working_directory(input_file, output_file):
"""
Make copy in working directory
"""
shutil.copyfile(input_file, output_file)
#
# pipes in output from copy_maf_into_working_directory
#
# working_dir/target.maf -> working_dir/target.projected_maf
#
@transform(copy_maf_into_working_directory, suffix(".maf"), ".projected_maf")
def project_maf_alignments(input_file, output_file):
#os.system(maf_project + ' ' + input_file + ' ' + reference_species + ' > ' + output_file)
open(output_file, "w")
@transform(project_maf_alignments, suffix(".projected_maf"), add_inputs(r"\1.repeats"), r".neutral_model")
@follows(copy_repeats_into_working_directory)
def generate_neutral_model(input_files, output_file):
maf_file, repeats_file = input_files
#cmd_str = (phylo_fit + ' --tree ' + tree
# + ' --features ' + repeats_file
# + ' --do-cats AR --out-root '
# + output_file + ' --msa-format MAF '
# + maf_file)
#os.system(cmd_str)
#run_cmd(cmd_str, "generate neutral model")
#queue_cmd_prefix = "qrsh -now n -cwd -p -6 -v BASH_ENV=~/.bashrc -q medium_jobs.q"
#target_name = os.path.splitext("maf_file")[0]
#run_cmd(cmd_str, "generate neutral model", queue_cmd_prefix = queue_cmd_prefix, job_name = target_name)
open(output_file, "w")
#must convert maf to fasta for indel history program
@transform(project_maf_alignments, suffix(".projected_maf"), ".fa")
def convert_maf2fasta(input_file, output_file):
#species = (open(species, 'r')).readline()
#os.system(msa_view + ' ' + input_file\
# + ' --soft-masked --seqs '\
# + species + ' --in-format MAF > ' + output_file)
open(output_file, "w")
@follows(generate_neutral_model)
@transform(convert_maf2fasta, regex("(.+).fa"), add_inputs(r"\1.neutral_model"), r"\1.indel_history")
def generate_indel_history(input_files, output_file):
fasta_file, neutral_model = input_files
#os.system(indel_history + ' ' + fasta_file + ' ' + neutral_model + ' > ' + output_file)
open(output_file, "w")
def parameters_for_dless(indel_history_file, neutral_model):
os.system(tree_doctor + ' -t ' + neutral_model + ' > ' + tree_file)
cmd = indel_fit + ' ' + indel_history_file + ' ' + tree_file
fin,fout=os.popen4(cmd)
indel_mod = fout.read()
indelmod=[]
for n in [2,5,8]:
indelmod.append(indel_mod.split()[n].strip(','))
return indelmod
@follows(generate_neutral_model)
@follows(convert_maf2fasta)
@transform(generate_indel_history,
suffix(".indel_history"),
add_inputs(r"\1.neutral_model", r"\1.fa"),
".dless.out")
def run_dless(input_files, output_file):
indel_history_file, neutral_model, fasta_file = input_files
#indelmod = parameters_for_dless(indel_history_file, neutral_model)
#cmd = ' '.join([dless,'-I',','.join(indel_params), '-H', indel_history_file,
# alignment, neutral_mod, '>', output_file])
#os.system(cmd)
open(output_file, "w")
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Main logic
#88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
if __name__ == '__main__':
if options.just_print:
pipeline_printout(sys.stdout, options.target_tasks, options.forced_tasks,
verbose=options.verbose)
elif options.flowchart:
pipeline_printout_graph ( open(options.flowchart, "w"),
os.path.splitext(options.flowchart)[1][1:],
options.target_tasks,
options.forced_tasks,
no_key_legend = not options.key_legend_in_graph,
minimal_key_legend = True,
user_colour_scheme = {"colour_scheme_index": options.colour_scheme_index},
pipeline_name = "dless2")
#graph_colour_demo_printout (open(options.flowchart, "w"),
# os.path.splitext(options.flowchart)[1][1:])
else:
pipeline_run(options.target_tasks, options.forced_tasks,
multiprocess = options.jobs,
logger = stderr_logger,
verbose = options.verbose)
|
the-stack_0_24088 | #!/usr/bin/env python3
# vim:fileencoding=utf-8:ft=python
# file: creoclean.py
#
# Copyright © 2015 R.F. Smith <[email protected]>. All rights reserved.
# Created: 2015-05-07 18:29:17 +0200
# Last modified: 2017-11-11 19:50:41 +0100
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Cleans up Creo versioned files.
Works in the named directories or in the current working directory.
Removes all versions except the last one, and renames that to version 1.
"""
import argparse
import logging
import os
import re
import sys
__version__ = '1.0'
def main(argv):
"""
Entry point for creoclean.
Arguments:
argv: command line arguments
"""
dr = "dry run; show what would be done but don't delete files"
opts = argparse.ArgumentParser(prog='creoclean', description=__doc__)
opts.add_argument('-d', dest='dry_run', action="store_true", help=dr)
opts.add_argument('-v', '--version', action='version', version=__version__)
opts.add_argument(
'--log',
default='warning',
choices=['debug', 'info', 'warning', 'error'],
help="logging level (defaults to 'warning')"
)
opts.add_argument(
"dirs", metavar='dir', nargs='*', default=[], help="one or more directories to process"
)
args = opts.parse_args(argv)
lfmt = '%(levelname)s: %(message)s'
if args.dry_run:
logging.basicConfig(level='INFO', format=lfmt)
logging.info('DRY RUN, no files will be deleted or renamed')
else:
logging.basicConfig(level=getattr(logging, args.log.upper(), None), format=lfmt)
if not args.dirs:
args.dirs = ['.']
for directory in [d for d in args.dirs if os.path.isdir(d)]:
logging.info("cleaning in '{}'".format(directory))
cleandir(directory, args.dry_run)
def cleandir(path, dry_run):
"""
Clean up Creo files in the named directory.
Arguments:
path: The path of the directory to clean.
dry_run: Boolean to indicate a dry run.
"""
filenames = [e for e in os.listdir(path) if os.path.isfile(os.path.join(path, e))]
logging.info('found {} files'.format(len(filenames)))
    splits = [re.split(r'^(.*)\.([^\.]{3})\.([0-9]+)$', fn) for fn in filenames]
splits = [s[1:-1] for s in splits if len(s) == 5]
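    # For example, re.split() on 'bracket.prt.12' yields ['', 'bracket', 'prt', '12', ''],
    # so s[1:-1] keeps ['bracket', 'prt', '12']; names that do not match the
    # <name>.<ext>.<version> pattern come back as a single-element list and are
    # dropped by the len(s) == 5 filter.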
exts = sorted(set([s[1] for s in splits]))
os.chdir(path)
for ext in exts:
data = [s for s in splits if s[1] == ext]
cnt = len(data)
if cnt < 2:
logging.info("not enough '{}' files; skipping".format(ext))
continue
logging.info("found {} '{}' files".format(cnt, ext))
names = set(p[0] for p in data)
logging.info("found {} unique '{}' file names".format(len(names), ext))
for nm in names:
numbers = [int(p[2]) for p in data if p[0] == nm]
if len(numbers) > 1:
numbers.sort()
for n in numbers[:-1]:
fn = "{}.{}.{}".format(nm, ext, n)
logging.info("removing '{}'".format(fn))
if not dry_run:
try:
os.remove(fn)
except OSError as e:
es = "removing '{}' failed: {}"
logging.warning(es.format(fn, e))
oldfn = "{}.{}.{}".format(nm, ext, numbers[-1])
newfn = "{}.{}.{}".format(nm, ext, 1)
if oldfn != newfn:
logging.info("renaming '{}' to '{}'".format(oldfn, newfn))
if not dry_run:
try:
os.rename(oldfn, newfn)
except OSError as e:
es = "renaming '{}' failed: {}"
logging.warning(es.format(oldfn, e))
if __name__ == '__main__':
main(sys.argv[1:])
|
the-stack_0_24089 | import copy
import random
from queue import Queue
from threading import Thread
import rocksdb
import traceback
from collections import Counter
from multiprocessing import Process, Manager
from multiprocessing.pool import Pool
import nltk
from tqdm import tqdm
from django.conf import settings
from capdb.models import Jurisdiction, CaseMetadata, CaseBodyCache
from capdb.storages import ngram_kv_store, KVDB, ngram_kv_store_ro
from scripts.helpers import ordered_query_iterator
nltk.data.path = settings.NLTK_PATH
unicode_translate_table = dict((ord(a), ord(b)) for a, b in zip(u'\u201c\u201d\u2018\u2019', u'""\'\''))
# custom tokenizer to disable separating contractions and possessives into separate words
tokenizer = copy.copy(nltk.tokenize._treebank_word_tokenizer)
tokenizer.CONTRACTIONS2 = tokenizer.CONTRACTIONS3 = []
tokenizer.ENDING_QUOTES = tokenizer.ENDING_QUOTES[:-2]
strip_chars = """`~!@#$%^&*()-_=+[{]}\|;:'",<>/?¡°¿‡†—•■"""
strip_right_chars = strip_chars + "£$©"
strip_left_chars = strip_chars + ".®"
def tokenize(text):
# clean up input
text = text.translate(unicode_translate_table)\
.replace(u"\u2014", u" \u2014 ") # add spaces around m-dashes
# yield each valid token
for sentence in nltk.sent_tokenize(text):
for token in tokenizer.tokenize(sentence):
token = token.lower().rstrip(strip_right_chars).lstrip(strip_left_chars)
if token:
yield token
def ngrams(words, n, padding=False):
"""
Yield generator of all n-tuples from list of words.
This approach uses more RAM but is faster than nltk.ngrams, which doesn't immediately consume the generator.
"""
words = list(words)
if padding:
word_lists = [words[i:] for i in range(n)]
else:
word_lists = [words[i:-n+i+1 or None] for i in range(n)]
return zip(*word_lists)
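# Illustrative sketch (not part of the pipeline; the sample sentence is an assumption)
# of how tokenize() and ngrams() compose:
#
#   tokens = list(tokenize("The court affirmed the judgment."))
#   # -> ['the', 'court', 'affirmed', 'the', 'judgment']
#   [' '.join(gram) for gram in ngrams(tokens, 2)]
#   # -> ['the court', 'court affirmed', 'affirmed the', 'the judgment']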
def get_totals_key(jurisdiction_id, year, n):
return b"totals" + KVDB.pack((jurisdiction_id, year, n))
def ngram_jurisdictions(slug=None, max_n=3):
"""
Add jurisdiction specified by slug to rocksdb, or all jurisdictions if name not provided.
This is the primary ngrams entrypoint. It spawns NGRAM_THREAD_COUNT worker processes to
ngram each jurisdiction-year, plus a rocksdb worker process that pulls their work off of
the queue and writes it to the database.
"""
# process pool of workers to ngram each jurisdiction-year and return keys
ngram_workers = Pool(settings.NGRAM_THREAD_COUNT, maxtasksperchild=1)
# inter-process queue of returned keys
m = Manager()
queue = m.Queue(settings.NGRAM_THREAD_COUNT)
ngram_worker_offsets = m.dict()
ngram_worker_lock = m.Lock()
# process to write keys to rocksdb
rocksdb_loaded = m.Condition()
rocksdb_worker = Process(target=rocksdb_writer, args=(queue, rocksdb_loaded))
rocksdb_worker.start()
with rocksdb_loaded:
rocksdb_loaded.wait()
# queue each jurisdiction-year for processing
jurisdictions = Jurisdiction.objects.all()
if slug:
jurisdictions = jurisdictions.filter(slug=slug)
ngram_worker_results = []
for jurisdiction in jurisdictions:
# skip empty jurisdictions
if not jurisdiction.case_metadatas.exists():
continue
# get year range
case_query = CaseMetadata.objects.in_scope().filter(jurisdiction_slug=jurisdiction.slug)
first_year = case_query.order_by('decision_date', 'id').first().decision_date.year
last_year = case_query.order_by('-decision_date', '-id').first().decision_date.year
# ngram each year
for year in range(first_year, last_year + 1):
# ngram_worker(queue, jurisdiction_id, year, max_n)
ngram_worker_results.append((jurisdiction.slug, year, ngram_workers.apply_async(ngram_worker, (ngram_worker_offsets, ngram_worker_lock, queue, jurisdiction.id, jurisdiction.slug, year, max_n))))
# wait for all ngram workers to finish
ngram_workers.close()
ngram_workers.join()
# report failures
for jurisdiction_slug, year, result in ngram_worker_results:
if not result._success:
exc = result._value
print("%s-%s failed:" % (jurisdiction_slug, year))
traceback.print_exception(etype=type(exc), value=exc, tb=exc.__traceback__)
# tell rocksdb worker to exit, and wait for it to finish
queue.put('STOP')
rocksdb_worker.join()
def ngram_worker(ngram_worker_offsets, ngram_worker_lock, queue, jurisdiction_id, jurisdiction_slug, year, max_n):
"""
Worker process to generate all ngrams for the given jurisdiction-year and add them to the queue.
"""
# skip reindexing jurisdiction-year combinations that already have ngrams
if ngram_kv_store_ro.get(get_totals_key(jurisdiction_id, year, 3)):
return
# tqdm setup -- add an offset based on current process index, plus space for rocksdb worker
desc = "%s-%s" % (jurisdiction_slug, year)
with ngram_worker_lock:
line_offset = next((i for i in range(settings.NGRAM_THREAD_COUNT) if i not in ngram_worker_offsets), None)
if line_offset is None:
            # random.shuffle() returns None; pick a random already-used offset instead
            line_offset = random.choice(list(ngram_worker_offsets.keys()))
ngram_worker_offsets[line_offset] = True
pos = 2 + settings.NGRAM_THREAD_COUNT + line_offset
# count words for each case
counters = {n: {'total_tokens':0, 'total_documents':0, 'instances': Counter(), 'documents': Counter()} for n in range(1, max_n + 1)}
queryset = CaseBodyCache.objects.filter(
metadata__duplicative=False, metadata__jurisdiction__isnull=False, metadata__court__isnull=False,
metadata__decision_date__year=year, metadata__jurisdiction_slug=jurisdiction_slug
).only('text').order_by('id')
for case_text in tqdm(ordered_query_iterator(queryset), desc="Ngram %s" % desc, position=pos, mininterval=.5):
tokens = list(tokenize(case_text.text))
for n in range(1, max_n + 1):
grams = list(' '.join(gram) for gram in ngrams(tokens, n))
counters[n]['total_tokens'] = counters[n].setdefault('total_tokens', 0) + len(grams)
counters[n]['total_documents'] = counters[n].setdefault('total_documents', 0) + 1
counters[n]['instances'].update(grams)
counters[n]['documents'].update(set(grams))
# enqueue data for rocksdb
storage_year = year - 1900
for n, counts in counters.items():
# skip storing jurisdiction-year combinations that already have ngrams
totals_key = get_totals_key(jurisdiction_id, year, n)
if ngram_kv_store_ro.get(totals_key):
print(" - Length %s already in totals" % n)
continue
# set up values for use by rocksdb_write_thread()
totals = (totals_key, [counts['total_tokens'], counts['total_documents']])
merge_value_prefix = (jurisdiction_id, storage_year)
# prepare list of all ngram observations to be merged into rocksdb, in the form:
# merges = [
# (b'<n><gram>': (<instance count>, <document count>)), ...
# ]
key_prefix = bytes([int(n)])
count_pairs = zip(sorted(counts['instances'].items()), sorted(counts['documents'].items()))
merges = [(key_prefix+gram.encode('utf8'), (instance_count, document_count)) for (gram, instance_count), (_, document_count) in count_pairs]
queue.put((totals, merge_value_prefix, merges))
del ngram_worker_offsets[line_offset]
def rocksdb_writer(queue, rocksdb_loaded):
"""
Worker process to pull ngrams off of the queue and add them to a second internal queue for writing to rocksdb.
This spawns NGRAM_THREAD_COUNT threads to do the actual writing.
"""
# make sure the database exists; read-only clients in ngram_worker() will choke if it doesn't
ngram_kv_store.get(b'init')
with rocksdb_loaded:
rocksdb_loaded.notify_all()
# NOTE: the following is a lower-level way to do something we could just do with multiprocessing.ThreadPool.
# Unfortunately ThreadPool doesn't let us set a max queue size for adding tasks to the queue, which is needed for backpressure.
# start an internal queue and threads to read from it
internal_queue = Queue(settings.NGRAM_THREAD_COUNT)
threads = []
for i in range(settings.NGRAM_THREAD_COUNT):
t = Thread(target=rocksdb_write_thread, args=(internal_queue,))
t.start()
threads.append(t)
# pull all items off of the inter-process queue and onto the internal queue
for item in tqdm(iter(queue.get, 'STOP'), position=0, desc="Jurisdiction-years written", mininterval=.5):
internal_queue.put(item)
# block until all tasks are done
internal_queue.join()
# stop worker threads
for i in range(settings.NGRAM_THREAD_COUNT):
internal_queue.put(None)
for t in threads:
t.join()
def rocksdb_write_thread(queue):
"""
Worker thread to write ngrams to rocksdb, spawned by rocksdb_writer.
"""
while True:
try:
# fetch items until 'None' is added to queue
item = queue.get()
if item is None:
break
totals, merge_value_prefix, merges = item
# skip storing jurisdiction-year combinations that already have ngrams
if ngram_kv_store.get(totals[0]):
continue
# write in a batch so writes succeed or fail as a group
batch = rocksdb.WriteBatch()
# write each ngram, in the form (b'<n><gram>', pack(<jurisdiction_id>, <year>, <instance_count>, <document_count>))
# see ngram_kv_store.NgramMergeOperator for how this value is merged into the existing b'<n><gram>' key
for k, v in tqdm(merges, desc="Current write job", mininterval=.5):
ngram_kv_store.merge(k, merge_value_prefix+v, packed=True, batch=batch)
# write totals value
ngram_kv_store.put(totals[0], totals[1], packed=True, batch=batch)
# write batch
ngram_kv_store.db.write(batch)
finally:
# let internal_queue.join() know not to wait for this job to complete
queue.task_done()
|
the-stack_0_24092 | from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
from constants.pipelines import OperationStatuses, PipelineStatuses
from db.models.pipelines import OperationRun, OperationRunStatus, PipelineRun, PipelineRunStatus
from libs.decorators import ignore_raw, ignore_updates
from polyaxon.celery_api import app as celery_app
from polyaxon.settings import PipelineCeleryTasks
from signals.run_time import set_finished_at, set_started_at
@receiver(post_save, sender=PipelineRun, dispatch_uid="pipeline_run_saved")
@ignore_updates
@ignore_raw
def new_pipeline_run(sender, **kwargs):
instance = kwargs['instance']
instance.set_status(PipelineStatuses.CREATED)
@receiver(post_save, sender=OperationRun, dispatch_uid="operation_run_saved")
@ignore_updates
@ignore_raw
def new_operation_run(sender, **kwargs):
instance = kwargs['instance']
instance.set_status(OperationStatuses.CREATED)
@receiver(post_save, sender=PipelineRunStatus, dispatch_uid="new_pipeline_run_status_saved")
@ignore_updates
@ignore_raw
def new_pipeline_run_status(sender, **kwargs):
instance = kwargs['instance']
pipeline_run = instance.pipeline_run
# Update job last_status
pipeline_run.status = instance
set_started_at(instance=pipeline_run,
status=instance.status,
starting_statuses=[PipelineStatuses.RUNNING])
set_finished_at(instance=pipeline_run,
status=instance.status,
is_done=PipelineStatuses.is_done)
pipeline_run.save()
# Notify operations with status change. This is necessary if we skip or stop the dag run.
if pipeline_run.stopped:
celery_app.send_task(
PipelineCeleryTasks.PIPELINES_STOP_OPERATIONS,
kwargs={'pipeline_run_id': pipeline_run.id,
'message': 'Pipeline run was stopped'})
if pipeline_run.skipped:
celery_app.send_task(
PipelineCeleryTasks.PIPELINES_SKIP_OPERATIONS,
kwargs={'pipeline_run_id': pipeline_run.id,
'message': 'Pipeline run was skipped'})
@receiver(post_save, sender=OperationRunStatus, dispatch_uid="new_operation_run_status_saved")
@ignore_updates
@ignore_raw
def new_operation_run_status(sender, **kwargs):
instance = kwargs['instance']
operation_run = instance.operation_run
pipeline_run = operation_run.pipeline_run
# Update job last_status
operation_run.status = instance
set_started_at(instance=operation_run,
status=instance.status,
starting_statuses=[PipelineStatuses.RUNNING])
set_finished_at(instance=operation_run,
status=instance.status,
is_done=PipelineStatuses.is_done)
operation_run.save()
# No need to check if it is just created
if instance.status == OperationStatuses.CREATED:
return
# Check if we need to update the pipeline_run's status
celery_app.send_task(
PipelineCeleryTasks.PIPELINES_CHECK_STATUSES,
kwargs={'pipeline_run_id': pipeline_run.id,
'status': instance.status,
'message': instance.message})
if operation_run.is_done:
# Notify downstream that instance is done, and that its dependency can start.
downstream_runs = operation_run.downstream_runs.filter(
status__status=OperationStatuses.CREATED)
for op_run in downstream_runs:
celery_app.send_task(
PipelineCeleryTasks.PIPELINES_START_OPERATION,
kwargs={'operation_run_id': op_run.id})
@receiver(pre_delete, sender=OperationRun, dispatch_uid="operation_run_deleted")
@ignore_raw
def operation_run_deleted(sender, **kwargs):
instance = kwargs['instance']
instance.stop()
|
the-stack_0_24094 | import torch
import numpy as np
import torch.nn as nn
from torch.autograd import Variable
from data_io.models import load_pytorch_model
from data_io.instructions import debug_untokenize_instruction
from learning.models.model_pvn_stage1_bidomain import PVN_Stage1_Bidomain
from learning.models.model_pvn_stage2_bidomain import PVN_Stage2_Bidomain
from learning.models.model_pvn_stage2_actor_critic import PVN_Stage2_ActorCritic
from learning.modules.cuda_module import CudaModule
from learning.modules.map_transformer import MapTransformer
from learning.modules.visitation_softmax import VisitationSoftmax
from learning.inputs.pose import Pose
from learning.inputs.vision import standardize_images, standardize_image
from learning.intrinsic_reward.visitation_reward import VisitationReward
from learning.intrinsic_reward.wd_visitation_and_exploration_reward import WDVisitationAndExplorationReward
from learning.intrinsic_reward.map_coverage_reward import MapCoverageReward
from learning.intrinsic_reward.action_oob_reward import ActionOutOfBoundsReward
from learning.intrinsic_reward.visitation_and_exploration_reward import VisitationAndExplorationReward
from learning.inputs.common import cuda_var
from learning.modules.spatial_softmax_2d import SpatialSoftmax2d
from drones.aero_interface.rviz import RvizInterface
from utils.simple_profiler import SimpleProfiler
import parameters.parameter_server as P
from visualization import Presenter
class PVN_Keyboard_Wrapper_Bidomain(CudaModule):
def __init__(self, run_name="", model_instance_name="only"):
super(PVN_Keyboard_Wrapper_Bidomain, self).__init__()
self.instance_name = model_instance_name
self.s1_params = P.get_current_parameters()["ModelPVN"]["Stage1"]
self.wrapper_params = P.get_current_parameters()["PVNWrapper"]
self.real_drone = P.get_current_parameters()["Setup"]["real_drone"]
self.rviz = None
if self.real_drone:
self.rviz = RvizInterface(
base_name="/pvn/",
map_topics=["semantic_map", "visitation_dist"],
markerarray_topics=["instruction"])
self.rl = self.wrapper_params["learning_mode"] == "reinforcement_learning"
self.stage1_visitation_prediction = PVN_Stage1_Bidomain(run_name, model_instance_name)
self.load_models_from_file()
#self.spatialsoftmax = SpatialSoftmax2d()
self.visitation_softmax = VisitationSoftmax()
self.map_transformer_w_to_r = MapTransformer(
source_map_size=self.s1_params["global_map_size"],
dest_map_size=self.s1_params["local_map_size"],
world_size_m=self.s1_params["world_size_m"],
world_size_px=self.s1_params["world_size_px"]
)
self.prev_instruction = None
self.start_poses = None
self.seq_step = 0
self.log_v_dist_w = None
self.v_dist_w = None
self.log_goal_oob_score = None
self.goal_oob_prob_w = None
self.map_coverage_w = None
self.map_uncoverage_w = None
def load_models_from_file(self):
if self.wrapper_params.get("stage1_file"):
print("PVNWrapper: Loading Stage 1")
load_pytorch_model(self.stage1_visitation_prediction, self.wrapper_params["stage1_file"])
# Policy state is whatever needs to be updated during RL training.
# Right now we only update the stage 2 weights.
def get_policy_state(self):
return {}
def set_policy_state(self, state):
pass
def init_weights(self):
self.stage1_visitation_prediction.init_weights()
self.load_models_from_file()
def cuda(self, device=None):
CudaModule.cuda(self, device)
self.stage1_visitation_prediction.cuda(device)
self.map_transformer_w_to_r.cuda(device)
return self
def reset(self):
self.stage1_visitation_prediction.reset()
self.prev_instruction = None
self.start_poses = None
self.log_v_dist_w = None
self.v_dist_w = None
self.log_goal_oob_score = None
self.goal_oob_prob_w = None
self.map_coverage_w = None
self.map_uncoverage_w = None
self.map_coverage_reward.reset()
self.visitation_reward.reset()
self.wd_visitation_and_exploration_reward.reset()
def start_sequence(self):
self.seq_step = 0
self.reset()
def start_segment_rollout(self):
self.start_sequence()
def cam_poses_from_states(self, states):
cam_pos = states[:, 9:12]
cam_rot = states[:, 12:16]
pose = Pose(cam_pos, cam_rot)
return pose
def calc_intrinsic_rewards(self, next_state, action):
if self.v_dist_w is None or self.map_coverage_w is None:
raise ValueError("Computing intrinsic reward prior to any rollouts!")
else:
states_np = next_state.state[np.newaxis, :]
states = torch.from_numpy(states_np)
cam_pos = states[:, 0:12]
if self.s1_params.get("clip_observability") and self.wrapper_params.get("wasserstein_reward"):
visitation_reward, stop_reward, exploration_reward = self.wd_visitation_and_exploration_reward(
self.v_dist_w, self.goal_oob_prob_w, cam_pos, action)
elif self.s1_params.get("clip_observability"):
visitation_reward, stop_reward, exploration_reward = self.visitation_and_exploration_reward(
self.v_dist_w, self.goal_oob_prob_w, cam_pos, action)
else:
visitation_reward, stop_reward = self.visitation_reward(self.v_dist_w, cam_pos, action)
exploration_reward = 0.0
#map_reward = self.map_coverage_reward(self.map_coverage_w)
#return visitation_reward + map_reward
negative_per_step_reward = -0.04
action_oob_reward = self.action_oob_reward.get_reward(action)
return {"visitation_reward": visitation_reward,
"stop_reward": stop_reward,
"exploration_reward": exploration_reward,
"negative_per_step_reward": negative_per_step_reward,
"action_oob_reward": action_oob_reward}
def states_to_torch(self, state):
states_np = state.state[np.newaxis, :]
images_np = state.image[np.newaxis, :]
images_np = standardize_images(images_np, out_np=True)
images_fpv = torch.from_numpy(images_np).float()
states = torch.from_numpy(states_np)
return states, images_fpv
def get_action(self, state, instruction, sample=False, rl_rollout=False):
"""
Given a DroneState (from PomdpInterface) and instruction, produce a numpy 4D action (x, y, theta, pstop)
:param state: DroneState object with the raw image from the simulator
:param instruction: Tokenized instruction given the corpus
:param sample: (Only applies if self.rl): If true, sample action from action distribution. If False, take most likely action.
#TODO: Absorb corpus within model
:return:
"""
self.eval()
ACTPROF = False
actprof = SimpleProfiler(print=ACTPROF, torch_sync=ACTPROF)
states, images_fpv = self.states_to_torch(state)
first_step = True
if instruction == self.prev_instruction:
first_step = False
if first_step:
self.reset()
self.start_poses = self.cam_poses_from_states(states)
if self.rviz is not None:
dbg_instr = "\n".join(Presenter().split_lines(debug_untokenize_instruction(instruction), maxchars=45))
self.rviz.publish_instruction_text("instruction", dbg_instr)
self.prev_instruction = instruction
self.seq_step += 1
instr_len = [len(instruction)] if instruction is not None else None
instructions = torch.LongTensor(instruction).unsqueeze(0)
plan_now = self.seq_step % self.s1_params["plan_every_n_steps"] == 0 or first_step
# Run stage1 visitation prediction
# TODO: There's a bug here where we ignore images between planning timesteps. That's why must plan every timestep
if plan_now or True:
device = next(self.parameters()).device
images_fpv = images_fpv.to(device)
states = states.to(device)
instructions = instructions.to(device)
self.start_poses = self.start_poses.to(device)
actprof.tick("start")
#print("Planning for: " + debug_untokenize_instruction(list(instructions[0].detach().cpu().numpy())))
self.log_v_dist_w, v_dist_w_poses, self.log_goal_oob_score, rl_outputs = self.stage1_visitation_prediction(
images_fpv, states, instructions, instr_len,
plan=[True], firstseg=[first_step],
noisy_start_poses=self.start_poses,
start_poses=self.start_poses,
select_only=True,
rl=True
)
actprof.tick("stage1")
self.map_coverage_w = rl_outputs["map_coverage_w"]
self.map_uncoverage_w = rl_outputs["map_uncoverage_w"]
self.v_dist_w, self.goal_oob_prob_w = self.visitation_softmax(self.log_v_dist_w, self.log_goal_oob_score)
if self.rviz:
v_dist_w_np = self.v_dist_w[0].data.cpu().numpy().transpose(1, 2, 0)
# expand to 0-1 range
v_dist_w_np[:, :, 0] /= (np.max(v_dist_w_np[:, :, 0]) + 1e-10)
v_dist_w_np[:, :, 1] /= (np.max(v_dist_w_np[:, :, 1]) + 1e-10)
self.rviz.publish_map("visitation_dist", v_dist_w_np,
self.s1_params["world_size_m"])
# Transform to robot reference frame
cam_poses = self.cam_poses_from_states(states)
# Log-distributions CANNOT be transformed - the transformer fills empty space with zeroes, which makes sense for
# probability distributions, but makes no sense for likelihood scores
map_coverage_r, _ = self.map_transformer_w_to_r(self.map_coverage_w, None, cam_poses)
map_uncoverage_r, _ = self.map_transformer_w_to_r(self.map_uncoverage_w, None, cam_poses)
v_dist_r, r_poses = self.map_transformer_w_to_r(self.v_dist_w, None, cam_poses)
# Run stage2 action generation
if self.rl:
actprof.tick("pipes")
# If RL, stage 2 outputs distributions over actions (following torch.distributions API)
xvel_dist, yawrate_dist, stop_dist, value = self.stage2_action_generation(v_dist_r, map_uncoverage_r, eval=True)
actprof.tick("stage2")
if sample:
xvel, yawrate, stop = self.stage2_action_generation.sample_action(xvel_dist, yawrate_dist, stop_dist)
else:
xvel, yawrate, stop = self.stage2_action_generation.mode_action(xvel_dist, yawrate_dist, stop_dist)
actprof.tick("sample")
xvel_logprob, yawrate_logprob, stop_logprob = self.stage2_action_generation.action_logprob(xvel_dist, yawrate_dist, stop_dist, xvel, yawrate, stop)
xvel = xvel.detach().cpu().numpy()
yawrate = yawrate.detach().cpu().numpy()
stop = stop.detach().cpu().numpy()
xvel_logprob = xvel_logprob.detach()
yawrate_logprob = yawrate_logprob.detach()
stop_logprob = stop_logprob.detach()
value = value.detach()#.cpu().numpy()
# Add an empty column for sideways velocity
act = np.concatenate([xvel, np.zeros(xvel.shape), yawrate, stop])
# This will be needed to compute rollout statistics later on
#v_dist_w = self.visitation_softmax(self.log_v_dist_w, self.log_goal_oob_score)
# Keep all the info we will need later for A2C / PPO training
# TODO: We assume independence between velocity and stop distributions. Not true, but what ya gonna do?
rl_data = {
"policy_input": v_dist_r[0].detach(),
"v_dist_w": self.v_dist_w[0].detach(),
"policy_input_b": map_uncoverage_r[0].detach(),
"value_pred": value[0],
"xvel": xvel,
"yawrate": yawrate,
"stop": stop,
"xvel_logprob": xvel_logprob,
"yawrate_logprob": yawrate_logprob,
"stop_logprob": stop_logprob,
"action_logprob": xvel_logprob + stop_logprob + yawrate_logprob
}
actprof.tick("end")
actprof.loop()
actprof.print_stats(1)
if rl_rollout:
return act, rl_data
else:
return act
else:
action = self.stage2_action_generation(v_dist_r, firstseg=[first_step], eval=True)
output_action = action.squeeze().data.cpu().numpy()
stop_prob = output_action[3]
output_stop = 1 if stop_prob > self.s2_params["stop_threshold"] else 0
output_action[3] = output_stop
return output_action |
the-stack_0_24095 | import motor.motor_asyncio
import discord
from discord.ext import commands
from .errors import CaptainNotFound, RoleNotFound
class MongoTeam:
"""Represents a team stored in MongoDB"""
def __init__(self, mongo_object: dict, collection: motor.motor_asyncio.AsyncIOMotorCollection):
"""
Init
:param mongo_object: MongoDB document
:param collection: MongoDB Collection connection
"""
self._collection = collection
self.team_id = mongo_object.get("_id")
self.battlefy_tournament_id = mongo_object.get("battlefyTournamentId")
self.name = mongo_object.get("name")
self.logo_icon = mongo_object.get("logoUrl", None)
self.captain_discord = mongo_object.get("captainDiscord")
self.additional_discord = mongo_object.get("additionalDiscord", [])
self.bracket = mongo_object.get("bracket", 0)
self.checkin = mongo_object.get("checkin", False)
async def set_check_in(self, status: bool = True) -> bool:
"""
Check in the team
:param status: Check in status
:return: success status
"""
update = await self._collection.update_one(
{"name": self.name, "battlefyTournamentId": self.battlefy_tournament_id},
{"$set": {"checkin": status}}, upsert=True)
if update:
self.checkin = status
return True
else:
return False
async def set_bracket(self, bracket: int) -> bool:
"""
Set team's bracket
:param bracket: Bracket to set
:return: success status
"""
update = await self._collection.update_one(
{"name": self.name, "battlefyTournamentId": self.battlefy_tournament_id},
{"$set": {"bracket": bracket}}, upsert=True)
if update:
self.bracket = bracket
return True
else:
return False
async def set_captain_discord(self, captain_discord: str) -> bool:
"""
Set team's bracket
:param captain_discord: captain username#tag
:return: success status
"""
update = await self._collection.update_one(
{"name": self.name, "battlefyTournamentId": self.battlefy_tournament_id},
{"$set": {"captainDiscord": captain_discord}}, upsert=True)
if update:
self.captain_discord = captain_discord
return True
else:
return False
async def add_additional_discord(self, discord_field: str) -> bool:
"""
Add additional Discord to team
:param discord_field: Discord ID
:return: success status
"""
update = await self._collection.update_one(
{"name": self.name, "battlefyTournamentId": self.battlefy_tournament_id},
{"$set": {"additional_discord": {"$each": [discord_field]}}}, upsert=True)
if update:
            self.additional_discord.append(discord_field)
return True
else:
return False
async def set_assign_bracket(self, ctx: commands.Context, bracket_info: dict) -> bool:
"""
Assign Role and set bracket
:param ctx: Discord Context
:param bracket_info: format like so {"name": "ALPHA", "id": 1}
:return: success status
"""
if not self.captain_discord:
raise CaptainNotFound
try:
# If we can't find the person stated on the discord field in the server
if not (captain := await commands.MemberConverter().convert(ctx, self.captain_discord)):
raise CaptainNotFound
except commands.BadArgument: # catch all for above
raise CaptainNotFound
try:
if not (role := discord.utils.get(ctx.guild.roles, name=f"{bracket_info['name']}")):
raise RoleNotFound
await captain.add_roles(role)
return await self.set_bracket(bracket_info['id'])
except discord.Forbidden:
pass
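# Hypothetical usage sketch (assumes a reachable MongoDB instance and a "teams"
# collection whose documents follow the fields read in __init__; the connection
# string and names below are illustrative only):
#
#   client = motor.motor_asyncio.AsyncIOMotorClient("mongodb://localhost:27017")
#   teams = client["tournament"]["teams"]
#   doc = await teams.find_one({"name": "Team Alpha"})
#   team = MongoTeam(doc, teams)
#   await team.set_check_in(True)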
|
the-stack_0_24096 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Label map utility functions."""
import logging
import tensorflow as tf
from google.protobuf import text_format
from tf_utils.protos import string_int_label_map_pb2
def _validate_label_map(label_map):
"""Checks if a label map is valid.
Args:
label_map: StringIntLabelMap to validate.
Raises:
ValueError: if label map is invalid.
"""
for item in label_map.item:
if item.id < 1:
raise ValueError('Label map ids should be >= 1.')
def create_category_index(categories):
"""Creates dictionary of COCO compatible categories keyed by category id.
Args:
categories: a list of dicts, each of which has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
Returns:
category_index: a dict containing the same entries as categories, but keyed
by the 'id' field of each category.
"""
category_index = {}
for cat in categories:
category_index[cat['id']] = cat
return category_index
def get_max_label_map_index(label_map):
"""Get maximum index in label map.
Args:
label_map: a StringIntLabelMapProto
Returns:
an integer
"""
return max([item.id for item in label_map.item])
def convert_label_map_to_categories(label_map,
max_num_classes,
use_display_name=True):
"""Loads label map proto and returns categories list compatible with eval.
This function loads a label map and returns a list of dicts, each of which
has the following keys:
'id': (required) an integer id uniquely identifying this category.
'name': (required) string representing category name
e.g., 'cat', 'dog', 'pizza'.
We only allow class into the list if its id-label_id_offset is
between 0 (inclusive) and max_num_classes (exclusive).
If there are several items mapping to the same id in the label map,
we will only keep the first one in the categories list.
Args:
label_map: a StringIntLabelMapProto or None. If None, a default categories
list is created with max_num_classes categories.
max_num_classes: maximum number of (consecutive) label indices to include.
use_display_name: (boolean) choose whether to load 'display_name' field
as category name. If False or if the display_name field does not exist,
uses 'name' field as category names instead.
Returns:
categories: a list of dictionaries representing all possible categories.
"""
categories = []
list_of_ids_already_added = []
if not label_map:
label_id_offset = 1
for class_id in range(max_num_classes):
categories.append({
'id': class_id + label_id_offset,
'name': 'category_{}'.format(class_id + label_id_offset)
})
return categories
for item in label_map.item:
if not 0 < item.id <= max_num_classes:
logging.info('Ignore item %d since it falls outside of requested '
'label range.', item.id)
continue
if use_display_name and item.HasField('display_name'):
name = item.display_name
else:
name = item.name
if item.id not in list_of_ids_already_added:
list_of_ids_already_added.append(item.id)
categories.append({'id': item.id, 'name': name})
return categories
def load_labelmap(path):
"""Loads label map proto.
Args:
path: path to StringIntLabelMap proto text file.
Returns:
a StringIntLabelMapProto
"""
with tf.gfile.GFile(path, 'r') as fid:
label_map_string = fid.read()
label_map = string_int_label_map_pb2.StringIntLabelMap()
try:
text_format.Merge(label_map_string, label_map)
except text_format.ParseError:
label_map.ParseFromString(label_map_string)
_validate_label_map(label_map)
return label_map
def get_label_map_dict(label_map_path, use_display_name=False):
"""Reads a label map and returns a dictionary of label names to id.
Args:
label_map_path: path to label_map.
use_display_name: whether to use the label map items' display names as keys.
Returns:
A dictionary mapping label names to id.
"""
label_map = load_labelmap(label_map_path)
label_map_dict = {}
for item in label_map.item:
if use_display_name:
label_map_dict[item.display_name] = item.id
else:
label_map_dict[item.name] = item.id
return label_map_dict
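# Illustrative example (hypothetical label map file, not shipped with this module):
# a text file containing
#
#   item { id: 1 name: 'dog' }
#   item { id: 2 name: 'cat' }
#
# would make get_label_map_dict(path) return {'dog': 1, 'cat': 2}.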
def create_category_index_from_labelmap(label_map_path):
"""Reads a label map and returns a category index.
Args:
label_map_path: Path to `StringIntLabelMap` proto text file.
Returns:
A category index, which is a dictionary that maps integer ids to dicts
containing categories, e.g.
{1: {'id': 1, 'name': 'dog'}, 2: {'id': 2, 'name': 'cat'}, ...}
"""
label_map = load_labelmap(label_map_path)
max_num_classes = max(item.id for item in label_map.item)
categories = convert_label_map_to_categories(label_map, max_num_classes)
return create_category_index(categories)
def create_class_agnostic_category_index():
"""Creates a category index with a single `object` class."""
return {1: {'id': 1, 'name': 'object'}}
|
the-stack_0_24098 | from datetime import datetime
from copy import deepcopy
import geopandas as gpd
import pandas as pd
import numpy as np
import STARTHER
import lastnedvegnett
import skrivdataframe
import nvdbapiv3
import nvdbgeotricks
t0 = datetime.now()
# Henter alle tunneler
mittfilter = lastnedvegnett.kostraFagdataFilter( mittfilter={} )
# Henter tunneller
sok = nvdbapiv3.nvdbFagdata( 581)
sok.filter( mittfilter )
tunnelGdf = nvdbgeotricks.records2gdf( sok.to_records() )
tunnelGdf.to_file( 'tunneldebug.gpkg', layer='alletunneller', driver='GPKG' )
# Unfortunately, some tunnel bores lack the property "Lengde, offisiell" (official length)
# We will fill in the missing data from the tunnel bore objects
# First of all, split the tunnel dataset in two
manglerLengde = tunnelGdf[ tunnelGdf['Lengde, offisiell'].isnull() ].copy()
harLengde = tunnelGdf[ ~tunnelGdf['Lengde, offisiell'].isnull() ]
# First, get hold of the tunnel bores
sok = nvdbapiv3.nvdbFagdata( 67 )
sok.filter( mittfilter )
tLopGdf = nvdbgeotricks.records2gdf( sok.to_records( ) )
tLopGdf.to_file( 'tunneldebug.gpkg', layer='alletunnellop', driver='GPKG' )
# Keep the geometry length
tLopGdf['geometrilengde'] = tLopGdf['geometry'].apply( lambda x : x.length )
# Create a buffer around each tunnel (point object) and do a spatial join
manglerLengde['geometry'] = manglerLengde['geometry'].apply( lambda x : x.buffer( 10, resolution=16) )
geojoin = gpd.sjoin( manglerLengde, tLopGdf, how='inner', op='intersects' )
# Do we have a length property? Split the dataset in two.
lop_harLengde = geojoin[ ~geojoin['Lengde'].isnull( )].copy()
lop_manglerLengde = geojoin[ geojoin['Lengde'].isnull( )].copy()
# Take the value of the 'Lengde' property
lop_harLengde['Lengde, offisiell'] = lop_harLengde['Lengde']
# Take the length from the geometry property
lop_manglerLengde['Lengde, offisiell'] = lop_manglerLengde['geometrilengde']
# Rename columns so they match the original dataset (tunnel)
col = list( tunnelGdf.columns )
oversett = { 'objekttype_left' : 'objekttype',
'nvdbId_left' : 'nvdbId',
'versjon_left' : 'versjon',
'startdato_left' : 'startdato',
'Åpningsår_left' : 'Åpningsår',
'Navn_left' : 'Navn',
'veglenkesekvensid_left' : 'veglenkesekvensid',
'detaljnivå_left' : 'detaljnivå',
'typeVeg_left' : 'typeVeg',
'kommune_left' : 'kommune',
'fylke_left' : 'fylke',
'vref_left' : 'vref',
'vegkategori_left' : 'vegkategori',
'fase_left' : 'fase',
'nummer_left' : 'nummer',
'adskilte_lop_left' : 'adskilte_lop',
'trafikantgruppe_left' : 'trafikantgruppe',
'Prosjektreferanse_left' : 'Prosjektreferanse',
'Brutus_Id_left' : 'Brutus_Id',
'sluttdato_left' : 'sluttdato'
}
lop_manglerLengde.rename( columns=oversett, inplace=True )
lop_harLengde.rename( columns=oversett, inplace=True )
tunnelGdfV2 = pd.concat( [ harLengde, lop_manglerLengde[col], lop_harLengde[col] ] )
telling = tunnelGdfV2.groupby( ['fylke' ]).agg( { 'nvdbId': 'nunique', 'Lengde, offisiell' : 'sum'} ).astype(int).reset_index()
telling.rename( columns={ 'nvdbId' : 'Antall', 'Lengde, offisiell' : 'Lengde (m)' }, inplace=True )
skrivdataframe.skrivdf2xlsx( telling, '../kostraleveranse2021/Kostra 13 og 14 - tunnell fylkesveg.xlsx', sheet_name='Tunnel Fv', metadata=mittfilter)
langeTunneller = tunnelGdfV2[ tunnelGdfV2['Lengde, offisiell'] >= 500 ]
telling = langeTunneller.groupby( ['fylke' ]).agg( { 'nvdbId': 'nunique', 'Lengde, offisiell' : 'sum'} ).astype(int).reset_index()
telling.rename( columns={ 'nvdbId' : 'Antall', 'Lengde, offisiell' : 'Lengde (m)' }, inplace=True )
skrivdataframe.skrivdf2xlsx( telling, '../kostraleveranse2021/Kostra 15 - tunnell lengre enn 500m.xlsx', sheet_name='Tunnel lengre enn 500m', metadata=mittfilter)
tidsbruk = datetime.now() - t0 |
the-stack_0_24099 | from io import BytesIO
from pathlib import Path
import librosa
import numpy as np
from encoder import inference as encoder
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import StreamingResponse
from scipy.io import wavfile
from synthesizer.inference import Synthesizer
from vocoder import inference as vocoder
print("Preparing the encoder, the synthesizer and the vocoder...")
encoder.load_model(Path("encoder/saved_models/pretrained.pt"))
synthesizer = Synthesizer(Path("synthesizer/saved_models/logs-pretrained/taco_pretrained"), low_mem=False, seed=None)
vocoder.load_model(Path("vocoder/saved_models/pretrained/pretrained.pt"))
def load_embedding(file):
original_wav, sampling_rate = librosa.load(file)
preprocessed_wav = encoder.preprocess_wav(original_wav, sampling_rate)
print("Loaded file succesfully")
emb = encoder.embed_utterance(preprocessed_wav)
return emb
embed = load_embedding("gerty_sample.wav")
app = FastAPI()
@app.post("/sample")
async def embed_file(file: UploadFile = File(...)):
global embed
embed = load_embedding(file.file)
return 200
@app.post("/tts")
async def create_upload_file(text: str):
texts = [text]
embeds = [embed]
# If you know what the attention layer alignments are, you can retrieve them here by
# passing return_alignments=True
specs = synthesizer.synthesize_spectrograms(texts, embeds)
spec = specs[0]
print("Created the mel spectrogram")
# Generating the waveform
print("Synthesizing the waveform:")
# Synthesizing the waveform is fairly straightforward. Remember that the longer the
# spectrogram, the more time-efficient the vocoder.
generated_wav = vocoder.infer_waveform(spec)
# Post-generation
# There's a bug with sounddevice that makes the audio cut one second earlier, so we
# pad it.
generated_wav = np.pad(generated_wav, (0, synthesizer.sample_rate), mode="constant")
# Trim excess silences to compensate for gaps in spectrograms (issue #53)
generated_wav = encoder.preprocess_wav(generated_wav)
# Save it on the disk
output = BytesIO()
wavfile.write(output, synthesizer.sample_rate, generated_wav.astype(np.float32))
return StreamingResponse(output, media_type="audio/x-wav")
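# Hypothetical client-side usage (module name and port are assumptions for a local
# run; adjust to wherever this app is actually served):
#
#   uvicorn app_module:app --port 8000
#   curl -X POST "http://localhost:8000/sample" -F "file=@speaker_sample.wav"
#   curl -X POST "http://localhost:8000/tts?text=Hello%20there" --output out.wav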
|
the-stack_0_24108 | from myhdl import *
@block
def id(ins,alu_signal,mem_read,mem_write,register_write,rd_r0_mux,rd_r1_mux
,ds1_rx,ds2_rx,rd_mux0,rd_mux1,
cr_write,selector,imm,branch_offset,bra,ret,apc,jmp):
'''
ins in 16
alu_signal out 4
mem_read out 1
mem_write out 1
register_write out 8
rd_r0_mux out 1
rd_r1_mux out 1
selector out 3
cr_write out 1
ds1_rx out 3
ds2_rx out 3
imm out 8
branch_offset out 16
jmp out 1
ret out 1
apc out 1
bra out 1
'''
#opcode_r = Signal(intbv(0)[2:])
#opcode_b = Signal(intbv(1)[2:])
#opcode_sys = Signal(intbv(2)[2:])
opcode_ls = Signal(intbv(3)[2:])
funct4_0 = Signal(intbv(0)[4:])
funct4_1 = Signal(intbv(1)[4:])
funct4_2 = Signal(intbv(2)[4:])
funct4_3 = Signal(intbv(3)[4:])
funct4_4 = Signal(intbv(4)[4:])
funct4_5 = Signal(intbv(5)[4:])
#funct4_6 = Signal(intbv(6)[4:])
#funct4_7 = Signal(intbv(7)[4:])
funct4_8 = Signal(intbv(8)[4:])
funct4_9 = Signal(intbv(9)[4:])
#funct4_10 = Signal(intbv(10)[4:])
#funct4_11 = Signal(intbv(11)[4:])
#funct4_12 = Signal(intbv(12)[4:])
#funct4_13 = Signal(intbv(13)[4:])
#funct4_14 = Signal(intbv(14)[4:])
#funct4_15 = Signal(intbv(15)[4:])
#states_alu = enum('add0', 'sub0', 'and0', 'or0', 'xor0', 'sr0', 'sl0', 'sra0', 'slt0', 'sltu0', 'eq0', 'neq0')
states_opcode = enum("r","b","sys","ls")
states_rd = enum("a","b","c","d","e","f","g","h")
ins20 = Signal(intbv(0)[3:])
ins96 = Signal(intbv(0)[3:])
@always_comb
def trans_logic():
ins20.next = ins[2:0]
ins96.next = ins[9:6]
@always_comb
def id_logic():
if ins20==states_opcode.r:
#alu_signal
alu_signal.next = ins[6:2]
#register_write signal 1
register_write.next = ins[9:6]
else:
alu_signal.next = 0
# register_write signal 1
register_write.next = 0
if ins20==states_opcode.b:
bra.next = bool(1)
else:
bra.next = bool(0)
if ins20 == states_opcode.sys:
register_write[0].next = ins[6:2]==funct4_4
register_write[1].next = ins[6:2] == funct4_4
rd_r0_mux.next=ins[6:2] == funct4_4
rd_r1_mux.next=ins[6:2] == funct4_4
cr_write.next =ins[6:2] == funct4_3
#special
jmp.next = (ins[6:2]==funct4_0 or ins[6:2] ==funct4_2)
apc.next = (ins[6:2]==funct4_0 or ins[6:2]==funct4_1)
ret.next = (ins[6:2]==funct4_5)
else:
register_write[0].next = bool(0)
register_write[1].next = bool(0)
rd_r0_mux.next = bool(0)
rd_r1_mux.next = bool(0)
cr_write.next = bool(0)
# special
jmp.next = bool(0)
apc.next = bool(0)
ret.next = bool(0)
if ins20 == states_opcode.ls:
#mem
mem_read.next = (ins[6:2] == funct4_8)
mem_write.next = (ins[6:2] == funct4_9)
#register_write signal 2
#register_write[0].next = ((ins[9:6]==funct4_9)|(ins[6:2]==funct4_0))&(ins[9:6]==0)
if (ins[9:6]==funct4_9)|(ins[6:2]==funct4_0):
if ins96==states_rd.a:
register_write[0].next = 1
elif ins96 == states_rd.b:
register_write[1].next = 1
elif ins96 == states_rd.c:
register_write[2].next = 1
elif ins96 == states_rd.d:
register_write[3].next = 1
elif ins96 == states_rd.e:
register_write[4].next = 1
elif ins96 == states_rd.f:
register_write[5].next = 1
elif ins96 == states_rd.g:
register_write[6].next = 1
elif ins96 == states_rd.h:
register_write[7].next = 1
else :
register_write.next = 0
else:
register_write.next = 0
else :
mem_read.next = bool(0)
mem_write.next = bool(0)
register_write.next = 0
@always_comb
def rd_logic():
rd_mux0.next = (ins[6:2]==funct4_0)
rd_mux1.next = (ins[2:0]==opcode_ls)
#other two
#rd_r0_mux and rd_r1_mux are in id logic
#maybe need to change it
@always_comb
def ds_logic():
ds2_rx.next = ins[12:9]
ds1_rx.next = ins[12:9]
@always_comb
def cr_write_logic():
selector.next = ins[12:9]
@always_comb
def imm_branch_logic():
imm[7].next = 0
imm[7:0].next = ins[16:9]
branch_offset[15].next = ins[15]
branch_offset[14].next = ins[15]
branch_offset[13].next = ins[15]
branch_offset[12].next = ins[15]
branch_offset[11].next = ins[15]
branch_offset[10].next = ins[15]
branch_offset[9].next = ins[15]
branch_offset[8].next = ins[15]
branch_offset[8:4].next = ins[15:12]
branch_offset[4:1].next = ins[9:6]
branch_offset[0].next = 0
return instances()
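# Hypothetical elaboration sketch (signal widths follow the port list in the
# docstring above; the names below are assumptions for illustration only):
#
#   ins = Signal(intbv(0)[16:])
#   alu_signal = Signal(intbv(0)[4:])
#   register_write = Signal(intbv(0)[8:])
#   ds1_rx, ds2_rx, selector = [Signal(intbv(0)[3:]) for _ in range(3)]
#   imm = Signal(intbv(0)[8:])
#   branch_offset = Signal(intbv(0)[16:])
#   (mem_read, mem_write, rd_r0_mux, rd_r1_mux, rd_mux0, rd_mux1,
#    cr_write, bra, ret, apc, jmp) = [Signal(bool(0)) for _ in range(11)]
#   decoder = id(ins, alu_signal, mem_read, mem_write, register_write,
#                rd_r0_mux, rd_r1_mux, ds1_rx, ds2_rx, rd_mux0, rd_mux1,
#                cr_write, selector, imm, branch_offset, bra, ret, apc, jmp)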
|
the-stack_0_24109 | # dot.py - create dot code
r"""Assemble DOT source code objects.
>>> dot = Graph(comment=u'M\xf8nti Pyth\xf8n ik den H\xf8lie Grailen')
>>> dot.node(u'M\xf8\xf8se')
>>> dot.node('trained_by', u'trained by')
>>> dot.node('tutte', u'TUTTE HERMSGERVORDENBROTBORDA')
>>> dot.edge(u'M\xf8\xf8se', 'trained_by')
>>> dot.edge('trained_by', 'tutte')
>>> dot.node_attr['shape'] = 'rectangle'
>>> print(dot.source.replace(u'\xf8', '0')) #doctest: +NORMALIZE_WHITESPACE
// M0nti Pyth0n ik den H0lie Grailen
graph {
node [shape=rectangle]
"M00se"
trained_by [label="trained by"]
tutte [label="TUTTE HERMSGERVORDENBROTBORDA"]
"M00se" -- trained_by
trained_by -- tutte
}
>>> dot.view('test-output/m00se.gv') # doctest: +SKIP
'test-output/m00se.gv.pdf'
"""
from . import lang
from . import files
__all__ = ['Graph', 'Digraph']
class Dot(files.File):
"""Assemble, save, and render DOT source code, open result in viewer."""
_comment = '// %s'
_subgraph = 'subgraph %s{'
_subgraph_plain = '%s{'
_node = _attr = '\t%s%s'
_attr_plain = _attr % ('%s', '')
_tail = '}'
_quote = staticmethod(lang.quote)
_quote_edge = staticmethod(lang.quote_edge)
_a_list = staticmethod(lang.a_list)
_attr_list = staticmethod(lang.attr_list)
def __init__(self, name=None, comment=None,
filename=None, directory=None,
format=None, engine=None, encoding=files.ENCODING,
graph_attr=None, node_attr=None, edge_attr=None, body=None,
strict=False):
self.name = name
self.comment = comment
super(Dot, self).__init__(filename, directory, format, engine, encoding)
self.graph_attr = dict(graph_attr) if graph_attr is not None else {}
self.node_attr = dict(node_attr) if node_attr is not None else {}
self.edge_attr = dict(edge_attr) if edge_attr is not None else {}
self.body = list(body) if body is not None else []
self.strict = strict
def _kwargs(self):
result = super(Dot, self)._kwargs()
result.update(name=self.name,
comment=self.comment,
graph_attr=dict(self.graph_attr),
node_attr=dict(self.node_attr),
edge_attr=dict(self.edge_attr),
body=list(self.body),
strict=self.strict)
return result
def clear(self, keep_attrs=False):
"""Reset content to an empty body, clear graph/node/egde_attr mappings.
Args:
keep_attrs (bool): preserve graph/node/egde_attr mappings
"""
if not keep_attrs:
for a in (self.graph_attr, self.node_attr, self.edge_attr):
a.clear()
del self.body[:]
def __iter__(self, subgraph=False):
"""Yield the DOT source code line by line (as graph or subgraph)."""
if self.comment:
yield self._comment % self.comment
if subgraph:
if self.strict:
raise ValueError('subgraphs cannot be strict')
head = self._subgraph if self.name else self._subgraph_plain
else:
head = self._head_strict if self.strict else self._head
yield head % (self._quote(self.name) + ' ' if self.name else '')
for kw in ('graph', 'node', 'edge'):
attrs = getattr(self, '%s_attr' % kw)
if attrs:
yield self._attr % (kw, self._attr_list(None, attrs))
for line in self.body:
yield line
yield self._tail
def __str__(self):
"""The DOT source code as string."""
return '\n'.join(self)
source = property(__str__, doc=__str__.__doc__)
def node(self, name, label=None, _attributes=None, **attrs):
"""Create a node.
Args:
name: Unique identifier for the node inside the source.
label: Caption to be displayed (defaults to the node ``name``).
attrs: Any additional node attributes (must be strings).
"""
name = self._quote(name)
attr_list = self._attr_list(label, attrs, _attributes)
line = self._node % (name, attr_list)
self.body.append(line)
def edge(self, tail_name, head_name, label=None, _attributes=None, **attrs):
"""Create an edge between two nodes.
Args:
tail_name: Start node identifier (format: ``node[:port[:compass]]``).
head_name: End node identifier (format: ``node[:port[:compass]]``).
label: Caption to be displayed near the edge.
attrs: Any additional edge attributes (must be strings).
Note:
The ``tail_name`` and ``head_name`` strings are separated by
(optional) colon(s) into ``node`` name, ``port`` name, and
``compass`` (e.g. ``sw``).
See :ref:`details in the User Guide <ports>`.
"""
tail_name = self._quote_edge(tail_name)
head_name = self._quote_edge(head_name)
attr_list = self._attr_list(label, attrs, _attributes)
line = self._edge % (tail_name, head_name, attr_list)
self.body.append(line)
def edges(self, tail_head_iter):
"""Create a bunch of edges.
Args:
tail_head_iter: Iterable of ``(tail_name, head_name)`` pairs
(format:``node[:port[:compass]]``).
Note:
The ``tail_name`` and ``head_name`` strings are separated by
(optional) colon(s) into ``node`` name, ``port`` name, and
``compass`` (e.g. ``sw``).
See :ref:`details in the User Guide <ports>`.
"""
edge = self._edge_plain
quote = self._quote_edge
lines = (edge % (quote(t), quote(h)) for t, h in tail_head_iter)
self.body.extend(lines)
def attr(self, kw=None, _attributes=None, **attrs):
"""Add a general or graph/node/edge attribute statement.
Args:
kw: Attributes target (``None`` or ``'graph'``, ``'node'``, ``'edge'``).
attrs: Attributes to be set (must be strings, may be empty).
See the :ref:`usage examples in the User Guide <attributes>`.
"""
if kw is not None and kw.lower() not in ('graph', 'node', 'edge'):
raise ValueError('attr statement must target graph, node, or edge: '
'%r' % kw)
if attrs or _attributes:
if kw is None:
a_list = self._a_list(None, attrs, _attributes)
line = self._attr_plain % a_list
else:
attr_list = self._attr_list(None, attrs, _attributes)
line = self._attr % (kw, attr_list)
self.body.append(line)
def subgraph(self, graph=None, name=None, comment=None,
graph_attr=None, node_attr=None, edge_attr=None, body=None):
"""Add the current content of the given sole ``graph`` argument as subgraph \
or return a context manager returning a new graph instance created \
with the given (``name``, ``comment``, etc.) arguments whose content is \
added as subgraph when leaving the context manager's ``with``-block.
Args:
graph: An instance of the same kind (:class:`.Graph`, :class:`.Digraph`)
as the current graph (sole argument in non-with-block use).
name: Subgraph name (``with``-block use).
comment: Subgraph comment (``with``-block use).
graph_attr: Subgraph-level attribute-value mapping (``with``-block use).
node_attr: Node-level attribute-value mapping (``with``-block use).
edge_attr: Edge-level attribute-value mapping (``with``-block use).
body: Verbatim lines to add to the subgraph ``body`` (``with``-block use).
See the :ref:`usage examples in the User Guide <subgraphs>`.
When used as a context manager, the returned new graph instance uses
``strict=None`` and the parent graph's values for ``directory``,
``format``, ``engine``, and ``encoding`` by default.
Note:
If the ``name`` of the subgraph begins with ``'cluster'`` (all lowercase)
the layout engine will treat it as a special cluster subgraph.
"""
if graph is None:
return SubgraphContext(self, {'name': name,
'comment': comment,
'directory': self.directory,
'format': self.format,
'engine': self.engine,
'encoding': self.encoding,
'graph_attr': graph_attr,
'node_attr': node_attr,
'edge_attr': edge_attr,
'body': body,
'strict': None})
args = [name, comment, graph_attr, node_attr, edge_attr, body]
if not all(a is None for a in args):
raise ValueError('graph must be sole argument of subgraph()')
if graph.directed != self.directed:
raise ValueError('%r cannot add subgraph of different kind:'
' %r' % (self, graph))
lines = ['\t' + line for line in graph.__iter__(subgraph=True)]
self.body.extend(lines)
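# A brief usage sketch of the two ``subgraph`` call styles documented above;
# the graph and node names are made-up examples, not part of this module:
#
#     p = Graph(name='parent')
#     c = Graph(name='child')
#     c.node('a')
#     p.subgraph(c)                                # sole-argument style
#
#     with p.subgraph(name='cluster_sub') as s:    # context-manager style
#         s.node('b')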
class SubgraphContext(object):
"""Return a blank instance of the parent and add as subgraph on exit."""
def __init__(self, parent, kwargs):
self.parent = parent
self.graph = parent.__class__(**kwargs)
def __enter__(self):
return self.graph
def __exit__(self, type_, value, traceback):
if type_ is None:
self.parent.subgraph(self.graph)
class Graph(Dot):
"""Graph source code in the DOT language.
Args:
name: Graph name used in the source code.
comment: Comment added to the first line of the source.
filename: Filename for saving the source (defaults to ``name`` + ``'.gv'``).
directory: (Sub)directory for source saving and rendering.
format: Rendering output format (``'pdf'``, ``'png'``, ...).
engine: Layout command used (``'dot'``, ``'neato'``, ...).
encoding: Encoding for saving the source.
graph_attr: Mapping of ``(attribute, value)`` pairs for the graph.
node_attr: Mapping of ``(attribute, value)`` pairs set for all nodes.
edge_attr: Mapping of ``(attribute, value)`` pairs set for all edges.
body: Iterable of verbatim lines to add to the graph ``body``.
strict (bool): Rendering should merge multi-edges.
Note:
All parameters are `optional` and can be changed under their
corresponding attribute name after instance creation.
"""
_head = 'graph %s{'
_head_strict = 'strict %s' % _head
_edge = '\t%s -- %s%s'
_edge_plain = _edge % ('%s', '%s', '')
@property
def directed(self):
"""``False``"""
return False
class Digraph(Dot):
"""Directed graph source code in the DOT language."""
if Graph.__doc__ is not None:
__doc__ += Graph.__doc__.partition('.')[2]
_head = 'digraph %s{'
_head_strict = 'strict %s' % _head
_edge = '\t%s -> %s%s'
_edge_plain = _edge % ('%s', '%s', '')
@property
def directed(self):
"""``True``"""
return True
|
the-stack_0_24111 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from bugs.models import CustomUser, Ticket
# help in this section is from
# https://testdriven.io/blog/django-custom-user-model/
class CustomUserAdmin(UserAdmin):
list_display = (
"username",
"first_name",
"last_name",
"tag_line",
"is_staff",
"is_active",
)
list_filter = (
"username",
"first_name",
"last_name",
"tag_line",
"is_staff",
"is_active",
)
fieldsets = (
(None, {"fields": ("username", "first_name", "last_name", "tag_line")}),
("Permissions", {"fields": ("is_staff", "is_active")}),
)
admin.site.register(CustomUser, CustomUserAdmin)
admin.site.register(Ticket)
|
the-stack_0_24113 | from __future__ import print_function
"""
DataPlan Class
read either a python or excel ('xlsx') file, and return a dict with the "dataplan"
Note that some specific requirements are present in the current version regarding
the names of the keys and the columns.
A dataplan must have:
datasets (dict)
basepath : the base path to the data files
outputpath : the path to where the output files of an analysis would be stored.
In the excel file, there should be one sheet "Directories" with a directory column
and another sheet "Dataplan" with at least Date, Slice, Cell, Protocols, and a few other entries.
"""
import os
import numpy as np
import pandas as pd
#import ezodf as odf
from collections import OrderedDict
import re
class DataPlan():
def __init__(self, datadictname, sheet='Dataplan'):
"""
set up and read a data plan, saving the structure and information for other uses
Parameters
----------
datadictname : str (no default)
name of the file that holds the data dictionary
Should end in .py or .xlsx. The file is parsed to a common structure
sheet : str (default "Dataplan")
The name of the sheet in the excel file that will be used (if an excel file is specified)
Returns
-------
Nothing. All results are in class variables
"""
self.sheet = sheet # for excel files
self.orderedkeys = ['subject', 'dir', 'G', 'prots', 'thr', 'rt', 'decay', 'exclist'] # default set
data = {}
fn, ext = os.path.splitext(datadictname)
if ext == '':
ext = '.py'
if ext == '.py':
h = open(fn+ext).read()
print('h: ', h)
exec(open(fn+ext).read(), data)
#execfile(fn + ext, data) old python 2.7 version
self.datasource = datadictname
self.datasets = data['datasets'] # convenience
self.datadir = data['basepath']
self.outputpath = data['outputpath']
self.data = data # just save the dict for anything else
elif ext == '.xlsx':
self.datasets = self.read_xlsx_datasummary(fn + ext, sheet=sheet)
def setkeys(self, keylist):
"""
Change the ordered key list values (column names in excel sheet)
"""
self.orderedkeys = keylist
def make_xls(self, dataset, outfile='test.xlsx', sheet='Dataplan'):
"""
Process .py files, convert to an excel sheet
From the dictionary in dataset, write an excel spreadsheet as follows:
Top keys are rows
Within each row, keys are columns
prots and exclist are written as strings of python code
All others are written as values
The column headings are derived from the first entry in the data set.
If a value is missing for a given row, it is left empty
if a new key is encountered, a new column is made
Parameters
----------
dataset : dict
Dictionary specifying the dataset (for .py file)
outfile : str (default "test.xlsx")
Output excel file to create from this dataset
sheet : str (default "Dataplan")
Sheet in the excel file where the plan will be written
        Returns
------
Nothing
"""
subjectkeys = dataset.keys()
colkeys = dataset[subjectkeys[0]].keys()
# append unordered keys now
for key in colkeys:
if key not in self.orderedkeys:
self.orderedkeys.append(key)
# for each column determine maximum width of field and set it
cwidths = OrderedDict()
for col in self.orderedkeys:
cwidths[col] = 0
for row in dataset.keys():
if col == 'subject':
dsl = 12
else:
dsl = len(str(dataset[row][col]))
if dsl > cwidths[col]:
cwidths[col] = dsl
orderedkeys = self.orderedkeys[1:]
df = pd.DataFrame(dataset).transpose()
df = df[self.orderedkeys]
writer = pd.ExcelWriter(outfile, engine='xlsxwriter')
df.to_excel(writer, sheet_name=sheet)
wks = writer.sheets['Dataplan']
for i, c in enumerate(self.orderedkeys):
wks.set_column(i+1, i+1, cwidths[c]+2)
wks.set_column(0, 0, 12)
writer.save()
        self.read_sheet(outfile, sheet)
def read_sheet(self, filename, sheet=0):
"""
Read an excel sheet and return as a pandas data structure
Parameters
----------
filename : str
name of the excel file to read (including extension)
        sheet : int or str (default: 0)
sheet within the excel file to read
Returns
--------
Nothing
"""
d = pd.read_excel(filename, sheet_name=sheet).transpose()
ds = d.to_dict()
for s in ds.keys():
ds[s]['prots'] = eval(ds[s]['prots'])
ds[s]['exclist'] = eval(ds[s]['exclist'])
return(ds)
def read_xlsx_datasummary(self, filename, sheet):
"""
Read a datasummary file and store into an pandas dataframe
Datasummaries are flat text files in a csv format and are generated
by datasummary.py
Parameters
----------
filename : str (no default)
Name of the text file to read, including path and extension
Returns
-------
        dataset as a pandas dataframe
"""
re_plist = re.compile(r'([\w\(\)]+)+')
re_sst = re.compile(r'Protocols: \[([\w\(\), ])*')
self.excel_as_df = pd.read_excel(filename, sheet_name=sheet)
ds = self.excel_as_df.transpose().to_dict()
for s in ds.keys():
ds[s]['exclist'] = ('{0:s}.{1:s}.{2:s}'.format(str(ds[s]['Date'])[:-1], str(ds[s]['Slice']), str(ds[s]['Cell'])))
try:
ds[s]['prots'] = eval(ds[s]['Protocols']).strip()
ds[s]['IV'] = eval(ds[s]['IV']).strip()
ds[s]['Map'] = eval(ds[s]['Map']).strip()
except:
matchstring = re_sst.match(str(ds[s]['Protocols']))
if matchstring:
res = re.findall(re_plist, matchstring.group(0))
ds[s]['prots'] = res[1:]
# print(ds[s].keys())
ds[s]['IV'] = str(ds[s]['IV']).strip()
ds[s]['Map'] = str(ds[s]['Map']).strip()
#print('protos:')
return(ds)
def add_result_columns(self, colnames, datatype='float'):
"""
Add columns to the pandas data frame if they do not already exist
Parameters
----------
colname : str (no default)
            the text name for the column to add at the right hand end of the sheet
datatype : str (default: 'float')
data type for the column (must be float or str)
Returns
-------
Nothing. The current pandas dataframe is modified
"""
dflength = len(self.excel_as_df['CellID'])
for colname in colnames:
if not self.excel_as_df.columns.isin([colname]).any():
if datatype == 'float':
self.excel_as_df[colname] = pd.Series(np.zeros(dflength), index=self.excel_as_df.index)
elif datatype == 'str':
self.excel_as_df[colname] = pd.Series(['' for x in range(dflength)], index=self.excel_as_df.index)
else:
                    raise ValueError('add result column needs datatype of "float" or "str", got: %s' % str(datatype))
def post_result(self, dataname, dataid, colname, value):
"""
Put results into a cell in the pandas dataframe. If the column
does not already exist, it is added to the dataframe.
Parameters
----------
dataname : str
            Identifier for the data naming scheme (usually 'cell')
dataid : int
Number/data identification associated with the dataname
colname : str
Name of the column to store the data
value : float or str
data to be stored.
Returns
-------
Nothing. The pandas dataframe is modified
"""
if dataid not in self.excel_as_df[dataname].values:
raise ValueError('%s number %d is not found in current frame/excel sheet' % (dataname, dataid))
if not self.excel_as_df.columns.isin([colname]).any():
if isinstance(value, str):
dtype = 'str'
elif isinstance(value, float):
dtype = 'float'
else:
raise ValueError('Do not yet know how to post a value of type %s ' % str(type(value)))
self.add_result_columns([colname], datatype=dtype)
index = self.excel_as_df[self.excel_as_df[dataname] == dataid].index[0]
self.excel_as_df.at[index, colname] = value
def update_xlsx(self, filename, sheet):
"""
        Update the excel file by writing the current pandas dataframe
        to an excel file. This is usually not needed.
Parameters
----------
filename : str
            Full path and extension for the excel file
sheet : str or int (no default)
Sheet to write the result to.
"""
self.excel_as_df.to_excel(filename, sheet_name=sheet, index=False)
# broken:
# def read_ods(self, ods_filename):
# """
# Read an Open Document spreadsheet
# Assume that the first row contains the column headings for the data in the sheet
# Reads every row, saving a subset of the information to a dictionary
# The dictionary is keyed by the Day/Slice/Cell, and contains a dictionary with
# the following elements:
#
# Parameter
# ---------
# ods_filename: the name of the file to read. The parent directory is set in the source
#
# Return
# ------
# Dictionary: A dictionary with the spreadsheet information
# The structure is:
# top level keys: sheet names
# next level keys: day/slice/cell
# containing a dict of data (genotype, description)
# """
# fn = os.path.join(basedir, ods_filename)
# doc = odf.opendoc(fn)
# result = {}
# # print("Spreadsheet %s contains %d sheets.\n" % (ods_filename, len(doc.sheets)))
# for sheet in doc.sheets:
# # print("-"*40)
# # print("Sheet name: '%s'" % sheet.name)
# # print("Size of Sheet : (rows=%d, cols=%d)" % (sheet.nrows(), sheet.ncols()) )
#
# rt = sheet.row(0)
# titles = {t.value: int(i) for i, t in enumerate(rt)} # titles of columns
# # Assemble dictionary with key = day/slice/cell, containing dict{genotype: type, description: des}
# ss = {}
# for i, r in enumerate(sheet.rows()):
# if i == 0:
# continue
# dayn = r[titles['Day']].value
# slicen = r[titles['Slice']].value
# celln = r[titles['Cell']].value
# genotype = r[titles['Genotype']].value
# description = r[titles['Day Description']].value
# vgenotype = r[titles['Verified Genotype']].value
# # species = r[titles['Species']].value
# if dayn is None or slicen is None or celln is None: # probably just information for a human
# continue
# thiskey = os.path.join(dayn.rstrip(), slicen.rstrip(), celln.rstrip())
# ss[thiskey] = {'genotype': genotype, 'description': description, 'verified': vgenotype}
# result[sheet] = ss
# return result
# #print ('Found {:d} Cells'.format(len(ss)))
if __name__ == '__main__':
# D = DataPlan(os.path.join('ephysanalysis', 'test_data', 'CS_CHL1_minis.py'))
# D.make_xls(D.datasets)
#
# test routine -not for production. Use code like this in your own analysis script.
D = DataPlan('dataplan1.xlsx')
D.post_result('CellID', 9, 'Rin', 52.3)
#print( D.datasets)
D.update_xlsx('dataplan2.xlsx', 'Dataplan')
|
the-stack_0_24116 | from contextlib import closing
import psycopg
from lightwood.api import dtype
from mindsdb.integrations.base import Integration
from mindsdb.utilities.log import log
from mindsdb.utilities.config import Config
from mindsdb.utilities.wizards import make_ssl_cert
class PostgreSQLConnectionChecker:
def __init__(self, **kwargs):
self.host = kwargs.get('host')
self.port = kwargs.get('port')
self.user = kwargs.get('user')
self.password = kwargs.get('password')
self.database = kwargs.get('database', 'postgres')
def _get_connection(self):
conn = psycopg.connect(f'host={self.host} port={self.port} dbname={self.database} user={self.user} password={self.password}', connect_timeout=10)
return conn
def check_connection(self):
try:
con = self._get_connection()
with closing(con) as con:
cur = con.cursor()
cur.execute('select 1;')
connected = True
except Exception as e:
print('EXCEPTION!')
print(e)
connected = False
return connected
class PostgreSQL(Integration, PostgreSQLConnectionChecker):
def __init__(self, config, name, db_info):
super().__init__(config, name)
self.user = db_info.get('user')
self.password = db_info.get('password')
self.host = db_info.get('host')
self.port = db_info.get('port')
self.database = db_info.get('database', 'postgres')
def _to_postgres_table(self, dtype_dict, predicted_cols, columns):
subtype_map = {
dtype.integer: ' int8',
dtype.float: 'float8',
dtype.binary: 'bool',
dtype.date: 'date',
dtype.datetime: 'timestamp',
dtype.binary: 'text',
dtype.categorical: 'text',
dtype.tags: 'text',
dtype.image: 'text',
dtype.video: 'text',
dtype.audio: 'text',
dtype.short_text: 'text',
dtype.rich_text: 'text',
dtype.array: 'text',
dtype.quantity: 'text',
dtype.tsarray: 'text',
'default': 'text'
}
column_declaration = []
for name in columns:
try:
col_subtype = dtype_dict[name]
new_type = subtype_map.get(col_subtype, subtype_map.get('default'))
column_declaration.append(f' "{name}" {new_type} ')
if name in predicted_cols:
column_declaration.append(f' "{name}_original" {new_type} ')
except Exception as e:
log.error(f'Error: can not determine postgres data type for column {name}: {e}')
return column_declaration
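    # Illustrative (hypothetical) example of what the helper above produces:
    # given dtype_dict={'rental_price': dtype.float, 'neighborhood': dtype.categorical}
    # and predicted_cols=['rental_price'], the returned declarations are roughly
    #     [' "rental_price" float8 ', ' "rental_price_original" float8 ',
    #      ' "neighborhood" text ']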
def _escape_table_name(self, name):
return '"' + name.replace('"', '""') + '"'
def _query(self, query):
con = self._get_connection()
with closing(con) as con:
cur = con.cursor()
res = True
cur.execute(query)
try:
rows = cur.fetchall()
keys = [k[0] if isinstance(k[0], str) else k[0].decode('ascii') for k in cur.description]
res = [dict(zip(keys, row)) for row in rows]
except Exception:
pass
con.commit()
return res
def setup(self):
user = f"{self.config['api']['mysql']['user']}_{self.name}"
password = self.config['api']['mysql']['password']
host = self.config['api']['mysql']['host']
port = self.config['api']['mysql']['port']
try:
self._query('''
DO $$
begin
if not exists (SELECT 1 FROM pg_extension where extname = 'mysql_fdw') then
CREATE EXTENSION mysql_fdw;
end if;
END
$$;
''')
except Exception:
            print("Error: can't find or activate mysql_fdw extension for PostgreSQL.")
self._query(f'DROP SCHEMA IF EXISTS {self.mindsdb_database} CASCADE')
self._query(f"DROP USER MAPPING IF EXISTS FOR {self.user} SERVER server_{self.mindsdb_database}")
self._query(f'DROP SERVER IF EXISTS server_{self.mindsdb_database} CASCADE')
self._query(f'''
CREATE SERVER server_{self.mindsdb_database}
FOREIGN DATA WRAPPER mysql_fdw
OPTIONS (host '{host}', port '{port}');
''')
self._query(f'''
CREATE USER MAPPING FOR {self.user}
SERVER server_{self.mindsdb_database}
OPTIONS (username '{user}', password '{password}');
''')
self._query(f'CREATE SCHEMA {self.mindsdb_database}')
q = f"""
CREATE FOREIGN TABLE IF NOT EXISTS {self.mindsdb_database}.predictors (
name text,
status text,
accuracy text,
predict text,
update_status text,
mindsdb_version text,
error text,
select_data_query text,
training_options text
)
SERVER server_{self.mindsdb_database}
OPTIONS (dbname 'mindsdb', table_name 'predictors');
"""
self._query(q)
q = f"""
CREATE FOREIGN TABLE IF NOT EXISTS {self.mindsdb_database}.commands (
command text
) SERVER server_{self.mindsdb_database}
OPTIONS (dbname 'mindsdb', table_name 'commands');
"""
self._query(q)
def register_predictors(self, model_data_arr):
for model_meta in model_data_arr:
name = model_meta['name']
predict = model_meta['predict']
if not isinstance(predict, list):
predict = [predict]
columns_sql = ','.join(self._to_postgres_table(
model_meta['dtype_dict'],
predict,
list(model_meta['dtype_dict'].keys())
))
columns_sql += ',"select_data_query" text'
for col in predict:
columns_sql += f',"{col}_confidence" float8'
if model_meta['dtype_dict'][col] in (dtype.integer, dtype.float):
columns_sql += f',"{col}_min" float8'
columns_sql += f',"{col}_max" float8'
columns_sql += f',"{col}_explain" text'
self.unregister_predictor(name)
q = f"""
CREATE FOREIGN TABLE {self.mindsdb_database}.{self._escape_table_name(name)} (
{columns_sql}
) SERVER server_{self.mindsdb_database}
OPTIONS (dbname 'mindsdb', table_name '{name}');
"""
self._query(q)
def unregister_predictor(self, name):
q = f"""
DROP FOREIGN TABLE IF EXISTS {self.mindsdb_database}.{self._escape_table_name(name)};
"""
self._query(q)
def get_row_count(self, query):
q = f"""
SELECT COUNT(*) as count
FROM ({query}) as query;
"""
result = self._query(q)
return result[0]['count']
def get_tables_list(self):
q = """
SELECT table_schema, table_name
FROM information_schema.tables
WHERE table_schema != 'pg_catalog'
AND table_schema != 'information_schema'
ORDER BY table_schema, table_name
"""
tables_list = self._query(q)
tables = [f"{table['table_schema']}.{table['table_name']}" for table in tables_list]
return tables
def get_columns(self, query):
q = f"""SELECT * from ({query}) LIMIT 1;"""
query_response = self._query(q)
if len(query_response) > 0:
columns = list(query_response[0].keys())
return columns
else:
return []
|
the-stack_0_24118 | import json
import config
from requests_oauthlib import OAuth1Session
from time import sleep
import emoji
# Remove emoji characters from a string
def remove_emoji(src_str):
return ''.join(c for c in src_str if c not in emoji.UNICODE_EMOJI)
# API key configuration
CK = config.CONSUMER_KEY
CS = config.CONSUMER_SECRET
AT = config.ACCESS_TOKEN
ATS = config.ACCESS_TOKEN_SECRET
# Authentication
twitter = OAuth1Session(CK, CS, AT, ATS)
# Endpoint for fetching a user timeline
url = "https://api.twitter.com/1.1/statuses/user_timeline.json"
# Request parameters
params = {'screen_name': 'USERID',
'exclude_replies': True,
'include_rts': False,
'count': 200}
# Output file
f_out = open('./output/tweet_data', 'w')
for j in range(100):
res = twitter.get(url, params=params)
if res.status_code == 200:
        # Remaining API rate limit
limit = res.headers['x-rate-limit-remaining']
print("API remain: " + limit)
        if int(limit) <= 1:
sleep(60*15)
n = 0
timeline = json.loads(res.text)
        # Write the text of each tweet to the output file
for i in range(len(timeline)):
if i != len(timeline)-1:
f_out.write(remove_emoji(timeline[i]['text']) + '\n')
else:
f_out.write(remove_emoji(timeline[i]['text']) + '\n')
                # Set max_id to one less than the ID of the oldest tweet fetched
params['max_id'] = timeline[i]['id']-1
f_out.close()
|
the-stack_0_24119 | #!/usr/bin/env python2.7
# -*- mode: python; coding: utf-8; -*-
"""Module for generating lexicon using Rao and Ravichandran's method (2009).
"""
##################################################################
# Imports
from __future__ import unicode_literals, print_function
from blair_goldensohn import build_mtx, seeds2seedpos
from common import POSITIVE, NEGATIVE, NEUTRAL
from graph import Graph
from itertools import chain
from scipy import sparse
import numpy as np
import sys
##################################################################
# Constants
POS_IDX = 0
NEG_IDX = 1
NEUT_IDX = 2
POL_IDX = 1
SCORE_IDX = 2
MAX_I = 300
IDX2CLS = {POS_IDX: POSITIVE, NEG_IDX: NEGATIVE, NEUT_IDX: NEUTRAL}
##################################################################
# Methods
def _eq_sparse(a_M1, a_M2):
"""Compare two sparse matrices.
@param a_M1 - first sparse matrix to compare
@param a_M2 - second sparse matrix to compare
@return True if both matrices are equal, non-False otherwise
"""
if type(a_M1) != type(a_M2):
return False
    if not np.allclose(a_M1.get_shape(), a_M2.get_shape()):
return False
X, Y = a_M1.nonzero()
IDX1 = set([(x, y) for x, y in zip(X, Y)])
X, Y = a_M2.nonzero()
IDX2 = [(x, y) for x, y in zip(X, Y) if (x, y) not in IDX1]
IDX = list(IDX1)
IDX.extend(IDX2)
IDX.sort()
for x_i, y_i in IDX:
# print("a_M1[{:d}, {:d}] = {:f}".format(x_i, y_i, a_M1[x_i, y_i]))
# print("a_M2[{:d}, {:d}] = {:f}".format(x_i, y_i, a_M2[x_i, y_i]))
# print("is_close", np.isclose(a_M1[x_i, y_i], a_M2[x_i, y_i]))
if not np.isclose(a_M1[x_i, y_i], a_M2[x_i, y_i]):
return False
return True
def _mtx2tlist(a_Y, a_term2idx):
"""Convert matrix to a list of polar terms.
@param a_Y - matrix of polar terms
@param a_terms2idx - mapping from terms to their matrix indices
@return list of 3-tuples (word, polarity, score)
"""
ret = []
iscore = 0.
irow = None
lex2lidx = {}
ipol = lidx = 0
for (iword, ipos), idx in a_term2idx.iteritems():
# obtain matrix row for that term
irow = a_Y.getrow(idx).toarray()
# print("irow =", repr(irow))
ipol = irow.argmax(axis=1)[0]
iscore = irow[0, ipol]
# print("ipol =", repr(ipol))
# print("iscore =", repr(iscore))
if ipol != NEUT_IDX:
ipol = IDX2CLS[ipol]
if iword in lex2lidx:
lidx = lex2lidx[iword]
if abs(iscore) > abs(ret[lidx][SCORE_IDX]):
ret[lidx][POL_IDX] = ipol
ret[lidx][SCORE_IDX] = iscore
else:
lex2lidx[iword] = len(ret)
ret.append([iword, ipol, iscore])
return ret
def _sign_normalize(a_Y, a_terms2idx, a_pos, a_neg, a_neut,
a_set_dflt=None):
"""Fix seed values and row-normalize the class matrix.
@param a_Y - class matrix to be changed
@param a_terms2idx - mapping from terms to their matrix indices
@param a_pos - set of lexemes with positive polarity
@param a_neg - set of lexemes with negative polarity
@param a_neut - set of lexemes with neutral polarity
@param a_set_dflt - function to set the default value of an unkown term
@return void
@note modifies the input matrix in place
"""
seed_found = False
for iterm, i in a_terms2idx.iteritems():
if iterm in a_pos:
seed_found = True
a_Y[i, :] = 0.
a_Y[i, POS_IDX] = 1.
elif iterm in a_neg:
seed_found = True
a_Y[i, :] = 0.
a_Y[i, NEG_IDX] = 1.
elif iterm in a_neut:
seed_found = True
a_Y[i, :] = 0.
a_Y[i, NEUT_IDX] = 1.
elif a_set_dflt is not None:
a_set_dflt(a_Y, i)
assert seed_found, "No seed term found in matrix."
# normalize class scores
Z = a_Y.sum(1)
x, y = a_Y.nonzero()
for i, j in zip(x, y):
# print("a_Y[{:d}, {:d}] =".format(i, j), repr(a_Y[i, j]))
# print("Z[{:d}, 0] =".format(i), repr(Z[i, 0]))
a_Y[i, j] /= float(Z[i, 0]) or 1.
# print("*a_Y[{:d}, {:d}] =".format(i, j), repr(a_Y[i, j]))
def prune_normalize(a_M):
"""Make each of the adjacency matrix sum up to one.
Args:
a_M (scipy.sparse.csr): matrix to be normalized
Returns:
void:
Note:
modifies the input matrix in place
"""
# remove negative transitions
nonzero_xy = a_M.nonzero()
for i, j in zip(*nonzero_xy):
if a_M[i, j] < 0.:
a_M[i, j] = 0.
a_M.prune()
# normalize all outgoing transitions
Z = a_M.sum(0)
nonzero_xy = a_M.nonzero()
for i, j in zip(*nonzero_xy):
a_M[i, j] /= float(Z[0, j]) or 1.
def rao_min_cut(a_germanet, a_pos, a_neg, a_neut, a_seed_pos,
a_ext_syn_rels):
"""Extend sentiment lexicons using the min-cut method of Rao (2009).
@param a_germanet - GermaNet instance
@param a_pos - set of lexemes with positive polarity
@param a_neg - set of lexemes with negative polarity
@param a_neut - set of lexemes with neutral polarity
@param a_seed_pos - part-of-speech class of seed synsets ("none" for no
restriction)
@param a_ext_syn_rels - use extended set of synonymous relations
@return list of polar terms, their polarities, and scores
"""
sgraph = Graph(a_germanet, a_ext_syn_rels)
# partition the graph into subjective and objective terms
mcs, cut_edges, _, _ = sgraph.min_cut(a_pos | a_neg, a_neut, a_seed_pos)
print("min_cut_score (subj. vs. obj.) = {:d}".format(mcs),
file=sys.stderr)
# remove edges belonging to the min cut (i.e., cut the graph)
for isrc, itrg in cut_edges:
if isrc in sgraph.nodes:
sgraph.nodes[isrc].pop(itrg, None)
# separate the graph into positive and negative terms
mcs, _, pos, neg = sgraph.min_cut(a_pos, a_neg, a_seed_pos)
print("min_cut_score (pos. vs. neg.) = {:d}".format(mcs),
file=sys.stderr)
ret = [(inode[0], POSITIVE, 1.) for inode in pos]
ret.extend((inode[0], NEGATIVE, -1.) for inode in neg)
return ret
def rao_lbl_prop(a_germanet, a_pos, a_neg, a_neut, a_seed_pos,
a_ext_syn_rels):
"""Extend sentiment lexicons using the lbl-prop method of Rao (2009).
@param a_germanet - GermaNet instance
@param a_pos - set of lexemes with positive polarity
@param a_neg - set of lexemes with negative polarity
@param a_neut - set of lexemes with neutral polarity
@param a_seed_pos - part-of-speech class of seed synsets ("none" for no
restriction)
@param a_ext_syn_rels - use extended set of synonymous relations
@return list of polar terms, their polarities, and scores
"""
if a_seed_pos is None:
a_seed_pos = ["adj", "nomen", "verben"]
else:
a_seed_pos = [a_seed_pos]
a_pos = seeds2seedpos(a_pos, a_seed_pos)
a_neg = seeds2seedpos(a_neg, a_seed_pos)
a_neut = seeds2seedpos(a_neut, a_seed_pos)
# obtain and row-normalize the adjacency matrix
terms = set((ilex, ipos)
for isynid, ipos in a_germanet.synid2pos.iteritems()
for ilexid in a_germanet.synid2lexids[isynid]
for ilex in a_germanet.lexid2lex[ilexid]
)
terms2idx = {iterm: i for i, iterm in enumerate(terms)}
M = build_mtx(a_germanet, terms2idx, set(),
a_ext_syn_rels, len(terms))
prune_normalize(M)
# no need to transpose M[i, j] is the link going from node j to the node i;
# and, in Y, the Y[j, k] cell is the polarity score of the class k for the
# term j
# M = M.transpose()
# check that the matrix is column normalized
assert np.all(i == 0 or np.isclose([i], [1.])
for i in M.sum(0)[0, :])
# initialize label matrix
Y = sparse.lil_matrix((len(terms), len(IDX2CLS)), dtype=np.float32)
def _set_neut_one(X, i):
X[i, NEUT_IDX] = 1.
_sign_normalize(Y, terms2idx, a_pos, a_neg, a_neut,
_set_neut_one)
# Y = Y.tocsr()
# output first M row and Y column
# for i in xrange(len(terms)):
# if M[0, i] != 0:
# print("M[0, {:d}] =".format(i), M[0, i], file=sys.stderr)
# if Y[i, 0] != 0:
# print("Y[i, 0] =", Y[i, 0], file=sys.stderr)
# B = M.dot(Y)
# print("B[0, 0] =", B[0, 0], file=sys.stderr)
# perform multiplication until convergence
i = 0
prev_Y = None
while not _eq_sparse(prev_Y, Y) and i < MAX_I:
prev_Y = Y.copy()
Y = Y.tocsc()
Y = M.dot(Y)
Y = Y.tolil()
_sign_normalize(Y, terms2idx, a_pos, a_neg, a_neut)
i += 1
ret = _mtx2tlist(Y, terms2idx)
ret.sort(key=lambda el: abs(el[-1]), reverse=True)
return ret
|
the-stack_0_24124 | from __future__ import print_function, division
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
num_epochs = 100
total_series_length = 50000
truncated_backprop_length = 15
state_size = 4
num_classes = 2
echo_step = 3
batch_size = 5
num_batches = total_series_length//batch_size//truncated_backprop_length
def generateData():
x = np.array(np.random.choice(2, total_series_length, p=[0.5, 0.5]))
y = np.roll(x, echo_step)
y[0:echo_step] = 0
x = x.reshape((batch_size, -1)) # The first index changing slowest, subseries as rows
y = y.reshape((batch_size, -1))
return (x, y)
batchX_placeholder = tf.placeholder(tf.float32, [batch_size, truncated_backprop_length])
batchY_placeholder = tf.placeholder(tf.int32, [batch_size, truncated_backprop_length])
init_state = tf.placeholder(tf.float32, [batch_size, state_size])
W2 = tf.Variable(np.random.rand(state_size, num_classes),dtype=tf.float32)
b2 = tf.Variable(np.zeros((1,num_classes)), dtype=tf.float32)
# Unpack columns
inputs_series = tf.split(batchX_placeholder, truncated_backprop_length, 1)
labels_series = tf.unstack(batchY_placeholder, axis=1)
print("inputs_series: ", inputs_series)
print()
print("labels_series: ", labels_series)
print()
# Forward passes
cell = tf.contrib.rnn.BasicRNNCell(state_size)
states_series, current_state = tf.nn.static_rnn(cell, inputs_series, init_state)
print("states_series: ", states_series)
print()
print("current_series: ", current_state)
print()
logits_series = [tf.matmul(state, W2) + b2 for state in states_series] #Broadcasted addition
predictions_series = [tf.nn.softmax(logits) for logits in logits_series]
print("logits_series: ", logits_series)
print()
losses = [tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels) for logits, labels in zip(logits_series,labels_series)]
total_loss = tf.reduce_mean(losses)
train_step = tf.train.AdagradOptimizer(0.3).minimize(total_loss)
def plot(loss_list, predictions_series, batchX, batchY):
plt.subplot(2, 3, 1)
plt.cla()
plt.plot(loss_list)
for batch_series_idx in range(5):
one_hot_output_series = np.array(predictions_series)[:, batch_series_idx, :]
single_output_series = np.array([(1 if out[0] < 0.5 else 0) for out in one_hot_output_series])
plt.subplot(2, 3, batch_series_idx + 2)
plt.cla()
plt.axis([0, truncated_backprop_length, 0, 2])
left_offset = range(truncated_backprop_length)
plt.bar(left_offset, batchX[batch_series_idx, :], width=1, color="blue")
plt.bar(left_offset, batchY[batch_series_idx, :] * 0.5, width=1, color="red")
plt.bar(left_offset, single_output_series * 0.3, width=1, color="green")
plt.draw()
plt.pause(0.0001)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
plt.ion()
plt.figure()
plt.show()
loss_list = []
for epoch_idx in range(num_epochs):
x,y = generateData()
_current_state = np.zeros((batch_size, state_size))
print("New data, epoch", epoch_idx)
for batch_idx in range(num_batches):
start_idx = batch_idx * truncated_backprop_length
end_idx = start_idx + truncated_backprop_length
batchX = x[:,start_idx:end_idx]
batchY = y[:,start_idx:end_idx]
_total_loss, _train_step, _current_state, _predictions_series = sess.run(
[total_loss, train_step, current_state, predictions_series],
feed_dict={
batchX_placeholder:batchX,
batchY_placeholder:batchY,
init_state:_current_state
})
loss_list.append(_total_loss)
if batch_idx%100 == 0:
print("Step",batch_idx, "Loss", _total_loss)
plot(loss_list, _predictions_series, batchX, batchY)
plt.ioff()
plt.show() |
the-stack_0_24126 | """
English Word Segmentation in Python
Word segmentation is the process of dividing a phrase without spaces back
into its constituent parts. For example, consider a phrase like "thisisatest".
For humans, it's relatively easy to parse.
This module makes it easy for
machines too.
Typically, you will use the module this way in your own code.
>>> from wordsegmentation import WordSegment
>>> ws = WordSegment(use_google_corpus=True)
>>> ws.segment('universityofwashington')
['university', 'of', 'washington']
>>> ws.segment('thisisatest')
['this', 'is', 'a', 'test']
In the code, the segmentation algorithm consists of the following steps,
1) divide and conquer -- safely divide the input string into substrings. This
removes the length limit that would otherwise dramatically slow down performance.
For example, "facebook123helloworld" is treated as 3 sub-problems --
"facebook", "123", and "helloworld": runs of characters that can form corpus
words are kept together, and the rest (here "123") act as separators.
2) for each substring, dynamic programming is used to compute the optimal
words.
3)combine the sub-problems, and return the result for the original string.
This module is inspired by Grant Jenks' https://pypi.python.org/pypi/wordsegment.
Segmentation algorithm used in this module, has achieved a time-complexity of O(n^2).
By comparison to existing segmentation algorithms, this module does better on following aspects,
1)can handle very long input. There is no arbitary max lenght limit set to input string.
2)segmentation finished in polynomial time via dynamic programming.
3)by default, the algorithm uses a filtered Google corpus, which contains only English words that could be found in dictionary.
An extreme example is shown below,
>>>ws.segment('MARGARETAREYOUGRIEVINGOVERGOLDENGROVEUNLEAVINGLEAVESLIKETHETHINGSOFMANYOUWITHYOURFRESHTHOUGHTSCAREFORCANYOUAHASTHEHEARTGROWSOLDERITWILLCOMETOSUCHSIGHTSCOLDERBYANDBYNORSPAREASIGHTHOUGHWORLDSOFWANWOODLEAFMEALLIEANDYETYOUWILLWEEPANDKNOWWHYNOWNOMATTERCHILDTHENAMESORROWSSPRINGSARETHESAMENORMOUTHHADNONORMINDEXPRESSEDWHATHEARTHEARDOFGHOSTGUESSEDITISTHEBLIGHTMANWASBORNFORITISMARGARETYOUMOURNFOR')
['margaret', 'are', 'you', 'grieving', 'over', 'golden', 'grove', 'un', 'leaving', 'leaves', 'like', 'the', 'things', 'of', 'man', 'you', 'with', 'your', 'fresh', 'thoughts', 'care', 'for', 'can', 'you', 'a', 'has', 'the', 'he', 'art', 'grows', 'older', 'it', 'will', 'come', 'to', 'such', 'sights', 'colder', 'by', 'and', 'by', 'nor', 'spa', 're', 'a', 'sigh', 'though', 'worlds', 'of', 'wan', 'wood', 'leaf', 'me', 'allie', 'and', 'yet', 'you', 'will', 'weep', 'and', 'know', 'why', 'now', 'no', 'matter', 'child', 'the', 'name', 'sorrows', 'springs', 'are', 'the', 'same', 'nor', 'mouth', 'had', 'non', 'or', 'mind', 'expressed', 'what', 'he', 'art', 'heard', 'of', 'ghost', 'guessed', 'it', 'is', 'the', 'blight', 'man', 'was', 'born', 'for', 'it', 'is', 'margaret', 'you', 'mourn', 'for']
Scoring mechanism based on formula from the chapter "Natural Language Corpus Data"
from the book "Beautiful Data" (Segaran and Hammerbacher, 2009)
http://oreilly.com/catalog/9780596157111/
"""
import sys
from os.path import join, dirname, realpath
import networkx as nx
from itertools import groupby, count
from math import log10
import copy
if sys.hexversion < 0x03000000:
range = xrange
def parse_file(filename):
'''
    Global function that parses a corpus file and forms a word-to-frequency dictionary.
'''
with open(filename) as fptr:
lines = (line.split('\t') for line in fptr)
return dict((word, float(number)) for word, number in lines)
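# The corpus files parsed below are tab-separated "word<TAB>count" lines; the
# counts shown here are only illustrative of the Google unigram data:
#     the     23135851162
#     of      13151942776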
#UNIGRAM_COUNTS = parse_file(join(dirname(realpath(__file__)), 'corpus', 'unigrams.txt'))
UNIGRAM_COUNTS = parse_file(join(dirname(realpath(__file__)), 'corpus', 'unigrams.txt.original'))
BIGRAM_COUNTS = parse_file(join(dirname(realpath(__file__)), 'corpus', 'bigrams.txt'))
def as_range(group):
'''
    Global function that returns the (first, last) bounds of a consecutive group
'''
tmp_lst = list(group)
return tmp_lst[0], tmp_lst[-1]
class Data(object):
'''
Read corpus from path, and provide the following functionalities,
1. data as "property", it is a dictionary where key is word,
while the value is the frequency count of this word.
2. generator that yield word and its frequency
'''
def __init__(self, use_google_corpus):
self._unigram_counts = dict()
self._use_google_corpus = use_google_corpus
if self._use_google_corpus:
#use pure google corpus
self._unigram_counts = parse_file(
join(dirname(realpath(__file__)), 'corpus', 'filtered_1_2_letter_only.txt')
)
else:
#use dictionary-filtered google corpus
self._unigram_counts = parse_file(
join(dirname(realpath(__file__)), 'corpus', 'unigrams.txt')
)
@property
def data(self):
'''
return the whole dictionary out to user as a property.
'''
return self._unigram_counts
def __iter__(self):
for each in self._unigram_counts.keys():
yield each
class ConstructCorpus(object):
'''
according to the minimal character limit,
construct a corpus at initial time.
it provides the following two properties,
1. ngram_distribution -- a dictionary where key is the ngram,
value is an int of summation of frequency of each English
word starts with that specific ngram.
2. ngram_tree -- a dictionary where key is the ngram,
value is a list containing all possile English word
starts with that specific ngram.
'''
def __init__(self, min_length, use_google_corpus):
self._minlen = min_length
self._use_google_corpus = use_google_corpus
@property
def ngram_distribution(self):
'''
return a dictionary containing the following pairs,
key: ngram string, for example, when minlen=5,
the ngram string for word "university" is "unive".
value: added-up frequency(from google corpus) of all
words starting with "unive".
'''
ngram_distribution = dict()
instance_d = Data(self._use_google_corpus)
data = instance_d.data
for entry in instance_d:
if len(entry) >= self._minlen:
cut = entry[:self._minlen]
if cut in ngram_distribution:
ngram_distribution[cut] += data[entry]
else:
ngram_distribution[cut] = data[entry]
return ngram_distribution
@property
def ngram_tree(self):
'''
return a dictionary containing the following pairs,
key: ngram string, for example, when minlen=5,
the ngram string for word "university" is "unive".
value: all words starting with the ngram,
in the example, it is "unive".
'''
ngram_tree = dict()
instance_d = Data(self._use_google_corpus)
for entry in instance_d:
if len(entry) >= self._minlen:
cut = entry[:self._minlen]
if cut in ngram_tree:
ngram_tree[cut].append(entry)
else:
ngram_tree[cut] = [entry]
return ngram_tree
class Scoring(object):
"""
    Method that scores words using probabilities from the Google trillion-word corpus
"""
def __init__(self):
pass
def get_unigram_score(self, word):
"""
        function that scores single words
2-word scoring to be added
"""
if word in UNIGRAM_COUNTS:
score = log10((UNIGRAM_COUNTS[word] / 1024908267229.0))
else:
score = log10((10.0 / (1024908267229.0 * 10 ** len(word))))
#print "get_unigram_score's args->{0}; RESULT->{1}".format(word, score)
return score
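# A rough worked example of the scoring formula above (the exact values depend
# on the corpus file actually loaded, so the numbers are only illustrative):
#     a word seen c times scores log10(c / 1024908267229.0); a count of about
#     23.1e9 for "the" gives roughly -1.6.
#     an unseen 4-letter word is smoothed to
#     log10(10.0 / (1024908267229.0 * 10 ** 4)), which is roughly -15.0.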
class IntersectCheck(object):
"""
Method that checks intersection between words
"""
def __init__(self):
'''
        takes no arguments
'''
self.tu1 = None
self.tu2 = None
def check(self, tuple_0, tuple_1):
'''
finds intersection of two words
input: position tuples
        return: set of overlapping positions (empty, i.e. falsy, when there is no intersection)
'''
self.tu1 = tuple_0
self.tu2 = tuple_1
word1 = range(self.tu1[0], self.tu1[1]+1)
word2 = range(self.tu2[0], self.tu2[1]+1)
tmp_xs = set(word1)
#print "returning {}".format(tmp_xs.intersection(word2))
return tmp_xs.intersection(word2)
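# A small illustration of IntersectCheck (positions are inclusive ranges):
#     IntersectCheck().check((0, 3), (2, 5))  ->  {2, 3}   (truthy: overlap)
#     IntersectCheck().check((0, 3), (5, 8))  ->  set()    (falsy: no overlap)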
class WordSegment(object):
'''
def __init__(self, string, casesensitive = False):
self._casesensitive = casesensitive
self._string = string
#facebook
self.lst = ((0, 3, 0, 1), (1, 3, 0, 1), (4, 7, 1, 1), (6, 7, 1, 4), (0, 7, 0, 1))
'''
def __init__(self, min_length=2, casesensitive=False, use_google_corpus=False):
self._minlen = min_length
self._string = ''
self._use_google_corpus = use_google_corpus
self._casesensitive = casesensitive
corpus = ConstructCorpus(self._minlen, self._use_google_corpus)
self.ngram_distribution = corpus.ngram_distribution
self.ngram_tree = corpus.ngram_tree
self.score_tool = Scoring()
self.lst = []
def _divide(self):
"""
Iterator finds ngrams(with its position in string) and their suffix.
An example input of string "helloworld" yields the following tuples,
(('hello',(0,5)), 'world')
(('ellow',(1,6)), 'orld')
(('llowo',(2,7)), 'rld')
(('lowor',(3,8)), 'ld')
(('oworl',(4,9)), 'd')
(('world',(5,10)), '')
"""
counter = 0
for cut_point in range(self._minlen, len(self._string)+1):
yield (
(self._string[counter:cut_point], (counter, counter+self._minlen)),
self._string[cut_point:]
)
counter += 1
    def bigram_reward(self, current, prev):
'''
function that give extra score to bigram found in Goole Corpus
'''
pass
def penaltize(self, current, prev):
'''
function that imposes penalty to any gap between words
example input is shown below
#[[(1, 3), 'ace', (0,), -4.964005188761728], [(0, 3), 'face', (0,), -4.0926128161036965], [(4, 5), 'bo', (1, 2), -5.2144070696039995], [(4, 6), 'boo', (1, 2), -5.50655106498099], [(4, 7), 'book', (1, 2), -3.490909555336102], [(0, 7), 'facebook', (0,), -6.671146108616224]]
'''
penalty = -10
bigram_reward = 0
#starting point penalty
if prev == 0:
gap = penalty * (self.lst[current-1][0][0] - 0)
#print "starting point gap found current->{0}, prev->{1}, GAP: {2}".format(current, prev, gap)
elif self.lst[current-1][0][0] - self.lst[prev-1][0][1] == 1:
bigram = '{0} {1}'.format(self.lst[prev-1][1], self.lst[current-1][1])
#print "bigram is {}".format(bigram)
if bigram in BIGRAM_COUNTS:
# Conditional probability of the word given the previous
# word. The technical name is *stupid backoff* and it's
# not a probability distribution but it works well in
# practice.
bigram_reward = (BIGRAM_COUNTS[bigram] / 1024908267229.0 / self.lst[prev-1][3]) - self.lst[current-1][3]
#print "bigram reward {0} added! for bigram {1}".format(bigram_reward, bigram)
gap = 0
#print "seamless one found current->{0}, prev->{1}, GAP: {2}".format(current, prev, gap)
else:
gap = 0
#print "Non-seamless one found current->{0}, prev->{1}, GAP: {2}".format(current, prev, gap)
return gap + bigram_reward
def _init_graph(self, meaningful_words):
'''
function that creates graph for each requesting string
below is an example input of this function
[[(0, 2), 'fac', -14.0106849963], [(1, 3), 'ace', -4.964005188761728], [(0, 3), 'face', -4.0926128161036965], [(2, 4), 'ceb', -14.0106849963], [(4, 5), 'bo', -5.2144070696039995], [(3, 5), 'ebo', -14.0106849963], [(4, 6), 'boo', -5.550655106498099], [(4, 7), 'book', -3.490909555336102], [(0, 7), 'facebook', -6.671146108616224], [(3, 7), 'ebook', -16.0106849963], [(5, 7), 'ook', -14.0106849963]]
'''
word_graph = nx.Graph()
word_graph.add_nodes_from(meaningful_words)
inter = IntersectCheck()
for each in meaningful_words:
for each_2 in meaningful_words:
if each == each_2:
continue
elif inter.check(each[0], each_2[0]):
if (each[0], each_2[0]) in word_graph.edges():
continue
else:
word_graph.add_edge(each, each_2)
return word_graph
def _find_components(self, meaningful_words):
'''
function that finds the components in the graph.
each component represents overlaping words
for example, in the example below, except the word "anotherword",
all rest words have at least one character contained in other words.
They will become one component in the who string-level graph
Example input is a list like this: [((0, 3),"face"), ((0, 7),"facebook"), ((1, 3),"ace"),
((4, 6),"boo"), ((4, 7),"book"), ((6, 7), "ok"), ((8, 19),"anotherword")]
'''
component_graph = nx.Graph()
component_graph = self._init_graph(meaningful_words)
components = []
components = list(nx.connected_component_subgraphs(component_graph))
return components
def suffix_penaltize(self, current, suffix):
"""
function that imposes penalties to suffix
"""
if suffix == None:
return 0
#let default penalty = 10
penalty = -10
inter = IntersectCheck()
if inter.check(self.lst[current-1][0], self.lst[suffix-1][0]):
if suffix == len(self.lst):
gap = penalty * (self.lst[suffix-1][0][1] - self.lst[-1][0][1])
#print "[start-1-jump] from {0} to {1}".format(self.lst[suffix-1][1], self.lst[current-1][1])
else:
gap = 0
#print "[0-jump]non-starting overlapping suffix penality paid"
else:
gap = penalty * (self.lst[suffix-1][0][1] - self.lst[current-1][0][0] - 1)
#print "non-overlapping suffix penality paid"
return gap
def _opt_component(self, component):
"""
function that finds optimal segmentation for each component
"""
#the sorted list is [((1, 4), 'ace', -4.964005188761728), ((0, 4), 'face', -4.0926128161036965), ((4, 6), 'bo', -5.2144070696039995), ((4, 7), 'boo', -5.550655106498099), ((4, 8), 'book', -3.490909555336102), ((0, 8), 'facebook', -6.671146108616224)]
meaningful_words = component.nodes()
meaningful_words.sort(key=lambda x: x[0][1])
#print "old meaningful list is {}".format(meaningful_words)
old_meaningful_words = copy.deepcopy(meaningful_words)
meaningful_words = []
for each in old_meaningful_words:
meaningful_words.append(list(each))
#print "new meaningful list is {}".format(meaningful_words)
tmp_lst = []
inter = IntersectCheck()
for tu1 in meaningful_words:
pos = 0
prev_list = []
for tu2 in meaningful_words:
if not inter.check(tu1[0], tu2[0]) and tu1[0][0] == tu2[0][1] + 1:
prev_list.append(pos+1 if pos is not None else 1)
#print "prev list appended {}".format(pos+1)
if pos == None:
pos = 1
else:
pos += 1
#print "for {0}, the non-intersected word positions are
#: {1}, words are,".format(tu1, tmp_lst)
if prev_list:
prev_list.reverse()
tu1.insert(2, tuple(prev_list))
else:
tu1.insert(2, (0,))
self.lst = meaningful_words
#print meaningful_words
j = len(self.lst)
#print "j has length of {}".format(j)
def _add(input1, input2, input3):
"""
function that adds up 3 inputs
"""
if input1 is None:
return input2 + input3
else:
return input1 + input2 + input3
def opt(j, memo):
"""
Recurrence using Dynamic programming
"""
#print "j is {}".format(j)
if j == 0:
return None
if memo[j-1] is None:
memo[j-1] = max(
#choose j
_add(opt(self.lst[(j-1)][2][0], memo), self.lst[(j-1)][3], self.penaltize(j, self.lst[(j-1)][2][0])),
#not choose j and jump to j-1 only when nesrest overlpping word has the same finish position
opt(j-1, memo) if self.lst[(j-2)][0][1] == self.lst[(j-1)][0][1] else None
)
return memo[j-1]
else:
return memo[j-1]
tmp_lst = []
#create a memo table for dynamic programming
memo = [None] * j
ending_words = []
counter = 1
for each in self.lst:
if each[0][1] == self.lst[-1][0][1]:
ending_words.append(counter)
counter += 1
tmp_lst.append(opt(j, memo))
#print tmp_lst
new_lst = []
pos = 0
for each in tmp_lst:
new_lst.append((each, ))
def find_path(j, path):
"""
find the optimal segmentation from the memo list
"""
#print "working on {}".format(self.lst[(j-1)][1])
#print "j is {}".format(j)
if j == 0:
pass
elif memo[j-1] == memo[j-2] if j-2 >= 0 else memo[0]:
if j != 1:
find_path(j-1, path)
elif j == 1:
path.append(((self.lst[0][0][0], self.lst[0][0][1]), self.lst[0][1]))
else:
#if p(j) exists
if len(self.lst[(j-1)][2]) > 0:
tmp_i = 0
#if p(j) == 1
if len(self.lst[(j-1)][2]) == 1:
path.append(((self.lst[j-1][0][0], self.lst[j-1][0][1]), self.lst[j-1][1]))
#print "[single P]jumped to {}".format(self.lst[j-1][2][tmp_i])
find_path(self.lst[j-1][2][tmp_i], path)
#if p(j) > 1
elif len(self.lst[(j-1)][2]) > 1:
prev_list = self.lst[(j-1)][2][:]
prev_list = list(prev_list)
prev_list.reverse()
p_list = []
#get the p, whose memo value is max
for i in xrange(len(self.lst[(j-1)][2])):
p_list.append(memo[self.lst[(j-1)][2][i]-1])
#print "p_list is {}".format(p_list)
max_p = max(p_list)
prev_list = self.lst[(j-1)][2][:]
#print "prev_list is {}".format(prev_list)
for i in xrange(len(self.lst[(j-1)][2])):
#print memo[prev_list[i]-1]
if memo[prev_list[i]-1] == max_p:
#tmp_p = memo[self.lst[(j-1)][2][i]-1]
tmp_i = i
break
#print "best i is {}".format(tmp_i)
#print "tmpi is {}".format(tmp_i)
#path.append(self.lst[(tmp_p - 1)][1])
path.append(((self.lst[j-1][0][0], self.lst[j-1][0][1]), self.lst[j-1][1]))
#print "jumped to {}".format(prev_list[tmp_i])
find_path(prev_list[tmp_i], path)
else:
find_path(j-1, path)
result = tmp_lst[0]
path = []
max_v = [i for i, j in enumerate(memo) if j == result]
j = max_v[-1] + 1
find_path(j, path)
path.reverse()
#print "for node {0}, the path is {1}".format(j, path)
words_list = []
for each in path:
words_list.append(each[1])
return ((path[0][0][0], self.lst[-1][0][1]), words_list)
#public interface
def segment(self, text):
"""
public interface
input: string, typically a sentence without spaces
output: list of optimal words
"""
if self._casesensitive == False:
self._string = text.lower()
self._string = self._string.strip("'")
else:
#for current version, only supports lowercase version
pass
candidate_list = []
pair_dic = dict()
for prefix, suffix in self._divide():
pair_dic[prefix] = suffix
if prefix[0] in self.ngram_distribution:
candidate_list.append(
(self.ngram_distribution[prefix[0]], prefix)
)
else:
#means this prefix was not likely
#to be a part of meaningful word
pass
candidate_list.sort(reverse=True)
#now candidate list is [(2345234, ("hello",(0,5)))]
#print "candidate list is:"
#print candidate_list
meaningful_words = []
#meaningful_words is [((0, 10),"helloworld"),...]
for each in candidate_list:
#print "each[1][0] is {0} of type{1}".format(each[1][0], type(each[1][0]))
if 'a' == each[1][0][0]:
meaningful_words.append(((each[1][1][0],
each[1][1][0]+len('a')-1), 'a',
self.score_tool.get_unigram_score('a')))
#(17507324569.0, ('in', (8, 10)))
for word in self.ngram_tree[each[1][0]]:
if word in each[1][0] + pair_dic[each[1]]:
if self._string[each[1][1][0]:each[1][1][0]+len(word)] == word:
meaningful_words.append(((each[1][1][0],
each[1][1][0]+len(word)-1), word,
self.score_tool.get_unigram_score(word)))
#sort the list in order of position in original text
meaningful_words.sort(key=lambda x: x[0][1])
#print meaningful_words
#find components from the original input string
components = []
components = self._find_components(meaningful_words)
post_components = []
for each in components:
post_components.append(self._opt_component(each))
#print "{}components found".format(len(post_components)
#print "post_components is {}".format(post_components)
meaningful_pos_lst = []
for each in post_components:
#print "each is {}".format(each)
meaningful_pos_lst += range(int(each[0][0]), int(each[0][1]+1))
meaningful_pos_lst.sort()
#print "DEBUG meaningful_pos_lst is {}".format(meaningful_pos_lst)
non_meaning_pos_lst = []
for pos in xrange(len(self._string)):
if pos in meaningful_pos_lst:
continue
else:
non_meaning_pos_lst.append(pos)
non_meaningful_range = []
non_meaningful_range = [
as_range(g) for _, g in groupby(non_meaning_pos_lst, key=lambda n, c=count(): n-next(c))
]
meaningful_dic = dict()
overall_pos_lst = []
for each in non_meaningful_range:
overall_pos_lst.append(each)
for component in post_components:
overall_pos_lst.append(component[0])
meaningful_dic[component[0]] = component[1]
#print "meaningful_dic is {}".format(meaningful_dic)
#print "self._string is {}".format(self._string)
overall_pos_lst.sort()
#print "overall_pos_lst is {}".format(overall_pos_lst)
return_lst = []
overall_pos_lst.sort()
for each in overall_pos_lst:
if each in meaningful_dic:
return_lst.extend(meaningful_dic[each])
else:
return_lst.append(self._string[each[0]:each[1]+1])
#print "RESULT: {}\n".format(return_lst)
return return_lst
'''
Test Cases
w = WordSegment(use_google_corpus=True)
#w = WordSegment(use_google_corpus=False)
w.segment('facebookingirl')
w.segment('facebook')
w.segment('whoiswatching')
w.segment('acertain')
w.segment('theyouthevent')
w.segment('baidu')
w.segment('google')
w.segment('from')
print w.segment('MARGARETAREYOUGRIEVINGOVERGOLDENGROVEUNLEAVINGLEAVESLIKETHETHINGSOFMANYOUWITHYOURFRESHTHOUGHTSCAREFORCANYOUAHASTHEHEARTGROWSOLDERITWILLCOMETOSUCHSIGHTSCOLDERBYANDBYNORSPAREASIGHTHOUGHWORLDSOFWANWOODLEAFMEALLIEANDYETYOUWILLWEEPANDKNOWWHYNOWNOMATTERCHILDTHENAMESORROWSSPRINGSARETHESAMENORMOUTHHADNONORMINDEXPRESSEDWHATHEARTHEARDOFGHOSTGUESSEDITISTHEBLIGHTMANWASBORNFORITISMARGARETYOUMOURNFOR')
w.segment('pressinginvestedthebecomethemselves')
'''
__title__ = 'wordsegmentation'
__version__ = '0.3.5'
__author__ = 'Weihan Jiang'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2015 Weihan Jiang'
|
the-stack_0_24127 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test spending coinbase transactions.
The coinbase transaction in block N can appear in block
N+100... so is valid in the mempool when the best block
height is N+99.
This test makes sure coinbase spends that will be mature
in the next block are accepted into the memory pool,
but less mature coinbase spends are NOT.
"""
from decimal import Decimal
from test_framework.blocktools import SUBSIDY, create_raw_transaction
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class MempoolSpendCoinbaseTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
chain_height = self.nodes[0].getblockcount()
assert_equal(chain_height, 200)
node0_address = self.nodes[0].getnewaddress()
# Coinbase at height chain_height-100+1 ok in mempool, should
# get mined. Coinbase at height chain_height-100+2 is
# is too immature to spend.
b = [self.nodes[0].getblockhash(n) for n in range(101, 103)]
coinbase_txids = [self.nodes[0].getblock(h)['tx'][0] for h in b]
spends_raw = [create_raw_transaction(self.nodes[0], txid, node0_address,
amount=SUBSIDY - Decimal('1'), vout=1)
for txid in coinbase_txids]
spend_101_id = self.nodes[0].sendrawtransaction(spends_raw[0])
# coinbase at height 102 should be too immature to spend
assert_raises_rpc_error(-26, "bad-txns-premature-spend-of-coinbase",
self.nodes[0].sendrawtransaction, spends_raw[1])
# mempool should have just spend_101:
assert_equal(self.nodes[0].getrawmempool(), [spend_101_id])
# mine a block, spend_101 should get confirmed
self.nodes[0].generate(1)
assert_equal(set(self.nodes[0].getrawmempool()), set())
# ... and now height 102 can be spent:
spend_102_id = self.nodes[0].sendrawtransaction(spends_raw[1])
assert_equal(self.nodes[0].getrawmempool(), [spend_102_id])
if __name__ == '__main__':
MempoolSpendCoinbaseTest().main()
|
the-stack_0_24130 | import unittest
from super_gradients.training import SgModel
from super_gradients.training.metrics import Accuracy
from super_gradients.training.datasets import ClassificationTestDatasetInterface
from super_gradients.training.models import LeNet
from super_gradients.training.utils.callbacks import TestLRCallback
class LRCooldownTest(unittest.TestCase):
def setUp(self) -> None:
self.dataset_params = {"batch_size": 4}
self.dataset = ClassificationTestDatasetInterface(dataset_params=self.dataset_params)
self.arch_params = {'num_classes': 10}
def test_lr_cooldown_with_lr_scheduling(self):
# Define Model
net = LeNet()
model = SgModel("lr_warmup_test", model_checkpoints_location='local')
model.connect_dataset_interface(self.dataset)
model.build_model(net, arch_params=self.arch_params)
lrs = []
phase_callbacks = [TestLRCallback(lr_placeholder=lrs)]
train_params = {"max_epochs": 7, "cosine_final_lr_ratio": 0.2, "lr_mode": "cosine",
"lr_cooldown_epochs": 2,
"lr_warmup_epochs": 3, "initial_lr": 1, "loss": "cross_entropy", "optimizer": 'SGD',
"criterion_params": {}, "optimizer_params": {"weight_decay": 1e-4, "momentum": 0.9},
"train_metrics_list": [Accuracy()], "valid_metrics_list": [Accuracy()],
"loss_logging_items_names": ["Loss"], "metric_to_watch": "Accuracy",
"greater_metric_to_watch_is_better": True, "ema": False, "phase_callbacks": phase_callbacks}
expected_lrs = [0.25, 0.5, 0.75, 0.9236067977499791, 0.4763932022500211, 0.4763932022500211, 0.4763932022500211]
model.train(train_params)
        # ALTHOUGH NOT SEEN HERE, THE 4TH EPOCH USES LR=1, SO THIS IS THE EXPECTED LIST AS WE COLLECT
# THE LRS AFTER THE UPDATE
self.assertListEqual(lrs, expected_lrs)
|
the-stack_0_24131 | import pytest
from pystachio.basic import *
from pystachio.composite import *
from pystachio.container import List, Map
from pystachio.naming import Ref
def ref(address):
return Ref.from_address(address)
def test_basic_types():
class Resources(Struct):
cpu = Float
ram = Integer
assert Resources().check().ok()
assert Resources(cpu = 1.0).check().ok()
assert Resources(cpu = 1.0, ram = 100).check().ok()
assert Resources(cpu = 1, ram = 100).check().ok()
assert Resources(cpu = '1.0', ram = 100).check().ok()
def test_bad_inputs():
class Resources(Struct):
cpu = Float
ram = Required(Integer)
with pytest.raises(AttributeError):
Resources(herp = "derp")
with pytest.raises(AttributeError):
Resources({'foo': 'bar'})
with pytest.raises(ValueError):
Resources(None)
def test_nested_composites():
class Resources(Struct):
cpu = Float
ram = Integer
class Process(Struct):
name = String
resources = Resources
assert Process().check().ok()
assert Process(name = "hello_world").check().ok()
assert Process(resources = Resources()).check().ok()
assert Process(resources = Resources(cpu = 1.0)).check().ok()
assert Process(resources = Resources(cpu = 1)).check().ok()
assert Process(name = 15)(resources = Resources(cpu = 1.0)).check().ok()
repr(Process(name = 15)(resources = Resources(cpu = 1.0)))
repr(Process.TYPEMAP)
def test_typesig():
class Process1(Struct):
name = String
class Process2(Struct):
name = Required(String)
class Process3(Struct):
name = Default(String, "foo")
class Process4(Struct):
name = String
assert Process1.TYPEMAP['name'] == Process4.TYPEMAP['name']
assert Process1.TYPEMAP['name'] != Process2.TYPEMAP['name']
assert Process1.TYPEMAP['name'] != Process3.TYPEMAP['name']
assert Process2.TYPEMAP['name'] != Process3.TYPEMAP['name']
repr(Process1.TYPEMAP['name'])
def test_defaults():
class Resources(Struct):
cpu = Default(Float, 1.0)
ram = Integer
assert Resources() == Resources(cpu = 1.0)
assert not Resources() == Resources(cpu = 2.0)
assert Resources() != Resources(cpu = 2.0)
assert not Resources() != Resources(cpu = 1.0)
assert Resources(cpu = 2.0)._schema_data['cpu'] == Float(2.0)
class Process(Struct):
name = String
resources = Default(Resources, Resources(ram = 10))
assert Process().check().ok()
assert Process() == Process(resources = Resources(cpu = 1.0, ram = 10))
assert Process() != Process(resources = Resources())
assert Process()(resources = Empty).check().ok()
def test_composite_interpolation():
class Resources(Struct):
cpu = Required(Float)
ram = Integer
disk = Integer
class Process(Struct):
name = Required(String)
resources = Map(String, Resources)
p = Process(name = "hello")
assert p(resources = {'foo': Resources()}) == \
p(resources = {'{{whee}}': Resources()}).bind(whee='foo')
assert p(resources = {'{{whee}}': Resources(cpu='{{whee}}')}).bind(whee=1.0) == \
p(resources = {'1.0': Resources(cpu=1.0)})
def test_internal_interpolate():
class Process(Struct):
name = Required(String)
cmdline = Required(String)
class Task(Struct):
name = Default(String, 'task-{{processes[0].name}}')
processes = Required(List(Process))
class Job(Struct):
name = Default(String, '{{task.name}}')
task = Required(Task)
assert Task().name() == String('task-{{processes[0].name}}')
assert Task(processes=[Process(name='hello_world', cmdline='echo hello_world')]).name() == \
String('task-hello_world')
assert Task(processes=[Process(name='hello_world', cmdline='echo hello_world'),
Process(name='hello_world2', cmdline='echo hello world')]).name() == \
String('task-hello_world')
assert Job(task=Task(processes=[Process(name="hello_world")])).name() == \
String('task-hello_world')
def test_find():
class Resources(Struct):
cpu = Required(Float)
ram = Integer
disks = List(String)
class Process(Struct):
name = Required(String)
resources = Map(String, Resources)
res0 = Resources(cpu = 0.0, ram = 0)
res1 = Resources(cpu = 1.0, ram = 1, disks = ['hda3'])
res2 = Resources(cpu = 2.0, ram = 2, disks = ['hda3', 'hdb3'])
proc = Process(name = "hello", resources = {
'res0': res0,
'res1': res1,
'res2': res2
})
with pytest.raises(Namable.NotFound):
proc.find(ref('herp'))
assert proc.find(ref('name')) == String('hello')
assert proc.find(ref('resources[res0].cpu')) == Float(0.0)
assert proc.find(ref('resources[res0].ram')) == Integer(0)
with pytest.raises(Namable.NotFound):
proc.find(ref('resources[res0].disks'))
with pytest.raises(Namable.NamingError):
proc.find(ref('resources.res0.disks'))
with pytest.raises(Namable.NamingError):
proc.find(ref('resources[res0][disks]'))
with pytest.raises(Namable.Unnamable):
proc.find(ref('name.herp'))
with pytest.raises(Namable.Unnamable):
proc.find(ref('name[herp]'))
assert proc.find(ref('resources[res1].ram')) == Integer(1)
assert proc.find(ref('resources[res1].disks[0]')) == String('hda3')
assert proc.find(ref('resources[res2].disks[0]')) == String('hda3')
assert proc.find(ref('resources[res2].disks[1]')) == String('hdb3')
def test_getattr_functions():
class Resources(Struct):
cpu = Required(Float)
ram = Integer
disk = Integer
class Process(Struct):
name = Required(String)
resources = Map(String, Resources)
# Basic getattr + hasattr
assert Process(name = "hello").name() == String('hello')
assert Process().has_name() is False
assert Process(name = "hello").has_name() is True
p = Process(name = "hello")
p1 = p(resources = {'foo': Resources()})
p2 = p(resources = {'{{whee}}': Resources()}).bind(whee='foo')
assert p1.has_resources()
assert p2.has_resources()
assert String('foo') in p1.resources()
assert String('foo') in p2.resources()
def test_getattr_bad_cases():
# Technically speaking if we had
# class Tricky(Struct):
# stuff = Integer
# has_stuff = Integer
  # then the resulting behavior would be more or less undefined.
class Tricky(Struct):
has_stuff = Integer
t = Tricky()
assert t.has_has_stuff() is False
assert t.has_stuff() is Empty
with pytest.raises(AttributeError):
t.this_should_properly_raise
def test_self_super():
class Child(Struct):
value = Integer
class Parent(Struct):
child = Child
value = Integer
class Grandparent(Struct):
parent = Parent
value = Integer
parent = Parent(child=Child(value='{{super.value}}'), value=23)
parent, _ = parent.interpolate()
assert parent.child().value().get() == 23
grandparent = Grandparent(parent=Parent(child=Child(value='{{super.super.value}}')), value=23)
grandparent, _ = grandparent.interpolate()
assert grandparent.parent().child().value().get() == 23
parent = Parent(child=Child(value=23), value='{{child.value}}')
parent, _ = parent.interpolate()
assert parent.child().value().get() == 23
parent = Parent(child=Child(value=23), value='{{self.child.value}}')
parent, _ = parent.interpolate()
assert parent.child().value().get() == 23
def test_hashing():
class Resources(Struct):
cpu = Float
ram = Integer
class Process(Struct):
name = String
resources = Resources
map = {
Resources(): 'foo',
Process(): 'bar',
Resources(cpu=1.1): 'baz',
Process(resources=Resources(cpu=1.1)): 'derp'
}
assert Resources() in map
assert Process() in map
assert Resources(cpu=1.1) in map
assert Resources(cpu=2.2) not in map
assert Process(resources=Resources(cpu=1.1)) in map
assert Process(resources=Resources(cpu=2.2)) not in map
def test_super():
class Monitor(Struct):
def json_dumps(self):
return super(Monitor, self).json_dumps()
|
the-stack_0_24134 | """Python Sorted Collections
SortedCollections is an Apache2 licensed Python sorted collections library.
>>> from sortedcollections import ValueSortedDict
>>> vsd = ValueSortedDict({'a': 2, 'b': 1, 'c': 3})
>>> list(vsd.keys())
['b', 'a', 'c']
:copyright: (c) 2015-2021 by Grant Jenks.
:license: Apache 2.0, see LICENSE for more details.
"""
from sortedcontainers import (
SortedDict,
SortedList,
SortedListWithKey,
SortedSet,
)
from .nearestdict import NearestDict
from .ordereddict import OrderedDict
from .recipes import (
IndexableDict,
IndexableSet,
ItemSortedDict,
OrderedSet,
SegmentList,
ValueSortedDict,
)
__all__ = [
'IndexableDict',
'IndexableSet',
'ItemSortedDict',
'NearestDict',
'OrderedDict',
'OrderedSet',
'SegmentList',
'SortedDict',
'SortedList',
'SortedListWithKey',
'SortedSet',
'ValueSortedDict',
]
__version__ = '2.1.0'
|
the-stack_0_24135 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Some layered modules/functions to help users writing custom training loop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import inspect
import six
import tensorflow as tf
def create_loop_fn(step_fn):
"""Creates a multiple steps function driven by the python while loop.
Args:
step_fn: A function which takes `iterator` as input.
Returns:
    A callable defined as the `loop_fn` definition below.
"""
def loop_fn(iterator, num_steps, state=None, reduce_fn=None):
"""A loop function with multiple steps.
Args:
iterator: A nested structure of tf.data `Iterator` or
`DistributedIterator`.
num_steps: The number of steps in the loop. If `num_steps==-1`, will
        iterate until exhausting the iterator.
state: An optional initial state before running the loop.
reduce_fn: a callable defined as `def reduce_fn(state, value)`, where
`value` is the outputs from `step_fn`.
Returns:
The updated state.
"""
try:
step = 0
while (num_steps == -1 or step < num_steps):
outputs = step_fn(iterator)
if reduce_fn is not None:
state = reduce_fn(state, outputs)
step += 1
return state
except (StopIteration, tf.errors.OutOfRangeError):
return state
return loop_fn
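# A minimal usage sketch of `create_loop_fn` (illustrative only, not part of the
# original module): `step_fn`, `train_one_step` and `dataset` below are assumed
# names; any callable that pulls one batch from the iterator and runs a train
# step fits the same pattern.
#
#   def step_fn(iterator):
#     batch = next(iterator)
#     return train_one_step(batch)      # hypothetical training step
#
#   loop_fn = create_loop_fn(step_fn)
#   iterator = iter(dataset)            # any tf.data iterator
#   state = loop_fn(iterator, num_steps=100)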
def create_tf_while_loop_fn(step_fn):
"""Create a multiple steps function driven by tf.while_loop on the host.
Args:
step_fn: A function which takes `iterator` as input.
Returns:
    A callable defined as the `loop_fn` definition below.
"""
@tf.function
def loop_fn(iterator, num_steps):
"""A loop function with multiple steps.
Args:
iterator: A nested structure of tf.data `Iterator` or
`DistributedIterator`.
num_steps: The number of steps in the loop. Must be a tf.Tensor.
"""
if not isinstance(num_steps, tf.Tensor):
raise ValueError("`num_steps` should be an `tf.Tensor`. Python object "
"may cause retracing.")
for _ in tf.range(num_steps):
step_fn(iterator)
return loop_fn
def make_distributed_dataset(strategy, dataset_or_fn, *args, **kwargs):
"""A helper function to create distributed dataset.
Args:
strategy: An instance of `tf.distribute.Strategy`.
dataset_or_fn: A instance of `tf.data.Dataset` or a function which takes an
`tf.distribute.InputContext` as input and returns a `tf.data.Dataset`. If
it is a function, it could optionally have an argument named
`input_context` which is `tf.distribute.InputContext` argument type.
*args: The list of arguments to be passed to dataset_or_fn.
**kwargs: Any keyword arguments to be passed.
Returns:
A distributed Dataset.
"""
if strategy is None:
strategy = tf.distribute.get_strategy()
if isinstance(dataset_or_fn, tf.data.Dataset):
return strategy.experimental_distribute_dataset(dataset_or_fn)
if not callable(dataset_or_fn):
raise ValueError("`dataset_or_fn` should be either callable or an instance "
"of `tf.data.Dataset`")
def dataset_fn(ctx):
"""Wrapped dataset function for creating distributed dataset.."""
# If `dataset_or_fn` is a function and has `input_context` as argument
# names, pass `ctx` as the value of `input_context` when calling
# `dataset_or_fn`. Otherwise `ctx` will not be used when calling
# `dataset_or_fn`.
if six.PY3:
argspec = inspect.getfullargspec(dataset_or_fn)
else:
argspec = inspect.getargspec(dataset_or_fn)
args_names = argspec.args
if "input_context" in args_names:
kwargs["input_context"] = ctx
ds = dataset_or_fn(*args, **kwargs)
return ds
return strategy.experimental_distribute_datasets_from_function(dataset_fn)
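# Illustrative sketch of how a dataset function with an `input_context` argument
# might be passed to `make_distributed_dataset` (the builder below is an
# assumption, not from the original code):
#
#   def build_dataset(batch_size, input_context=None):
#     ds = tf.data.Dataset.range(1000).batch(batch_size)
#     if input_context is not None:
#       # Shard the dataset per input pipeline when running distributed.
#       ds = ds.shard(input_context.num_input_pipelines,
#                     input_context.input_pipeline_id)
#     return ds
#
#   dist_ds = make_distributed_dataset(strategy, build_dataset, 32)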
class SummaryManager(object):
"""A class manages writing summaries."""
def __init__(self,
summary_writer,
summary_fn,
global_step=None,
summary_interval=None):
"""Construct a summary manager object.
Args:
summary_writer: A `tf.summary.SummaryWriter` instance for writing
summaries.
summary_fn: A callable defined as `def summary_fn(name, tensor,
step=None)`, which describes the summary operation.
global_step: A `tf.Variable` instance for checking the current global step
value, in case users want to save summaries every N steps.
summary_interval: An integer, indicates the minimum step interval between
two summaries.
"""
if summary_writer is not None:
self._summary_writer = summary_writer
self._enabled = True
else:
self._summary_writer = tf.summary.create_noop_writer()
self._enabled = False
self._summary_fn = summary_fn
if global_step is None:
self._global_step = tf.summary.experimental.get_step()
else:
self._global_step = global_step
if summary_interval is not None:
if self._global_step is None:
raise ValueError("`summary_interval` is not None, but no `global_step` "
"can be obtained ")
self._last_summary_step = self._global_step.numpy()
self._summary_interval = summary_interval
@property
def summary_interval(self):
return self._summary_interval
@property
def summary_writer(self):
"""Returns the underlying summary writer."""
return self._summary_writer
def write_summaries(self, items, always_write=True):
"""Write a bulk of summaries.
Args:
items: a dictionary of `Tensors` for writing summaries.
always_write: An optional boolean. If `True`, the manager will always
write summaries unless the summaries have been written for the same
step. Otherwise the manager will only write the summaries if the
interval between summaries are larger than `summary_interval`.
Returns:
A boolean indicates whether the summaries are written or not.
"""
# TODO(rxsang): Support writing summaries with nested structure, so users
# can split the summaries into different directories for nicer visualization
# in Tensorboard, like train and eval metrics.
if not self._enabled:
return False
if self._summary_interval is not None:
current_step = self._global_step.numpy()
if current_step == self._last_summary_step:
return False
if not always_write and current_step < (self._last_summary_step +
self._summary_interval):
return False
self._last_summary_step = current_step
with self._summary_writer.as_default():
for name, tensor in items.items():
self._summary_fn(name, tensor, step=self._global_step)
return True
@six.add_metaclass(abc.ABCMeta)
class Trigger(object):
"""An abstract class representing a "trigger" for some event."""
@abc.abstractmethod
def __call__(self, value: float, force_trigger=False):
"""Maybe trigger the event based on the given value.
Args:
value: the value for triggering.
      force_trigger: Whether the trigger is force-triggered.
Returns:
`True` if the trigger is triggered on the given `value`, and
`False` otherwise.
"""
@abc.abstractmethod
def reset(self):
"""Reset states in the trigger."""
class IntervalTrigger(Trigger):
"""Triggers on every fixed interval."""
def __init__(self, interval, start=0):
"""Constructs the IntervalTrigger.
Args:
interval: The triggering interval.
start: An initial value for the trigger.
"""
self._interval = interval
self._last_trigger_value = start
def __call__(self, value, force_trigger=False):
"""Maybe trigger the event based on the given value.
Args:
value: the value for triggering.
      force_trigger: If True, the trigger will be force-triggered unless the
last trigger value is equal to `value`.
Returns:
`True` if the trigger is triggered on the given `value`, and
`False` otherwise.
"""
if force_trigger and value != self._last_trigger_value:
self._last_trigger_value = value
return True
if self._interval and self._interval > 0:
if value >= self._last_trigger_value + self._interval:
self._last_trigger_value = value
return True
return False
def reset(self):
"""See base class."""
self._last_trigger_value = 0
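# Behaviour sketch for IntervalTrigger (hand-checked against the code above,
# not taken from the original file):
#   trigger = IntervalTrigger(interval=100, start=0)
#   trigger(50)   -> False   (50 <  0 + 100)
#   trigger(100)  -> True    (100 >= 0 + 100; last trigger value becomes 100)
#   trigger(150)  -> False   (150 < 100 + 100)
#   trigger(150, force_trigger=True) -> True  (forced, value != last trigger value)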
class EpochHelper(object):
"""A Helper class to handle epochs in Customized Training Loop."""
def __init__(self, epoch_steps, global_step):
"""Constructs the EpochHelper.
Args:
epoch_steps: An integer indicates how many steps in an epoch.
global_step: A `tf.Variable` instance indicates the current global step.
"""
self._epoch_steps = epoch_steps
self._global_step = global_step
self._current_epoch = None
self._epoch_start_step = None
self._in_epoch = False
def epoch_begin(self):
"""Returns whether a new epoch should begin."""
if self._in_epoch:
return False
current_step = self._global_step.numpy()
self._epoch_start_step = current_step
self._current_epoch = current_step // self._epoch_steps
self._in_epoch = True
return True
def epoch_end(self):
"""Returns whether the current epoch should end."""
if not self._in_epoch:
raise ValueError("`epoch_end` can only be called inside an epoch")
current_step = self._global_step.numpy()
epoch = current_step // self._epoch_steps
if epoch > self._current_epoch:
self._in_epoch = False
return True
return False
@property
def batch_index(self):
"""Index of the next batch within the current epoch."""
return self._global_step.numpy() - self._epoch_start_step
@property
def current_epoch(self):
return self._current_epoch
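# Sketch of the intended driver pattern for EpochHelper (assumed usage, not from
# the original file): wrap each chunk of training steps with epoch_begin()/epoch_end().
# `global_step`, `total_steps`, `iterator`, `steps_per_loop` and `loop_fn` are
# placeholders for objects the caller would already have.
#
#   epoch_helper = EpochHelper(epoch_steps=1000, global_step=global_step)
#   while global_step.numpy() < total_steps:
#     if epoch_helper.epoch_begin():
#       print("starting epoch", epoch_helper.current_epoch)
#     loop_fn(iterator, tf.constant(steps_per_loop))   # advances global_step
#     if epoch_helper.epoch_end():
#       print("finished epoch", epoch_helper.current_epoch)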
|
the-stack_0_24136 | import time
import unittest
from botoflow import WorkflowWorker, ActivityWorker, workflow_starter
from multiprocessing_workflows import OneMultiWorkflow, TwoMultiWorkflow
from various_activities import BunchOfActivities
from utils import SWFMixIn
class TestMultiWorkflows(SWFMixIn, unittest.TestCase):
def test_two_workflows(self):
wf_worker = WorkflowWorker(
self.session, self.region, self.domain, self.task_list,
OneMultiWorkflow, TwoMultiWorkflow)
act_worker = ActivityWorker(
self.session, self.region, self.domain, self.task_list, BunchOfActivities())
with workflow_starter(self.session, self.region, self.domain, self.task_list):
instance = OneMultiWorkflow.execute(arg1=1, arg2=2)
self.workflow_executions.append(instance.workflow_execution)
instance = TwoMultiWorkflow.execute(arg1=1, arg2=2)
self.workflow_executions.append(instance.workflow_execution)
for i in range(2):
wf_worker.run_once()
act_worker.run_once()
wf_worker.run_once()
wf_worker.run_once()
time.sleep(1)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_24140 | import logging
import asyncio
from ike import const
from ike.payloads import Fragment
from ike.protocol import IKE, State, Packet
from ike.const import ExchangeType
class IKEResponder(asyncio.DatagramProtocol):
def connection_made(self, transport):
self.transport = transport
self.clients = {}
def datagram_received(self, data, address):
(host, port) = address
ike = self.clients.get(address)
if not ike:
sock = self.transport.get_extra_info("socket")
my_address = sock.getsockname()
peer = address
ike = IKE(my_address, peer)
self.clients[address] = ike
try:
# work around an iSPI check in ike library
packet = Packet(data=data)
packet.header = data[0:const.IKE_HEADER.size]
(packet.iSPI, packet.rSPI, next_payload, packet.version, exchange_type, packet.flags,
packet.message_id, packet.length) = const.IKE_HEADER.unpack(packet.header)
if ike.iSPI != packet.iSPI:
ike.iSPI = packet.iSPI
packet = ike.parse_packet(data=data)
for payload in packet.payloads:
if not isinstance(payload, Fragment):
continue
if not hasattr(ike, 'fragments_log'):
ike.fragments_log = []
ike.fragments_log.append(payload)
if payload.length < 8:
self.alert(host, port, [f._data for f in ike.fragments_log])
ike.fragments_log = []
if ike.state == State.STARTING and packet.exchange_type == ExchangeType.IKE_SA_INIT:
self.transport.sendto(ike.init_send(), address)
elif ike.state == State.INIT and packet.exchange_type == ExchangeType.IKE_SA_INIT:
ike.init_recv()
ike_auth = ike.auth_send()
self.transport.sendto(ike_auth, address)
elif ike.state == State.AUTH and packet.exchange_type == ExchangeType.IKE_AUTH:
ike.auth_recv()
except Exception:
logger = logging.getLogger()
logger.debug("unsupported packet")
del self.clients[address]
def start(host, port, alert, logger, hpfl):
logger.info('Starting server on port {:d}/udp'.format(port))
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
IKEResponder.alert = alert
t = asyncio.Task(loop.create_datagram_endpoint(IKEResponder, local_addr=(host, port)))
loop.run_until_complete(t)
loop.run_forever()
|
the-stack_0_24142 | """
This file is part of the TheLMA (THe Laboratory Management Application) project.
See LICENSE.txt for licensing, CONTRIBUTORS.txt for contributor information.
Worklist series table.
"""
from sqlalchemy import Column
from sqlalchemy import Integer
from sqlalchemy import Table
__docformat__ = 'reStructuredText en'
__all__ = ['create_table']
def create_table(metadata):
"Table factory."
tbl = Table('worklist_series', metadata,
Column('worklist_series_id', Integer, primary_key=True),
)
return tbl
|
the-stack_0_24143 | # Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# This file is licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
import sys
def get_console_output(instance_id):
"""
Using EC2 GetConsoleOutput API according
https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_GetConsoleOutput.html
"""
ec2 = boto3.resource('ec2')
ec2_instance = ec2.Instance(instance_id)
json_output = ec2_instance.console_output()
return json_output.get('Output', '')
def main():
if len(sys.argv) == 1:
print("Usage: {0} <instance-id>".format(sys.argv[0]))
sys.exit(1)
instance_id = sys.argv[1]
output = get_console_output(instance_id)
print(output)
return 0
if __name__ == '__main__':
sys.exit(main())
|
the-stack_0_24145 | from pynverse import inversefunc, piecewise
import numpy as np
import matplotlib.pyplot as plt
import scipy
cube = lambda x: x**3
invcube = inversefunc(cube)
invcube_a = lambda x: scipy.special.cbrt(x)
square = lambda x: x**2
invsquare = inversefunc(square, domain=0)
invsquare_a = lambda x: x**(1/2.)
log = lambda x: np.log10(x)
invlog = inversefunc(log, domain=0, open_domain=True)
invlog_a = lambda x: 10**x
cos = lambda x: np.cos(x)
invcos = inversefunc(cos, domain=[0, np.pi])
invcos_a = lambda x: np.arccos(x)
tan = lambda x: np.tan(x)
invtan = inversefunc(tan,
domain=[-np.pi/2,np.pi/2],
open_domain=True)
invtan_a =lambda x: np.arctan2(x,1)
pw=lambda x: piecewise(x,[x<1,(x>=1)*(x<3),x>=3],[lambda x: x, lambda x: x**2, lambda x: x+6])
invpw =inversefunc(pw)
invpw_a=lambda x: piecewise(x,[x<1,(x>=1)*(x<9),x>=9],[lambda x: x, lambda x: x**0.5, lambda x: x-6])
N=50
def plot(title,ax1,x1,y1,ax2,x2,y21,y22):
ax1.plot(x1,y1,'-')
ax2.plot(x2,y22,'-',color='b')
ax2.plot(x2,y21,'--',color='r')
ax1.set_ylabel(title)
fig,axes=plt.subplots(6,2,figsize=(5,15))
x1=np.linspace(0,4,100)
x2=np.linspace(0,16,100)
plot('square',axes[0][0],x1,square(x1) ,axes[0][1],x2,invsquare_a(x2),invsquare(x2))
axes[0][1].legend(['Numerically','Analytical\nsolution'],fontsize=10,loc=4)
axes[0][1].set_title('Inverse functions')
axes[0][0].set_title('Direct functions')
x1=np.linspace(-2,2,100)
x2=np.linspace(-8,8,100)
plot('cube',axes[1][0],x1,cube(x1) ,axes[1][1],x2,invcube_a(x2),invcube(x2))
x1=np.linspace(0.00001,10,100)
x2=np.linspace(-5,1,100)
plot('log10',axes[2][0],x1,log(x1) ,axes[2][1],x2,invlog_a(x2),invlog(x2))
x1=np.linspace(0, np.pi,100)
x2=np.linspace(-1,1,100)
plot('cos',axes[3][0],x1,cos(x1) ,axes[3][1],x2,invcos_a(x2),invcos(x2))
x1=np.linspace(-np.pi/2+0.1, np.pi/2-0.1,100)
x2=np.linspace(-10,10,100)
plot('tan',axes[4][0],x1,tan(x1) ,axes[4][1],x2,invtan_a(x2),invtan(x2))
x1=np.linspace(0,4,100)
x2=np.linspace(0,10,100)
plot('piecewise',axes[5][0],x1,pw(x1) ,axes[5][1],x2,invpw_a(x2),invpw(x2))
plt.show() |
the-stack_0_24147 | import torch
from copy import deepcopy
def process_input(batch, opts, mode='train'):
if opts.data.use_batch_sampler:
all_data, label = batch[0], batch[1]
# all_data: 50, 1, 30, 30; label: 50
_c, _w = all_data.size(1), all_data.size(2)
n_way, k_shot = opts.fsl.n_way[0], opts.fsl.k_shot[0]
support_x = torch.zeros(n_way * k_shot, _c, _w, _w).to(opts.ctrl.device)
support_y = torch.zeros(n_way * k_shot).to(opts.ctrl.device)
if (mode == 'test' or mode == 'val') and opts.test.manner == 'standard':
k_query = opts.test.query_num
else:
k_query = opts.fsl.k_query[0]
query_x = torch.zeros(n_way * k_query, _c, _w, _w).to(opts.ctrl.device)
query_y = torch.zeros(n_way * k_query).to(opts.ctrl.device)
cls = torch.unique(label)
for i, curr_cls in enumerate(cls):
support_y[i*k_shot:i*k_shot + k_shot] = curr_cls
query_y[i*k_query:i*k_query + k_query] = curr_cls
curr_data = all_data[curr_cls == label]
support_x[i*k_shot:i*k_shot + k_shot] = curr_data[:k_shot]
query_x[i*k_query:i*k_query + k_query] = curr_data[k_shot:]
support_x, support_y, query_x, query_y = \
support_x.unsqueeze(0), support_y.unsqueeze(0), \
query_x.unsqueeze(0), query_y.unsqueeze(0)
else:
# support_x: support_sz, 3, 84, 84
support_x, support_y, query_x, query_y = \
batch[0].to(opts.ctrl.device), batch[1].to(opts.ctrl.device), \
batch[2].to(opts.ctrl.device), batch[3].to(opts.ctrl.device)
return support_x, support_y, query_x, query_y
def test_model(net, input_db, eval_length, opts, which_ind, curr_shot, optimizer=None, meta_test=None):
"""
optimizer is for meta-test only
meta_test is for using the dataloader in the original relation codebase. Not the same meaning as "meta_learn"
"""
total_correct, total_num, display_onebatch = \
torch.zeros(1).to('cuda'), torch.zeros(1).to('cuda'), False
net.eval()
with torch.no_grad():
for j, batch_test in enumerate(input_db):
if j >= eval_length:
break
support_x, support_y, query_x, query_y = process_input(batch_test, opts, mode='test')
if opts.fsl.ctm:
_, correct = net.forward_CTM(support_x, support_y, query_x, query_y, False)
else:
if opts.model.structure == 'original':
support_x, support_y, query_x, query_y = \
support_x.squeeze(0), support_y.squeeze(0), query_x.squeeze(0), query_y.squeeze(0)
_, correct = net(support_x, support_y, query_x, query_y, False)
else:
_, correct = net(support_x, support_y, query_x, query_y, False,
opts.fsl.n_way[which_ind], curr_shot)
# multi-gpu support
total_correct += correct.sum().float()
total_num += query_y.numel()
# due to python 2, it's converted to int!
accuracy = total_correct / total_num
accuracy = accuracy.item()
net.train()
return accuracy
# def test_model_pretrain(net, input_db, eval_length, opts):
#
# total_correct, total_num, display_onebatch = \
# torch.zeros(1).to('cuda'), torch.zeros(1).to('cuda'), False
#
# net.eval()
# with torch.no_grad():
# for j, batch_test in enumerate(input_db):
#
# if j >= eval_length:
# break
#
# x, y = batch_test[0].to(opts.ctrl.device), batch_test[1].to(opts.ctrl.device)
# predict = net(x).argmax(dim=1, keepdim=True)
# correct = torch.eq(predict, y)
#
# # compute correct
# total_correct += correct.sum().float() # multi-gpu support
# total_num += predict.numel()
#
# accuracy = total_correct / total_num # due to python 2, it's converted to int!
# accuracy = accuracy.item()
# net.train()
# return accuracy
def run_test(opts, val_db, net, vis, **args):
step = args['step']
epoch = args['epoch']
eval_length = args['eval_length']
which_ind = args['which_ind']
curr_shot = args['curr_shot']
curr_query = args['curr_query'] # only for display (evolutionary train)
best_accuracy = args['best_accuracy']
last_epoch = args['last_epoch']
last_iter = args['last_iter']
new_lr = args['new_lr']
train_db = args['train_db']
total_iter = args['total_iter']
optimizer = args['optimizer']
try:
meta_test = args['meta_test']
except KeyError:
meta_test = None
_curr_str = '\tEvaluating at epoch {}, step {}, with eval_length {} ... (be patient)'.format(
epoch, step, int(eval_length))
opts.logger(_curr_str)
accuracy = test_model(net, val_db, eval_length, opts, which_ind, curr_shot, optimizer, meta_test)
eqn = '>' if accuracy > best_accuracy else '<'
_curr_str = '\t\tCurrent {:s} accuracy is {:.4f} {:s} ' \
'previous best accuracy is {:.4f} (ep{}, iter{})'.format(
'evaluation', accuracy, eqn, best_accuracy, last_epoch, last_iter)
opts.logger(_curr_str)
# Also test the train-accuracy at end of one epoch
if opts.test.compute_train_acc and step == total_iter - 1 and not opts.ctrl.eager:
_curr_str = '\tEvaluating training acc at epoch {}, step {}, length {} ... (be patient)'.format(
epoch, step, len(train_db))
opts.logger(_curr_str)
train_acc = test_model(net, train_db, len(train_db), opts, which_ind, curr_shot, optimizer, meta_test)
_curr_str = '\t\tCurrent train_accuracy is {:.4f} at END of epoch {:d}'.format(
train_acc, epoch)
opts.logger(_curr_str)
opts.logger('')
# SAVE MODEL
if accuracy > best_accuracy:
best_accuracy = accuracy
last_epoch = epoch
last_iter = step
model_weights = net.module.state_dict() if opts.ctrl.multi_gpu else net.state_dict()
file_to_save = {
'state_dict': model_weights,
'lr': new_lr,
'epoch': epoch,
'iter': step,
'val_acc': accuracy,
'options': opts,
}
torch.save(file_to_save, opts.io.model_file)
opts.logger('\tBest model saved to: {}, at [epoch {} / iter {}]\n'.format(
opts.io.model_file, epoch, step))
return [best_accuracy, last_epoch, last_iter]
else:
return [-1]
# DONE WITH SAVE MODEL
|
the-stack_0_24148 | """
Support for Abode Security System sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.abode/
"""
import logging
from homeassistant.components.abode import AbodeDevice, DOMAIN as ABODE_DOMAIN
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['abode']
# Sensor types: Name, icon
SENSOR_TYPES = {
'temp': ['Temperature', 'thermometer'],
'humidity': ['Humidity', 'water-percent'],
'lux': ['Lux', 'lightbulb'],
}
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up a sensor for an Abode device."""
import abodepy.helpers.constants as CONST
data = hass.data[ABODE_DOMAIN]
devices = []
for device in data.abode.get_devices(generic_type=CONST.TYPE_SENSOR):
if data.is_excluded(device):
continue
for sensor_type in SENSOR_TYPES:
devices.append(AbodeSensor(data, device, sensor_type))
data.devices.extend(devices)
add_devices(devices)
class AbodeSensor(AbodeDevice):
"""A sensor implementation for Abode devices."""
def __init__(self, data, device, sensor_type):
"""Initialize a sensor for an Abode device."""
super().__init__(data, device)
self._sensor_type = sensor_type
self._icon = 'mdi:{}'.format(SENSOR_TYPES[self._sensor_type][1])
self._name = '{0} {1}'.format(
self._device.name, SENSOR_TYPES[self._sensor_type][0])
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
if self._sensor_type == 'temp':
return self._device.temp
elif self._sensor_type == 'humidity':
return self._device.humidity
elif self._sensor_type == 'lux':
return self._device.lux
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
if self._sensor_type == 'temp':
return self._device.temp_unit
elif self._sensor_type == 'humidity':
return self._device.humidity_unit
elif self._sensor_type == 'lux':
return self._device.lux_unit
|
the-stack_0_24149 | # util.py
import warnings
import types
import collections
import itertools
_bslash = chr(92)
class __config_flags:
"""Internal class for defining compatibility and debugging flags"""
_all_names = []
_fixed_names = []
_type_desc = "configuration"
@classmethod
def _set(cls, dname, value):
if dname in cls._fixed_names:
warnings.warn(
"{}.{} {} is {} and cannot be overridden".format(
cls.__name__,
dname,
cls._type_desc,
str(getattr(cls, dname)).upper(),
)
)
return
if dname in cls._all_names:
setattr(cls, dname, value)
else:
raise ValueError("no such {} {!r}".format(cls._type_desc, dname))
enable = classmethod(lambda cls, name: cls._set(name, True))
disable = classmethod(lambda cls, name: cls._set(name, False))
def col(loc, strg):
"""Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See
:class:`ParserElement.parseString` for more
information on parsing strings containing ``<TAB>`` s, and suggested
methods to maintain a consistent view of the parsed string, the parse
location, and line and column positions within the parsed string.
"""
s = strg
return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc)
def lineno(loc, strg):
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note - the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See :class:`ParserElement.parseString`
for more information on parsing strings containing ``<TAB>`` s, and
suggested methods to maintain a consistent view of the parsed string, the
parse location, and line and column positions within the parsed string.
"""
return strg.count("\n", 0, loc) + 1
def line(loc, strg):
"""Returns the line of text containing loc within a string, counting newlines as line separators.
"""
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
return strg[lastCR + 1 : nextCR] if nextCR >= 0 else strg[lastCR + 1 :]
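# Worked example for the three helpers above (values checked by hand, not taken
# from the original file): for data = "abc\ndef" and loc = 5 (the "e"),
#   col(5, data)    -> 2     (second column of the second line)
#   lineno(5, data) -> 2     (one newline before loc, so line 2)
#   line(5, data)   -> "def"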
class _UnboundedCache:
def __init__(self):
cache = {}
cache_get = cache.get
self.not_in_cache = not_in_cache = object()
def get(self, key):
return cache_get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
def clear(self):
cache.clear()
self.size = None
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
class _FifoCache:
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = collections.OrderedDict()
cache_get = cache.get
def get(self, key):
return cache_get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
while len(cache) > size:
cache.popitem(last=False)
def clear(self):
cache.clear()
self.size = size
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
def _escapeRegexRangeChars(s):
# escape these chars: ^-[]
for c in r"\^-[]":
s = s.replace(c, _bslash + c)
s = s.replace("\n", r"\n")
s = s.replace("\t", r"\t")
return str(s)
def _collapseStringToRanges(s, re_escape=True):
def is_consecutive(c):
c_int = ord(c)
is_consecutive.prev, prev = c_int, is_consecutive.prev
if c_int - prev > 1:
is_consecutive.value = next(is_consecutive.counter)
return is_consecutive.value
is_consecutive.prev = 0
is_consecutive.counter = itertools.count()
is_consecutive.value = -1
def escape_re_range_char(c):
return "\\" + c if c in r"\^-][" else c
if not re_escape:
escape_re_range_char = lambda c: c
ret = []
for _, chars in itertools.groupby(sorted(s), key=is_consecutive):
first = last = next(chars)
for c in chars:
last = c
if first == last:
ret.append(escape_re_range_char(first))
else:
ret.append(
"{}-{}".format(escape_re_range_char(first), escape_re_range_char(last))
)
return "".join(ret)
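# Example of the collapsing behaviour (worked out by hand, not from the source):
#   _collapseStringToRanges("abcdxyz")   -> "a-dx-z"
#   _collapseStringToRanges("0123456789") -> "0-9"
# Characters that are special inside a regex character class are escaped unless
# re_escape=False is passed.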
def _flatten(L):
ret = []
for i in L:
if isinstance(i, list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
|
the-stack_0_24150 | # -*- coding: utf-8 -*-
# Copyright 2019 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
from decimal import Decimal
from enum import IntEnum
from typing import List, Dict, Optional
from .deposit import Deposit
from .deposit_meta import DepositMeta
from ..base.ComponentBase import EngineBase
from ..base.exception import InvalidRequestException, InvalidParamsException
from ..base.type_converter import TypeConverter
from ..base.type_converter_templates import ParamType
from ..icon_constant import ICX_IN_LOOP, Revision
from ..iconscore.icon_score_event_log import EventLogEmitter
if typing.TYPE_CHECKING:
from ..base.address import Address
from ..deploy.storage import IconScoreDeployInfo
from ..iconscore.icon_score_context import IconScoreContext
FIXED_TERM = True
FIXED_RATIO_PER_MONTH = '0.08'
BLOCKS_IN_ONE_MONTH = 1_296_000
class DepositInfo:
"""
Deposit information of a SCORE
"""
def __init__(self, score_address: 'Address'):
# SCORE address
self.score_address: 'Address' = score_address
# List of deposits
self.deposits: List[Deposit] = []
# available virtual STEPs to use
self.available_virtual_step: int = 0
# available deposits to use
self.available_deposit: int = 0
def to_dict(self, casing: Optional = None) -> dict:
"""
Returns properties as `dict`
:return: a dict
"""
new_dict = {}
for key, value in self.__dict__.items():
if value is None:
# Excludes properties which have `None` value
continue
new_key = casing(key) if casing else key
if isinstance(value, list):
new_dict[new_key] = [v.to_dict(casing) for v in value if isinstance(v, Deposit)]
else:
new_dict[new_key] = value
return new_dict
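# Illustrative only: with a camel-case converter such as
#   camel = lambda s: s.split('_')[0] + ''.join(w.title() for w in s.split('_')[1:])
# to_dict(casing=camel) would rename keys like 'available_virtual_step' to
# 'availableVirtualStep' while leaving the values untouched.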
class Engine(EngineBase):
"""
Presenter of the fee operation.
[Role]
- State DB CRUD
- Business logic (inc. Calculation)
"""
_MIN_DEPOSIT_AMOUNT = 5_000 * ICX_IN_LOOP
_MAX_DEPOSIT_AMOUNT = 100_000 * ICX_IN_LOOP
_MIN_DEPOSIT_TERM = BLOCKS_IN_ONE_MONTH
_MAX_DEPOSIT_TERM = _MIN_DEPOSIT_TERM if FIXED_TERM else BLOCKS_IN_ONE_MONTH * 24
def get_deposit_info(self,
context: 'IconScoreContext',
score_address: 'Address',
block_height: int) -> 'DepositInfo':
"""
Gets the SCORE deposit information
:param context: IconScoreContext
:param score_address: SCORE address
:param block_height: current block height
:return: score deposit information in dict
- SCORE Address
- Amount of issued total virtual step
- Amount of Used total virtual step
- deposits in list
"""
self._check_score_valid(context, score_address)
deposit_meta = self._get_or_create_deposit_meta(context, score_address)
deposit_info = DepositInfo(score_address)
# Appends all deposits
for deposit in self._deposit_generator(context, deposit_meta.head_id):
deposit_info.deposits.append(deposit)
# Retrieves available virtual STEPs and deposits
if block_height < deposit.expires:
deposit_info.available_virtual_step += deposit.remaining_virtual_step
deposit_info.available_deposit += \
max(deposit.remaining_deposit - deposit.min_remaining_deposit, 0)
return deposit_info if len(deposit_info.deposits) > 0 else None
def add_deposit(self,
context: 'IconScoreContext',
tx_hash: bytes,
sender: 'Address',
score_address: 'Address',
amount: int,
block_height: int,
term: int):
"""
Deposits ICXs for the SCORE.
        Virtual STEPs may be issued to the SCORE so that it can pay shared fees.
:param context: IconScoreContext
:param tx_hash: tx hash of the deposit transaction
:param sender: ICX sender
:param score_address: SCORE
:param amount: amount of ICXs in loop
:param block_height: current block height
:param term: deposit term in blocks
"""
# [Sub Task]
# - Deposits ICX
# - Calculates Virtual Step
# - Updates Deposit Data
if (FIXED_TERM and amount < self._MIN_DEPOSIT_AMOUNT) \
or not (self._MIN_DEPOSIT_AMOUNT <= amount <= self._MAX_DEPOSIT_AMOUNT):
raise InvalidRequestException('Invalid deposit amount')
if not (self._MIN_DEPOSIT_TERM <= term <= self._MAX_DEPOSIT_TERM):
raise InvalidRequestException('Invalid deposit term')
self._check_score_ownership(context, sender, score_address)
# Withdraws from sender's account
sender_account = context.storage.icx.get_account(context, sender)
sender_account.withdraw(amount)
context.storage.icx.put_account(context, sender_account)
deposit = Deposit(tx_hash, score_address, sender, amount)
deposit.created = block_height
deposit.expires = block_height + term
step_price = context.step_counter.step_price
deposit.virtual_step_issued = \
VirtualStepCalculator.calculate_virtual_step(amount, term, step_price)
self._append_deposit(context, deposit)
def _append_deposit(self, context: 'IconScoreContext', deposit: 'Deposit'):
"""
Append deposit data to storage
"""
deposit_meta = self._get_or_create_deposit_meta(context, deposit.score_address)
deposit.prev_id = deposit_meta.tail_id
context.storage.fee.put_deposit(context, deposit)
# Link to previous item
if deposit.prev_id is not None:
prev_deposit = context.storage.fee.get_deposit(context, deposit.prev_id)
prev_deposit.next_id = deposit.id
context.storage.fee.put_deposit(context, prev_deposit)
# Update head info
if deposit_meta.head_id is None:
deposit_meta.head_id = deposit.id
if deposit_meta.available_head_id_of_virtual_step is None:
deposit_meta.available_head_id_of_virtual_step = deposit.id
if deposit_meta.available_head_id_of_deposit is None:
deposit_meta.available_head_id_of_deposit = deposit.id
if deposit_meta.expires_of_virtual_step < deposit.expires:
deposit_meta.expires_of_virtual_step = deposit.expires
if deposit_meta.expires_of_deposit < deposit.expires:
deposit_meta.expires_of_deposit = deposit.expires
deposit_meta.tail_id = deposit.id
context.storage.fee.put_deposit_meta(context, deposit.score_address, deposit_meta)
def withdraw_deposit(self,
context: 'IconScoreContext',
sender: 'Address',
deposit_id: bytes,
block_height: int) -> (int, int):
"""
Withdraws deposited ICXs from given id.
        A penalty may be charged if the deposit term has not yet expired.
:param context: IconScoreContext
:param sender: msg sender address
:param deposit_id: deposit id, should be tx hash of deposit transaction
:param block_height: current block height
:return: returning amount of icx, penalty amount of icx
"""
# [Sub Task]
# - Checks if the contract term has expired
# - If the term has not finished, it calculates and applies to a penalty
# - Update ICX
deposit = self.get_deposit(context, deposit_id)
if deposit.sender != sender:
raise InvalidRequestException('Invalid sender')
if deposit.score_address != context.tx.to:
raise InvalidRequestException('Invalid SCORE address')
step_price = context.step_counter.step_price
penalty = self._calculate_penalty(deposit, block_height, step_price)
withdrawal_amount = deposit.remaining_deposit - penalty
if withdrawal_amount < 0:
raise InvalidRequestException("Failed to withdraw deposit")
if penalty > 0:
# Move the penalty amount to the treasury account
treasury_account = context.storage.icx.get_treasury_account(context)
treasury_account.deposit(penalty)
context.storage.icx.put_account(context, treasury_account)
if withdrawal_amount > 0:
# Send the withdrawal amount of ICX to sender account
sender_account = context.storage.icx.get_account(context, sender)
sender_account.deposit(withdrawal_amount)
context.storage.icx.put_account(context, sender_account)
self._delete_deposit(context, deposit, block_height)
return withdrawal_amount, penalty
def _delete_deposit(self, context: 'IconScoreContext', deposit: 'Deposit', block_height: int) -> None:
"""
Deletes deposit information from storage
"""
# Updates the previous link
if deposit.prev_id is not None:
prev_deposit = context.storage.fee.get_deposit(context, deposit.prev_id)
prev_deposit.next_id = deposit.next_id
context.storage.fee.put_deposit(context, prev_deposit)
# Updates the next link
if deposit.next_id is not None:
next_deposit = context.storage.fee.get_deposit(context, deposit.next_id)
next_deposit.prev_id = deposit.prev_id
context.storage.fee.put_deposit(context, next_deposit)
# Update index info
deposit_meta = context.storage.fee.get_deposit_meta(context, deposit.score_address)
deposit_meta_changed = False
if deposit_meta.head_id == deposit.id:
deposit_meta.head_id = deposit.next_id
deposit_meta_changed = True
if deposit.id in (deposit_meta.available_head_id_of_virtual_step, deposit_meta.available_head_id_of_deposit):
gen = self._deposit_generator(context, deposit.next_id)
next_available_deposit = \
next(filter(lambda d: block_height < d.expires, gen), None)
next_deposit_id = \
next_available_deposit.id if next_available_deposit is not None else None
if deposit_meta.available_head_id_of_virtual_step == deposit.id:
# Search for next deposit id which is available to use virtual step
deposit_meta.available_head_id_of_virtual_step = next_deposit_id
if deposit_meta.available_head_id_of_deposit == deposit.id:
# Search for next deposit id which is available to use the deposited ICX
deposit_meta.available_head_id_of_deposit = next_deposit_id
deposit_meta_changed = True
if deposit_meta.expires_of_virtual_step == deposit.expires:
gen = self._deposit_generator(context, deposit_meta.available_head_id_of_virtual_step)
max_expires = max(map(lambda d: d.expires, gen), default=-1)
deposit_meta.expires_of_virtual_step = max_expires if max_expires > block_height else -1
deposit_meta_changed = True
if deposit_meta.expires_of_deposit == deposit.expires:
gen = self._deposit_generator(context, deposit_meta.available_head_id_of_deposit)
max_expires = max(map(lambda d: d.expires, gen), default=-1)
deposit_meta.expires_of_deposit = max_expires if max_expires > block_height else -1
deposit_meta_changed = True
if deposit_meta.tail_id == deposit.id:
deposit_meta.tail_id = deposit.prev_id
deposit_meta_changed = True
if deposit_meta_changed:
# Updates if the information has been changed
context.storage.fee.put_deposit_meta(context, deposit.score_address, deposit_meta)
# Deletes deposit info
context.storage.fee.delete_deposit(context, deposit.id)
def get_deposit(self, context: 'IconScoreContext', deposit_id: bytes) -> Deposit:
"""
Gets the deposit data.
Raise an exception if the deposit from the given id does not exist.
:param context: IconScoreContext
:param deposit_id: deposit id, should be tx hash of deposit transaction
:return: deposit data
"""
self._check_deposit_id(deposit_id)
deposit = context.storage.fee.get_deposit(context, deposit_id)
if deposit is None:
raise InvalidRequestException('Deposit not found')
return deposit
def check_score_available(self, context: 'IconScoreContext', score_address: 'Address', block_height: int):
"""
Check if the SCORE is available.
If the SCORE is sharing fee, SCORE should be able to pay the fee,
otherwise, the SCORE is not available.
:param context: IconScoreContext
:param score_address: SCORE address
:param block_height: current block height
"""
deposit_meta: 'DepositMeta' = self._get_or_create_deposit_meta(context, score_address)
if self._is_score_sharing_fee(deposit_meta):
virtual_step_available = \
block_height < deposit_meta.expires_of_virtual_step \
and deposit_meta.available_head_id_of_virtual_step is not None
deposit_available = \
block_height < deposit_meta.expires_of_deposit \
and deposit_meta.available_head_id_of_deposit is not None
if not virtual_step_available and not deposit_available:
raise InvalidRequestException('Out of deposit balance')
@staticmethod
def _is_score_sharing_fee(deposit_meta: 'DepositMeta') -> bool:
return deposit_meta is not None and deposit_meta.head_id is not None
def _get_fee_sharing_proportion(self, context: 'IconScoreContext', deposit_meta: 'DepositMeta'):
if not self._is_score_sharing_fee(deposit_meta):
# If there are no deposits, ignores the fee sharing ratio that the SCORE set.
return 0
return context.fee_sharing_proportion
def charge_transaction_fee(self,
context: 'IconScoreContext',
sender: 'Address',
to: 'Address',
step_price: int,
used_step: int,
block_height: int) -> Dict['Address', int]:
"""
Charges fees for the used STEPs.
        The fee can be shared if the msg recipient is set to share fees.
:param context: IconScoreContext
:param sender: msg sender
:param to: msg recipient
:param step_price: current STEP price
:param used_step: used STEPs
:param block_height: current block height
:return Address-used_step dict
"""
recipient_step = 0
if to.is_contract:
recipient_step = self._charge_fee_from_score(
context, to, step_price, used_step, block_height)
sender_step = used_step - recipient_step
context.engine.icx.charge_fee(context, sender, sender_step * step_price)
step_used_details = {}
if sender_step > 0:
step_used_details[sender] = sender_step
if recipient_step > 0:
step_used_details[to] = recipient_step
return step_used_details
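    # Illustrative split (numbers are assumptions, not from the original code):
    # with used_step = 1_000 and a SCORE fee-sharing proportion of 70, the SCORE
    # covers 1_000 * 70 // 100 = 700 steps (taken from virtual steps first, then
    # from deposits) and the sender pays the remaining 300 steps at the current
    # step price.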
def _charge_fee_from_score(self,
context: 'IconScoreContext',
score_address: 'Address',
step_price: int,
used_step: int,
block_height: int) -> int:
"""
Charges fees from SCORE
Returns total STEPs SCORE paid
"""
deposit_meta = context.storage.fee.get_deposit_meta(context, score_address)
# Amount of STEPs that SCORE will pay
required_step = used_step * self._get_fee_sharing_proportion(context, deposit_meta) // 100
score_used_step = 0
if required_step > 0:
score_used_step, deposit_meta_changed = self._charge_fee_from_virtual_step(
context, deposit_meta, required_step, block_height)
if score_used_step < required_step:
required_icx = (required_step - score_used_step) * step_price
charged_icx, deposit_indices_changed = self._charge_fee_from_deposit(
context, deposit_meta, required_icx, block_height)
score_used_step += charged_icx // step_price
deposit_meta_changed: bool = deposit_meta_changed or deposit_indices_changed
if deposit_meta_changed:
# Updates if the information has been changed
context.storage.fee.put_deposit_meta(context, score_address, deposit_meta)
return score_used_step
def _charge_fee_from_virtual_step(self,
context: 'IconScoreContext',
deposit_meta: 'DepositMeta',
required_step: int,
block_height: int) -> (int, bytes):
"""
Charges fees from available virtual STEPs
Returns total charged amount and whether the properties of 'deposit_meta' are changed
"""
charged_step = 0
should_update_expire = False
last_paid_deposit = None
gen = self._deposit_generator(context, deposit_meta.available_head_id_of_virtual_step)
for deposit in filter(lambda d: block_height < d.expires, gen):
available_virtual_step = deposit.remaining_virtual_step
if required_step < available_virtual_step:
step = required_step
else:
step = available_virtual_step
# All virtual steps are consumed in this loop.
# So if this `expires` is the `max expires`, should find the next `max expires`.
if deposit.expires == deposit_meta.expires_of_virtual_step:
should_update_expire = True
if step > 0:
deposit.consume_virtual_step(step)
context.storage.fee.put_deposit(context, deposit)
last_paid_deposit = deposit
charged_step += step
required_step -= step
if required_step == 0:
break
indices_changed = self._update_virtual_step_indices(
context, deposit_meta, last_paid_deposit, should_update_expire, block_height)
return charged_step, indices_changed
def _update_virtual_step_indices(self,
context: 'IconScoreContext',
deposit_meta: 'DepositMeta',
last_paid_deposit: 'Deposit',
should_update_expire: bool,
block_height: int) -> bool:
"""
Updates indices of virtual steps to DepositMeta and returns whether there exist changes.
"""
next_available_deposit = last_paid_deposit
if last_paid_deposit is not None and last_paid_deposit.remaining_virtual_step == 0:
# All virtual steps have been consumed in the current deposit
# so should find the next available virtual steps
gen = self._deposit_generator(context, last_paid_deposit.next_id)
next_available_deposit = next(filter(lambda d: block_height < d.expires, gen), None)
next_available_deposit_id = next_available_deposit.id if next_available_deposit else None
next_expires = deposit_meta.expires_of_virtual_step
if next_available_deposit_id is None:
# This means that there are no available virtual steps in all deposits.
next_expires = -1
elif should_update_expire:
# Finds next max expires. Sets to -1 if not exist.
gen = self._deposit_generator(context, next_available_deposit_id)
next_expires = max(map(lambda d: d.expires, gen), default=-1)
if deposit_meta.available_head_id_of_virtual_step != next_available_deposit_id \
or deposit_meta.expires_of_virtual_step != next_expires:
            # Updates and returns True if some changes exist
deposit_meta.available_head_id_of_virtual_step = next_available_deposit_id
deposit_meta.expires_of_virtual_step = next_expires
return True
return False
def _charge_fee_from_deposit(self,
context: 'IconScoreContext',
deposit_meta: 'DepositMeta',
required_icx: int,
block_height: int) -> (int, bool):
"""
Charges fees from available deposit ICXs
Returns total charged amount and whether the properties of 'deposit_meta' are changed
"""
assert required_icx > 0
if required_icx == 0:
return 0, False
remaining_required_icx = required_icx
should_update_expire = False
last_paid_deposit = None
# Search for next available deposit id
gen = self._deposit_generator(context, deposit_meta.available_head_id_of_deposit)
for deposit in filter(lambda d: block_height < d.expires, gen):
available_deposit = deposit.remaining_deposit - deposit.min_remaining_deposit
if remaining_required_icx < available_deposit:
charged_icx = remaining_required_icx
else:
charged_icx = available_deposit
# All available deposits are consumed in this loop.
# So if this `expires` is the `max expires`, should find the next `max expires`.
if deposit.expires == deposit_meta.expires_of_deposit:
should_update_expire = True
if charged_icx > 0:
deposit.consume_deposit(charged_icx)
context.storage.fee.put_deposit(context, deposit)
last_paid_deposit = deposit
remaining_required_icx -= charged_icx
if remaining_required_icx == 0:
break
if remaining_required_icx > 0:
# Charges all remaining fee regardless of the minimum remaining amount.
gen = self._deposit_generator(context, deposit_meta.head_id)
for deposit in filter(lambda d: block_height < d.expires, gen):
charged_icx = min(remaining_required_icx, deposit.remaining_deposit)
if charged_icx > 0:
deposit.consume_deposit(charged_icx)
context.storage.fee.put_deposit(context, deposit)
remaining_required_icx -= charged_icx
if remaining_required_icx == 0:
break
indices_changed = self._update_deposit_indices(
context, deposit_meta, last_paid_deposit, should_update_expire, block_height)
return required_icx - remaining_required_icx, indices_changed
def _update_deposit_indices(self,
context: 'IconScoreContext',
deposit_meta: 'DepositMeta',
last_paid_deposit: 'Deposit',
should_update_expire: bool,
block_height: int) -> bool:
"""
Updates indices of deposit to deposit_meta and returns whether there exist changes.
"""
next_available_deposit = last_paid_deposit
if last_paid_deposit.remaining_deposit <= last_paid_deposit.min_remaining_deposit:
# All available deposits have been consumed in the current deposit
# so should find the next available deposits
gen = self._deposit_generator(context, last_paid_deposit.next_id)
next_available_deposit = next(filter(lambda d: block_height < d.expires, gen), None)
next_available_deposit_id = next_available_deposit.id if next_available_deposit else None
next_expires = deposit_meta.expires_of_deposit
if next_available_deposit_id is None:
# This means that there are no available deposits.
next_expires = -1
elif should_update_expire:
# Finds next max expires. Sets to -1 if not exist.
gen = self._deposit_generator(context, next_available_deposit_id)
next_expires = max(map(lambda d: d.expires, gen), default=-1)
if deposit_meta.available_head_id_of_deposit != next_available_deposit_id \
or deposit_meta.expires_of_deposit != next_expires:
# Update and return True if any changes exist
deposit_meta.available_head_id_of_deposit = next_available_deposit_id
deposit_meta.expires_of_deposit = next_expires
return True
return False
def _deposit_generator(self, context: 'IconScoreContext', start_id: Optional[bytes]):
next_id = start_id
while next_id is not None:
deposit = context.storage.fee.get_deposit(context, next_id)
if deposit is None:
break
yield deposit
next_id = deposit.next_id
def _get_score_deploy_info(self, context: 'IconScoreContext', score_address: 'Address') -> 'IconScoreDeployInfo':
deploy_info: 'IconScoreDeployInfo' = context.storage.deploy.get_deploy_info(context, score_address)
if deploy_info is None:
raise InvalidRequestException('Invalid SCORE')
return deploy_info
def _check_score_valid(self, context: 'IconScoreContext', score_address: 'Address') -> None:
deploy_info = self._get_score_deploy_info(context, score_address)
assert deploy_info is not None
def _check_score_ownership(self, context: 'IconScoreContext', sender: 'Address', score_address: 'Address') -> None:
deploy_info = self._get_score_deploy_info(context, score_address)
if deploy_info.owner != sender:
raise InvalidRequestException('Invalid SCORE owner')
@staticmethod
def _check_deposit_id(deposit_id: bytes) -> None:
if not (isinstance(deposit_id, bytes) and len(deposit_id) == 32):
raise InvalidRequestException('Invalid deposit ID')
def _get_or_create_deposit_meta(
self, context: 'IconScoreContext', score_address: 'Address') -> 'DepositMeta':
deposit_meta = context.storage.fee.get_deposit_meta(context, score_address)
return deposit_meta if deposit_meta else DepositMeta()
@staticmethod
def _calculate_penalty(deposit: 'Deposit',
block_height: int,
step_price: int) -> int:
assert isinstance(deposit, Deposit)
assert isinstance(block_height, int)
assert isinstance(step_price, int)
if block_height >= deposit.expires:
return 0
return VirtualStepCalculator.calculate_penalty(
deposit.virtual_step_used,
step_price)
class VirtualStepCalculator:
"""
Calculator for generating Virtual Step
"""
@classmethod
def calculate_virtual_step(cls,
deposit_amount: int,
term: int,
step_price: int) -> int:
"""Returns issuance of virtual-step according to deposit_amount and term
:param deposit_amount: deposit amount in loop unit
:param term: deposit term
:param step_price:
"""
assert term == BLOCKS_IN_ONE_MONTH
return int(Decimal(deposit_amount) * Decimal(FIXED_RATIO_PER_MONTH) / Decimal(step_price))
@classmethod
def calculate_penalty(cls,
virtual_step_used: int,
step_price: int) -> int:
"""Returns penalty according to given parameters
:param virtual_step_used:
:param step_price:
"""
return virtual_step_used * step_price
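# --- Illustrative sketch (editor's addition, not part of the upstream module) ---
# Rough feel for the numbers above, assuming amounts are expressed in loop
# (1 ICX == 10**18 loop) and a hypothetical step_price of 10**10 loop:
#   issued = VirtualStepCalculator.calculate_virtual_step(
#       deposit_amount=5_000 * 10**18, term=BLOCKS_IN_ONE_MONTH, step_price=10**10)
#   penalty = VirtualStepCalculator.calculate_penalty(
#       virtual_step_used=1_000, step_price=10**10)  # == 1_000 * 10**10 loop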
class DepositHandler:
"""
Deposit Handler
"""
# For eventlog emitting
class EventType(IntEnum):
DEPOSIT = 0
WITHDRAW = 1
SIGNATURE_AND_INDEX = (
# DepositAdded(id: bytes, from_: Address, amount: int, term: int)
('DepositAdded(bytes,Address,int,int)', 2),
# DepositWithdrawn(id: bytes, from_: Address, returnAmount: int, penalty: int)
('DepositWithdrawn(bytes,Address,int,int)', 2)
)
@staticmethod
def get_signature_and_index_count(event_type: 'EventType') -> (str, int):
return DepositHandler.SIGNATURE_AND_INDEX[event_type]
def __init__(self):
pass
def handle_deposit_request(self, context: 'IconScoreContext', data: dict):
"""
Handles a fee request (querying or invoking)
:param context: IconScoreContext
:param data: data field
:return:
"""
converted_data = TypeConverter.convert(data, ParamType.DEPOSIT_DATA)
action = converted_data['action']
try:
if action == 'add':
term: int = BLOCKS_IN_ONE_MONTH if FIXED_TERM else converted_data['term']
self._add_deposit(context, term)
elif action == 'withdraw':
self._withdraw_deposit(context, converted_data['id'])
else:
raise InvalidRequestException(f"Invalid action: {action}")
except KeyError:
# missing required params for the action
raise InvalidParamsException("Required params not found")
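# Example payloads handled above (editor's illustration; the exact field encodings
# after the ParamType.DEPOSIT_DATA conversion are an assumption):
#   {'action': 'add', 'term': hex(BLOCKS_IN_ONE_MONTH)}      -> _add_deposit()
#   {'action': 'withdraw', 'id': '0x' + deposit_id.hex()}    -> _withdraw_deposit()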
def _add_deposit(self, context: 'IconScoreContext', term: int):
context.engine.fee.add_deposit(context, context.tx.hash, context.msg.sender, context.tx.to,
context.msg.value, context.block.height, term)
event_log_args = [context.tx.hash, context.msg.sender, context.msg.value, term]
self._emit_event(context, DepositHandler.EventType.DEPOSIT, event_log_args)
def _withdraw_deposit(self, context: 'IconScoreContext', deposit_id: bytes):
if context.msg.value != 0:
raise InvalidRequestException('Invalid value: must be zero')
withdrawal_amount, penalty = context.engine.fee.withdraw_deposit(
context, context.msg.sender, deposit_id, context.block.height)
event_log_args = [deposit_id, context.msg.sender, withdrawal_amount, penalty]
self._emit_event(context, DepositHandler.EventType.WITHDRAW, event_log_args)
@staticmethod
def _emit_event(context: 'IconScoreContext', event_type: 'DepositHandler.EventType', event_log_args: list):
signature, index_count = DepositHandler.get_signature_and_index_count(event_type)
fee_charge: bool = context.revision < Revision.IISS.value
EventLogEmitter.emit_event_log(
context, context.tx.to, signature, event_log_args, index_count, fee_charge)
|
the-stack_0_24151 | from __future__ import print_function, division
from sympy.core.add import Add
from sympy.core.expr import Expr
from sympy.core.mul import Mul
from sympy.core.relational import Equality
from sympy.sets.sets import Interval
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.core.sympify import sympify
from sympy.core.compatibility import is_sequence, range
from sympy.core.containers import Tuple
from sympy.functions.elementary.piecewise import piecewise_fold
from sympy.utilities import flatten
from sympy.utilities.iterables import sift
from sympy.matrices import Matrix
from sympy.tensor.indexed import Idx
def _process_limits(*symbols):
"""Process the list of symbols and convert them to canonical limits,
storing them as Tuple(symbol, lower, upper). The orientation of
the function is also returned when the upper limit is missing
so (x, 1, None) becomes (x, None, 1) and the orientation is changed.
"""
limits = []
orientation = 1
for V in symbols:
if isinstance(V, Symbol):
limits.append(Tuple(V))
continue
elif isinstance(V, Idx):
if V.lower is None or V.upper is None:
limits.append(Tuple(V))
else:
limits.append(Tuple(V, V.lower, V.upper))
continue
elif is_sequence(V, Tuple):
V = sympify(flatten(V))
if isinstance(V[0], (Symbol, Idx)):
newsymbol = V[0]
if len(V) == 2 and isinstance(V[1], Interval):
V[1:] = [V[1].start, V[1].end]
if len(V) == 3:
if V[1] is None and V[2] is not None:
nlim = [V[2]]
elif V[1] is not None and V[2] is None:
orientation *= -1
nlim = [V[1]]
elif V[1] is None and V[2] is None:
nlim = []
else:
nlim = V[1:]
limits.append(Tuple(newsymbol, *nlim))
if isinstance(V[0], Idx):
if V[0].lower is not None and not bool(nlim[0] >= V[0].lower):
raise ValueError("Summation exceeds Idx lower range.")
if V[0].upper is not None and not bool(nlim[1] <= V[0].upper):
raise ValueError("Summation exceeds Idx upper range.")
continue
elif len(V) == 1 or (len(V) == 2 and V[1] is None):
limits.append(Tuple(newsymbol))
continue
elif len(V) == 2:
limits.append(Tuple(newsymbol, V[1]))
continue
raise ValueError('Invalid limits given: %s' % str(symbols))
return limits, orientation
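# Illustrative canonicalization (editor's sketch, read from the code above; not part
# of SymPy itself):
#   _process_limits(x)            -> ([Tuple(x)], 1)
#   _process_limits((x, 1, 3))    -> ([Tuple(x, 1, 3)], 1)
#   _process_limits((x, 1, None)) -> ([Tuple(x, 1)], -1)   # missing upper limit flips orientation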
class ExprWithLimits(Expr):
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
# Any embedded piecewise functions need to be brought out to the
# top level so that integration can go into piecewise mode at the
# earliest possible moment.
function = sympify(function)
if hasattr(function, 'func') and function.func is Equality:
lhs = function.lhs
rhs = function.rhs
return Equality(cls(lhs, *symbols, **assumptions), \
cls(rhs, *symbols, **assumptions))
function = piecewise_fold(function)
if function is S.NaN:
return S.NaN
if symbols:
limits, orientation = _process_limits(*symbols)
else:
# symbol not provided -- we can still try to compute a general form
free = function.free_symbols
if len(free) != 1:
raise ValueError(
"specify dummy variables for %s" % function)
limits, orientation = [Tuple(s) for s in free], 1
# denest any nested calls
while cls == type(function):
limits = list(function.limits) + limits
function = function.function
# Only limits with lower and upper bounds are supported; the indefinite form
# is not supported
if any(len(l) != 3 or None in l for l in limits):
raise ValueError('ExprWithLimits requires values for lower and upper bounds.')
obj = Expr.__new__(cls, **assumptions)
arglist = [function]
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = function.is_commutative # limits already checked
return obj
@property
def function(self):
"""Return the function applied across limits.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x
>>> Integral(x**2, (x,)).function
x**2
See Also
========
limits, variables, free_symbols
"""
return self._args[0]
@property
def limits(self):
"""Return the limits of expression.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, i
>>> Integral(x**i, (i, 1, 3)).limits
((i, 1, 3),)
See Also
========
function, variables, free_symbols
"""
return self._args[1:]
@property
def variables(self):
"""Return a list of the dummy variables
>>> from sympy import Sum
>>> from sympy.abc import x, i
>>> Sum(x**i, (i, 1, 3)).variables
[i]
See Also
========
function, limits, free_symbols
as_dummy : Rename dummy variables
transform : Perform mapping on the dummy variable
"""
return [l[0] for l in self.limits]
@property
def free_symbols(self):
"""
This method returns the symbols in the object, excluding those
that take on a specific value (i.e. the dummy symbols).
Examples
========
>>> from sympy import Sum
>>> from sympy.abc import x, y
>>> Sum(x, (x, y, 1)).free_symbols
set([y])
"""
# don't test for any special values -- nominal free symbols
# should be returned, e.g. don't return set() if the
# function is zero -- treat it like an unevaluated expression.
function, limits = self.function, self.limits
isyms = function.free_symbols
for xab in limits:
if len(xab) == 1:
isyms.add(xab[0])
continue
# take out the target symbol
if xab[0] in isyms:
isyms.remove(xab[0])
# add in the new symbols
for i in xab[1:]:
isyms.update(i.free_symbols)
return isyms
@property
def is_number(self):
"""Return True if the Sum has no free symbols, else False."""
return not self.free_symbols
def as_dummy(self):
"""
Replace instances of the given dummy variables with explicit dummy
counterparts to make clear what are dummy variables and what
are real-world symbols in an object.
Examples
========
>>> from sympy import Integral
>>> from sympy.abc import x, y
>>> Integral(x, (x, x, y), (y, x, y)).as_dummy()
Integral(_x, (_x, x, _y), (_y, x, y))
If the object supports the "integral at" limit ``(x,)`` it
is not treated as a dummy, but the explicit form, ``(x, x)``
of length 2 does treat the variable as a dummy.
>>> Integral(x, x).as_dummy()
Integral(x, x)
>>> Integral(x, (x, x)).as_dummy()
Integral(_x, (_x, x))
If there were no dummies in the original expression, then the
symbols which cannot be changed by subs() are clearly seen as
those with an underscore prefix.
See Also
========
variables : Lists the integration variables
transform : Perform mapping on the integration variable
"""
reps = {}
f = self.function
limits = list(self.limits)
for i in range(-1, -len(limits) - 1, -1):
xab = list(limits[i])
if len(xab) == 1:
continue
x = xab[0]
xab[0] = x.as_dummy()
for j in range(1, len(xab)):
xab[j] = xab[j].subs(reps)
reps[x] = xab[0]
limits[i] = xab
f = f.subs(reps)
return self.func(f, *limits)
def _eval_interval(self, x, a, b):
limits = [(i if i[0] != x else (x, a, b)) for i in self.limits]
integrand = self.function
return self.func(integrand, *limits)
def _eval_subs(self, old, new):
"""
Perform substitutions over non-dummy variables
of an expression with limits. Also, can be used
to specify point-evaluation of an abstract antiderivative.
Examples
========
>>> from sympy import Sum, oo
>>> from sympy.abc import s, n
>>> Sum(1/n**s, (n, 1, oo)).subs(s, 2)
Sum(n**(-2), (n, 1, oo))
>>> from sympy import Integral
>>> from sympy.abc import x, a
>>> Integral(a*x**2, x).subs(x, 4)
Integral(a*x**2, (x, 4))
See Also
========
variables : Lists the integration variables
transform : Perform mapping on the dummy variable for integrals
change_index : Perform mapping on the sum and product dummy variables
"""
from sympy.core.function import AppliedUndef, UndefinedFunction
func, limits = self.function, list(self.limits)
# If one of the expressions we are replacing is used as a func index
# one of two things happens.
# - the old variable first appears as a free variable
# so we perform all free substitutions before it becomes
# a func index.
# - the old variable first appears as a func index, in
# which case we ignore. See change_index.
# Reorder limits to match standard mathematical practice for scoping
limits.reverse()
if not isinstance(old, Symbol) or \
old.free_symbols.intersection(self.free_symbols):
sub_into_func = True
for i, xab in enumerate(limits):
if 1 == len(xab) and old == xab[0]:
xab = (old, old)
limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
if len(xab[0].free_symbols.intersection(old.free_symbols)) != 0:
sub_into_func = False
break
if isinstance(old, AppliedUndef) or isinstance(old, UndefinedFunction):
sy2 = set(self.variables).intersection(set(new.atoms(Symbol)))
sy1 = set(self.variables).intersection(set(old.args))
if not sy2.issubset(sy1):
raise ValueError(
"substitution can not create dummy dependencies")
sub_into_func = True
if sub_into_func:
func = func.subs(old, new)
else:
# old is a Symbol and a dummy variable of some limit
for i, xab in enumerate(limits):
if len(xab) == 3:
limits[i] = Tuple(xab[0], *[l._subs(old, new) for l in xab[1:]])
if old == xab[0]:
break
# simplify redundant limits (x, x) to (x, )
for i, xab in enumerate(limits):
if len(xab) == 2 and (xab[0] - xab[1]).is_zero:
limits[i] = Tuple(xab[0], )
# Reorder limits back to representation-form
limits.reverse()
return self.func(func, *limits)
class AddWithLimits(ExprWithLimits):
r"""Represents unevaluated oriented additions.
Parent class for Integral and Sum.
"""
def __new__(cls, function, *symbols, **assumptions):
# Any embedded piecewise functions need to be brought out to the
# top level so that integration can go into piecewise mode at the
# earliest possible moment.
#
# This constructor only differs from ExprWithLimits
# in the application of the orientation variable. Perhaps merge?
function = sympify(function)
if hasattr(function, 'func') and function.func is Equality:
lhs = function.lhs
rhs = function.rhs
return Equality(cls(lhs, *symbols, **assumptions), \
cls(rhs, *symbols, **assumptions))
function = piecewise_fold(function)
if function is S.NaN:
return S.NaN
if symbols:
limits, orientation = _process_limits(*symbols)
else:
# symbol not provided -- we can still try to compute a general form
free = function.free_symbols
if len(free) != 1:
raise ValueError(
" specify dummy variables for %s. If the integrand contains"
" more than one free symbol, an integration variable should"
" be supplied explicitly e.g., integrate(f(x, y), x)"
% function)
limits, orientation = [Tuple(s) for s in free], 1
# denest any nested calls
while cls == type(function):
limits = list(function.limits) + limits
function = function.function
obj = Expr.__new__(cls, **assumptions)
arglist = [orientation*function]
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = function.is_commutative # limits already checked
return obj
def _eval_adjoint(self):
if all([x.is_real for x in flatten(self.limits)]):
return self.func(self.function.adjoint(), *self.limits)
return None
def _eval_conjugate(self):
if all([x.is_real for x in flatten(self.limits)]):
return self.func(self.function.conjugate(), *self.limits)
return None
def _eval_transpose(self):
if all([x.is_real for x in flatten(self.limits)]):
return self.func(self.function.transpose(), *self.limits)
return None
def _eval_factor(self, **hints):
if 1 == len(self.limits):
summand = self.function.factor(**hints)
if summand.is_Mul:
out = sift(summand.args, lambda w: w.is_commutative \
and not set(self.variables) & w.free_symbols)
return Mul(*out[True])*self.func(Mul(*out[False]), \
*self.limits)
else:
summand = self.func(self.function, self.limits[0:-1]).factor()
if not summand.has(self.variables[-1]):
return self.func(1, [self.limits[-1]]).doit()*summand
elif isinstance(summand, Mul):
return self.func(summand, self.limits[-1]).factor()
return self
def _eval_expand_basic(self, **hints):
summand = self.function.expand(**hints)
if summand.is_Add and summand.is_commutative:
return Add(*[self.func(i, *self.limits) for i in summand.args])
elif summand.is_Matrix:
return Matrix._new(summand.rows, summand.cols,
[self.func(i, *self.limits) for i in summand._mat])
elif summand != self.function:
return self.func(summand, *self.limits)
return self
|
the-stack_0_24154 | from typing import Optional, List
from torch.nn import Module
from torch.optim import Optimizer
from torch.utils.data import ConcatDataset
from avalanche.benchmarks.utils import AvalancheConcatDataset
from avalanche.training.plugins.evaluation import default_evaluator
from avalanche.training.plugins import SupervisedPlugin, EvaluationPlugin
from avalanche.training.templates.supervised import SupervisedTemplate
class Cumulative(SupervisedTemplate):
"""Cumulative training strategy.
At each experience, train model with data from all previous experiences
and current experience.
"""
def __init__(
self,
model: Module,
optimizer: Optimizer,
criterion,
train_mb_size: int = 1,
train_epochs: int = 1,
eval_mb_size: int = None,
device=None,
plugins: Optional[List[SupervisedPlugin]] = None,
evaluator: EvaluationPlugin = default_evaluator,
eval_every=-1,
):
"""Init.
:param model: The model.
:param optimizer: The optimizer to use.
:param criterion: The loss criterion to use.
:param train_mb_size: The train minibatch size. Defaults to 1.
:param train_epochs: The number of training epochs. Defaults to 1.
:param eval_mb_size: The eval minibatch size. Defaults to 1.
:param device: The device to use. Defaults to None (cpu).
:param plugins: Plugins to be added. Defaults to None.
:param evaluator: (optional) instance of EvaluationPlugin for logging
and metric computations.
:param eval_every: the frequency of the calls to `eval` inside the
training loop. -1 disables the evaluation. 0 means `eval` is called
only at the end of the learning experience. Values >0 mean that
`eval` is called every `eval_every` epochs and at the end of the
learning experience.
"""
super().__init__(
model,
optimizer,
criterion,
train_mb_size=train_mb_size,
train_epochs=train_epochs,
eval_mb_size=eval_mb_size,
device=device,
plugins=plugins,
evaluator=evaluator,
eval_every=eval_every,
)
self.dataset = None # cumulative dataset
def train_dataset_adaptation(self, **kwargs):
"""
Concatenates all the previous experiences.
"""
if self.dataset is None:
self.dataset = self.experience.dataset
else:
self.dataset = AvalancheConcatDataset(
[self.dataset, self.experience.dataset]
)
self.adapted_dataset = self.dataset
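# Hedged usage sketch (editor's addition): a typical Avalanche training loop with
# this strategy; `benchmark`, `model`, `optimizer` and `criterion` are assumed to be
# defined elsewhere.
#
#   strategy = Cumulative(model, optimizer, criterion,
#                         train_mb_size=32, train_epochs=1, device='cuda')
#   for experience in benchmark.train_stream:
#       strategy.train(experience)            # trains on all data seen so far
#       strategy.eval(benchmark.test_stream)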
|
the-stack_0_24155 | from flask import Flask
from .views import api
def create_app(config=None):
if not config:
config = '{{ project_name }}.config'
app = Flask('{{ project_name }}')
app.config.from_object(config)
app.register_blueprint(api)
return app
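# Hedged usage sketch (editor's addition): the '{{ project_name }}' placeholders are
# rendered by the project template, so the concrete module path is an assumption.
#
#   from {{ project_name }} import create_app
#   app = create_app()            # falls back to '{{ project_name }}.config'
#   app.run(debug=True)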
|
the-stack_0_24157 | ## @file
# This file contained the parser for [Libraries] sections in INF file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
InfLibrarySectionParser
'''
##
# Import Modules
#
import Logger.Log as Logger
from Logger import StringTable as ST
from Logger.ToolError import FORMAT_INVALID
from Parser.InfParserMisc import InfExpandMacro
from Library import DataType as DT
from Library.Parsing import MacroParser
from Library.Misc import GetSplitValueList
from Object.Parser.InfCommonObject import InfLineCommentObject
from Library import GlobalData
from Parser.InfParserMisc import IsLibInstanceInfo
from Parser.InfAsBuiltProcess import GetLibInstanceInfo
from Parser.InfParserMisc import InfParserSectionRoot
class InfLibrarySectionParser(InfParserSectionRoot):
## InfLibraryParser
#
#
def InfLibraryParser(self, SectionString, InfSectionObject, FileName):
#
# For Common INF file
#
if not GlobalData.gIS_BINARY_INF:
#
# Macro defined in this section
#
SectionMacros = {}
ValueList = []
LibraryList = []
LibStillCommentFalg = False
LibHeaderComments = []
LibLineComment = None
#
# Parse section content
#
for Line in SectionString:
LibLineContent = Line[0]
LibLineNo = Line[1]
if LibLineContent.strip() == '':
continue
#
# Found Header Comments
#
if LibLineContent.strip().startswith(DT.TAB_COMMENT_SPLIT):
#
# Last line is comments, and this line go on.
#
if LibStillCommentFalg:
LibHeaderComments.append(Line)
continue
#
# First time encounter comment
#
else:
#
# Clear original data
#
LibHeaderComments = []
LibHeaderComments.append(Line)
LibStillCommentFalg = True
continue
else:
LibStillCommentFalg = False
if len(LibHeaderComments) >= 1:
LibLineComment = InfLineCommentObject()
LineCommentContent = ''
for Item in LibHeaderComments:
LineCommentContent += Item[0] + DT.END_OF_LINE
LibLineComment.SetHeaderComments(LineCommentContent)
#
# Find Tail comment.
#
if LibLineContent.find(DT.TAB_COMMENT_SPLIT) > -1:
LibTailComments = LibLineContent[LibLineContent.find(DT.TAB_COMMENT_SPLIT):]
LibLineContent = LibLineContent[:LibLineContent.find(DT.TAB_COMMENT_SPLIT)]
if LibLineComment is None:
LibLineComment = InfLineCommentObject()
LibLineComment.SetTailComments(LibTailComments)
#
# Find Macro
#
Name, Value = MacroParser((LibLineContent, LibLineNo),
FileName,
DT.MODEL_EFI_LIBRARY_CLASS,
self.FileLocalMacros)
if Name is not None:
SectionMacros[Name] = Value
LibLineComment = None
LibHeaderComments = []
continue
TokenList = GetSplitValueList(LibLineContent, DT.TAB_VALUE_SPLIT, 1)
ValueList[0:len(TokenList)] = TokenList
#
# Replace with Local section Macro and [Defines] section Macro.
#
ValueList = [InfExpandMacro(Value, (FileName, LibLineContent, LibLineNo),
self.FileLocalMacros, SectionMacros, True)
for Value in ValueList]
LibraryList.append((ValueList, LibLineComment,
(LibLineContent, LibLineNo, FileName)))
ValueList = []
LibLineComment = None
LibTailComments = ''
LibHeaderComments = []
continue
#
# Current section archs
#
KeyList = []
for Item in self.LastSectionHeaderContent:
if (Item[1], Item[2]) not in KeyList:
KeyList.append((Item[1], Item[2]))
if not InfSectionObject.SetLibraryClasses(LibraryList, KeyList=KeyList):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR % ("[Library]"),
File=FileName,
Line=Item[3])
#
# For Binary INF
#
else:
self.InfAsBuiltLibraryParser(SectionString, InfSectionObject, FileName)
def InfAsBuiltLibraryParser(self, SectionString, InfSectionObject, FileName):
LibraryList = []
LibInsFlag = False
for Line in SectionString:
LineContent = Line[0]
LineNo = Line[1]
if LineContent.strip() == '':
LibInsFlag = False
continue
if not LineContent.strip().startswith("#"):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_LIB_CONTATIN_ASBUILD_AND_COMMON,
File=FileName,
Line=LineNo,
ExtraData=LineContent)
if IsLibInstanceInfo(LineContent):
LibInsFlag = True
continue
if LibInsFlag:
LibGuid, LibVer = GetLibInstanceInfo(LineContent, GlobalData.gWORKSPACE, LineNo, FileName)
#
# If the VERSION_STRING is missing from the INF file, the tool should default to "0".
#
if LibVer == '':
LibVer = '0'
if LibGuid != '':
if (LibGuid, LibVer) not in LibraryList:
LibraryList.append((LibGuid, LibVer))
#
# Current section archs
#
KeyList = []
Item = ['', '', '']
for Item in self.LastSectionHeaderContent:
if (Item[1], Item[2]) not in KeyList:
KeyList.append((Item[1], Item[2]))
if not InfSectionObject.SetLibraryClasses(LibraryList, KeyList=KeyList):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR % ("[Library]"),
File=FileName,
Line=Item[3])
|
the-stack_0_24158 | # -*- coding: utf-8 -*-
import os
import re
import random
import unittest
import itertools
from collections import namedtuple
from gspread.exceptions import APIError
from oauth2client.service_account import ServiceAccountCredentials
from betamax import Betamax
from betamax.fixtures.unittest import BetamaxTestCase
from betamax_json_body_serializer import JSONBodySerializer
import gspread
from gspread import utils
try:
unicode
except NameError:
basestring = unicode = str
CREDS_FILENAME = os.getenv('GS_CREDS_FILENAME')
SCOPE = [
'https://spreadsheets.google.com/feeds',
'https://www.googleapis.com/auth/drive.file',
]
DUMMY_ACCESS_TOKEN = '<ACCESS_TOKEN>'
I18N_STR = u'Iñtërnâtiônàlizætiøn' # .encode('utf8')
Betamax.register_serializer(JSONBodySerializer)
def sanitize_token(interaction, current_cassette):
headers = interaction.data['request']['headers']
token = headers.get('Authorization')
if token is None:
return
interaction.data['request']['headers']['Authorization'] = [
'Bearer %s' % DUMMY_ACCESS_TOKEN
]
with Betamax.configure() as config:
config.cassette_library_dir = 'tests/cassettes'
config.default_cassette_options['serialize_with'] = 'json_body'
config.before_record(callback=sanitize_token)
record_mode = os.environ.get('GS_RECORD_MODE', 'once')
config.default_cassette_options['record_mode'] = record_mode
def read_credentials(filename):
return ServiceAccountCredentials.from_json_keyfile_name(filename, SCOPE)
def prefixed_counter(prefix, start=1):
c = itertools.count(start)
for value in c:
yield u'%s %s' % (prefix, value)
def get_method_name(self_id):
return self_id.split('.')[-1]
DummyCredentials = namedtuple('DummyCredentials', 'access_token')
class BetamaxGspreadTest(BetamaxTestCase):
@classmethod
def get_temporary_spreadsheet_title(cls):
return 'Test %s' % cls.__name__
@classmethod
def setUpClass(cls):
if CREDS_FILENAME:
cls.auth_credentials = read_credentials(CREDS_FILENAME)
cls.base_gc = gspread.authorize(cls.auth_credentials)
title = 'Test %s' % cls.__name__
cls.temporary_spreadsheet = cls.base_gc.create(title)
else:
cls.auth_credentials = DummyCredentials(DUMMY_ACCESS_TOKEN)
@classmethod
def tearDownClass(cls):
try:
cls.base_gc.del_spreadsheet(cls.temporary_spreadsheet.id)
except AttributeError:
pass
def setUp(self):
super(BetamaxGspreadTest, self).setUp()
self.session.headers.update({'accept-encoding': 'identity'})
self.gc = gspread.Client(self.auth_credentials, session=self.session)
self.gc.login()
self.assertTrue(isinstance(self.gc, gspread.client.Client))
class UtilsTest(unittest.TestCase):
def test_extract_id_from_url(self):
url_id_list = [
# New-style url
(
'https://docs.google.com/spreadsheets/d/'
'1qpyC0X3A0MwQoFDE8p-Bll4hps/edit#gid=0',
'1qpyC0X3A0MwQoFDE8p-Bll4hps',
),
(
'https://docs.google.com/spreadsheets/d/'
'1qpyC0X3A0MwQoFDE8p-Bll4hps/edit',
'1qpyC0X3A0MwQoFDE8p-Bll4hps',
),
(
'https://docs.google.com/spreadsheets/d/'
'1qpyC0X3A0MwQoFDE8p-Bll4hps',
'1qpyC0X3A0MwQoFDE8p-Bll4hps',
),
# Old-style url
(
'https://docs.google.com/spreadsheet/'
'ccc?key=1qpyC0X3A0MwQoFDE8p-Bll4hps&usp=drive_web#gid=0',
'1qpyC0X3A0MwQoFDE8p-Bll4hps',
),
]
for url, id in url_id_list:
self.assertEqual(id, utils.extract_id_from_url(url))
def test_no_extract_id_from_url(self):
self.assertRaises(
gspread.NoValidUrlKeyFound,
utils.extract_id_from_url,
'http://example.org'
)
def test_a1_to_rowcol(self):
self.assertEqual(utils.a1_to_rowcol('ABC3'), (3, 731))
def test_rowcol_to_a1(self):
self.assertEqual(utils.rowcol_to_a1(3, 731), 'ABC3')
self.assertEqual(utils.rowcol_to_a1(1, 104), 'CZ1')
def test_addr_converters(self):
for row in range(1, 257):
for col in range(1, 512):
addr = utils.rowcol_to_a1(row, col)
(r, c) = utils.a1_to_rowcol(addr)
self.assertEqual((row, col), (r, c))
def test_get_gid(self):
gid = 'od6'
self.assertEqual(utils.wid_to_gid(gid), '0')
gid = 'osyqnsz'
self.assertEqual(utils.wid_to_gid(gid), '1751403737')
gid = 'ogsrar0'
self.assertEqual(utils.wid_to_gid(gid), '1015761654')
def test_numericise(self):
self.assertEqual(utils.numericise('faa'), 'faa')
self.assertEqual(utils.numericise('3'), 3)
self.assertEqual(utils.numericise('3_2'), '3_2')
self.assertEqual(utils.numericise('3_2', allow_underscores_in_numeric_literals=False), '3_2')
self.assertEqual(utils.numericise('3_2', allow_underscores_in_numeric_literals=True), 32)
self.assertEqual(utils.numericise('3.1'), 3.1)
self.assertEqual(utils.numericise('', empty2zero=True), 0)
self.assertEqual(utils.numericise('', empty2zero=False), '')
self.assertEqual(utils.numericise('', default_blank=None), None)
self.assertEqual(utils.numericise('', default_blank='foo'), 'foo')
self.assertEqual(utils.numericise(''), '')
self.assertEqual(utils.numericise(None), None)
class GspreadTest(BetamaxGspreadTest):
def _sequence_generator(self):
return prefixed_counter(get_method_name(self.id()))
class ClientTest(GspreadTest):
"""Test for gspread.client."""
def test_no_found_exeption(self):
noexistent_title = "Please don't use this phrase as a name of a sheet."
self.assertRaises(gspread.SpreadsheetNotFound, self.gc.open, noexistent_title)
def test_openall(self):
spreadsheet_list = self.gc.openall()
spreadsheet_list2 = self.gc.openall(spreadsheet_list[0].title)
self.assertTrue(len(spreadsheet_list2) < len(spreadsheet_list))
for s in spreadsheet_list:
self.assertTrue(isinstance(s, gspread.models.Spreadsheet))
for s in spreadsheet_list2:
self.assertTrue(isinstance(s, gspread.models.Spreadsheet))
def test_create(self):
title = 'Test Spreadsheet'
new_spreadsheet = self.gc.create(title)
self.assertTrue(isinstance(new_spreadsheet, gspread.models.Spreadsheet))
def test_copy(self):
original_spreadsheet = self.gc.create("Original")
spreadsheet_copy = self.gc.copy(original_spreadsheet.id)
self.assertTrue(isinstance(spreadsheet_copy, gspread.models.Spreadsheet))
original_metadata = original_spreadsheet.fetch_sheet_metadata()
copy_metadata = spreadsheet_copy.fetch_sheet_metadata()
self.assertEqual(original_metadata['sheets'], copy_metadata['sheets'])
def test_import_csv(self):
title = 'TestImportSpreadsheet'
new_spreadsheet = self.gc.create(title)
sg = self._sequence_generator()
csv_rows = 4
csv_cols = 4
rows = [[next(sg) for j in range(csv_cols)] for i in range(csv_rows)]
simple_csv_data = '\n'.join([','.join(row) for row in rows])
self.gc.import_csv(new_spreadsheet.id, simple_csv_data)
sh = self.gc.open_by_key(new_spreadsheet.id)
self.assertEqual(sh.sheet1.get_all_values(), rows)
self.gc.del_spreadsheet(new_spreadsheet.id)
def test_access_non_existing_spreadsheet(self):
wks = self.gc.open_by_key('test')
with self.assertRaises(APIError) as error:
wks.worksheets()
self.assertEqual(error.exception.args[0]['code'], 404)
self.assertEqual(error.exception.args[0]['message'], 'Requested entity was not found.')
self.assertEqual(error.exception.args[0]['status'], 'NOT_FOUND')
class SpreadsheetTest(GspreadTest):
"""Test for gspread.Spreadsheet."""
def setUp(self):
super(SpreadsheetTest, self).setUp()
self.spreadsheet = self.gc.open(self.get_temporary_spreadsheet_title())
def test_properties(self):
self.assertTrue(re.match(r'^[a-zA-Z0-9-_]+$', self.spreadsheet.id))
self.assertTrue(len(self.spreadsheet.title) > 0)
def test_sheet1(self):
sheet1 = self.spreadsheet.sheet1
self.assertTrue(isinstance(sheet1, gspread.Worksheet))
def test_get_worksheet(self):
sheet1 = self.spreadsheet.get_worksheet(0)
self.assertTrue(isinstance(sheet1, gspread.Worksheet))
def test_worksheet(self):
sheet_title = 'Sheet1'
sheet = self.spreadsheet.worksheet(sheet_title)
self.assertTrue(isinstance(sheet, gspread.Worksheet))
def test_worksheet_iteration(self):
self.assertEqual(
[x.id for x in self.spreadsheet.worksheets()],
[sheet.id for sheet in self.spreadsheet],
)
def test_values_get(self):
sg = self._sequence_generator()
worksheet1_name = u'%s %s' % (u'🌵', next(sg))
worksheet = self.spreadsheet.add_worksheet(worksheet1_name, 10, 10)
range_label = '%s!%s' % (worksheet1_name, 'A1')
values = [
[u'🍇', u'🍉', u'🍋'],
[u'🍐', u'🍎', u'🍓']
]
self.spreadsheet.values_update(
range_label,
params={
'valueInputOption': 'RAW'
},
body={
'values': values
}
)
read_data = self.spreadsheet.values_get(worksheet1_name)
self.assertEqual(values, read_data['values'])
self.spreadsheet.del_worksheet(worksheet)
def test_add_del_worksheet(self):
sg = self._sequence_generator()
worksheet1_name = next(sg)
worksheet2_name = next(sg)
worksheet_list = self.spreadsheet.worksheets()
self.assertEqual(len(worksheet_list), 1)
existing_sheet_title = worksheet_list[0].title
# Add
worksheet1 = self.spreadsheet.add_worksheet(worksheet1_name, 1, 1)
worksheet2 = self.spreadsheet.add_worksheet(worksheet2_name, 1, 1)
# Re-read, check again
worksheet_list = self.spreadsheet.worksheets()
self.assertEqual(len(worksheet_list), 3)
# Delete
self.spreadsheet.del_worksheet(worksheet1)
self.spreadsheet.del_worksheet(worksheet2)
worksheet_list = self.spreadsheet.worksheets()
self.assertEqual(len(worksheet_list), 1)
self.assertEqual(worksheet_list[0].title, existing_sheet_title)
def test_values_batch_get(self):
sg = self._sequence_generator()
worksheet1_name = u'%s %s' % (u'🌵', next(sg))
worksheet = self.spreadsheet.add_worksheet(worksheet1_name, 10, 10)
range_label = '%s!%s' % (worksheet1_name, 'A1')
values = [
[u'🍇', u'🍉', u'🍋'],
[u'🍐', u'🍎', u'🍓']
]
self.spreadsheet.values_update(
range_label,
params={
'valueInputOption': 'RAW'
},
body={
'values': values
}
)
ranges = ["%s!%s:%s" % (worksheet1_name, col, col) for col in ["A", "B", "C"]]
read_data = self.spreadsheet.values_batch_get(ranges)
for colix, rng in enumerate(read_data['valueRanges']):
for rowix, ele in enumerate(rng['values']):
self.assertEqual(values[rowix][colix], ele[0])
self.spreadsheet.del_worksheet(worksheet)
class WorksheetTest(GspreadTest):
"""Test for gspread.Worksheet."""
def setUp(self):
super(WorksheetTest, self).setUp()
self.spreadsheet = self.gc.open(self.get_temporary_spreadsheet_title())
# NOTE(msuozzo): Here, a new worksheet is created for each test.
# This was determined to be faster than reusing a single sheet and
# having to clear its contents after each test.
# Basically: Time(add_wks + del_wks) < Time(range + update_cells)
self.sheet = self.spreadsheet.add_worksheet('wksht_test', 20, 20)
def tearDown(self):
self.spreadsheet.del_worksheet(self.sheet)
super(WorksheetTest, self).tearDown()
def test_acell(self):
cell = self.sheet.acell('A1')
self.assertTrue(isinstance(cell, gspread.models.Cell))
def test_cell(self):
cell = self.sheet.cell(1, 1)
self.assertTrue(isinstance(cell, gspread.models.Cell))
def test_range(self):
cell_range1 = self.sheet.range('A1:A5')
cell_range2 = self.sheet.range(1, 1, 5, 1)
for c1, c2 in zip(cell_range1, cell_range2):
self.assertTrue(isinstance(c1, gspread.models.Cell))
self.assertTrue(isinstance(c2, gspread.models.Cell))
self.assertTrue(c1.col == c2.col)
self.assertTrue(c1.row == c2.row)
self.assertTrue(c1.value == c2.value)
def test_update_acell(self):
sg = self._sequence_generator()
value = next(sg)
self.sheet.update_acell('A2', value)
self.assertEqual(self.sheet.acell('A2').value, value)
def test_update_cell(self):
sg = self._sequence_generator()
value = next(sg)
self.sheet.update_cell(1, 2, value)
self.assertEqual(self.sheet.cell(1, 2).value, value)
self.sheet.update_cell(1, 2, 42)
self.assertEqual(self.sheet.cell(1, 2).value, '42')
self.sheet.update_cell(1, 2, '0042')
self.assertEqual(self.sheet.cell(1, 2).value, '42')
self.sheet.update_cell(1, 2, 42.01)
self.assertEqual(self.sheet.cell(1, 2).value, '42.01')
self.sheet.update_cell(1, 2, u'Артур')
self.assertEqual(self.sheet.cell(1, 2).value, u'Артур')
def test_update_cell_multiline(self):
sg = self._sequence_generator()
value = next(sg)
value = "%s\n%s" % (value, value)
self.sheet.update_cell(1, 2, value)
self.assertEqual(self.sheet.cell(1, 2).value, value)
def test_update_cell_unicode(self):
self.sheet.update_cell(1, 1, I18N_STR)
cell = self.sheet.cell(1, 1)
self.assertEqual(cell.value, I18N_STR)
def test_update_cells(self):
sg = self._sequence_generator()
list_len = 10
value_list = [next(sg) for i in range(list_len)]
# Test multiline
value_list[0] = "%s\n%s" % (value_list[0], value_list[0])
range_label = 'A1:A%s' % list_len
cell_list = self.sheet.range(range_label)
for c, v in zip(cell_list, value_list):
c.value = v
self.sheet.update_cells(cell_list)
cell_list = self.sheet.range(range_label)
for c, v in zip(cell_list, value_list):
self.assertEqual(c.value, v)
def test_update_cells_unicode(self):
cell = self.sheet.cell(1, 1)
cell.value = I18N_STR
self.sheet.update_cells([cell])
cell = self.sheet.cell(1, 1)
self.assertEqual(cell.value, I18N_STR)
def test_update_cells_noncontiguous(self):
sg = self._sequence_generator()
num_rows = 6
num_cols = 4
rows = [[next(sg) for j in range(num_cols)] for i in range(num_rows)]
cell_list = self.sheet.range('A1:D6')
for cell, value in zip(cell_list, itertools.chain(*rows)):
cell.value = value
self.sheet.update_cells(cell_list)
# Re-fetch cells
cell_list = self.sheet.range('A1:D6')
test_values = [c.value for c in cell_list]
top_left = cell_list[0]
bottom_right = cell_list[-1]
top_left.value = top_left_value = next(sg) + ' top_left'
bottom_right.value = bottom_right_value = next(sg) + ' bottom_right'
self.sheet.update_cells([top_left, bottom_right])
cell_list = self.sheet.range('A1:D6')
read_values = [c.value for c in cell_list]
test_values[0] = top_left_value
test_values[-1] = bottom_right_value
self.assertEqual(test_values, read_values)
def test_resize(self):
add_num = 10
new_rows = self.sheet.row_count + add_num
def get_grid_props():
sheets = self.sheet.spreadsheet.fetch_sheet_metadata()['sheets']
return utils.finditem(
lambda x: x['properties']['sheetId'] == self.sheet.id, sheets
)['properties']['gridProperties']
self.sheet.add_rows(add_num)
grid_props = get_grid_props()
self.assertEqual(grid_props['rowCount'], new_rows)
new_cols = self.sheet.col_count + add_num
self.sheet.add_cols(add_num)
grid_props = get_grid_props()
self.assertEqual(grid_props['columnCount'], new_cols)
new_rows -= add_num
new_cols -= add_num
self.sheet.resize(new_rows, new_cols)
grid_props = get_grid_props()
self.assertEqual(grid_props['rowCount'], new_rows)
self.assertEqual(grid_props['columnCount'], new_cols)
def test_freeze(self):
freeze_cols = 1
freeze_rows = 2
def get_grid_props():
sheets = self.sheet.spreadsheet.fetch_sheet_metadata()['sheets']
return utils.finditem(
lambda x: x['properties']['sheetId'] == self.sheet.id, sheets
)['properties']['gridProperties']
self.sheet.freeze(freeze_rows)
grid_props = get_grid_props()
self.assertEqual(grid_props['frozenRowCount'], freeze_rows)
self.sheet.freeze(cols=freeze_cols)
grid_props = get_grid_props()
self.assertEqual(grid_props['frozenColumnCount'], freeze_cols)
self.sheet.freeze(0, 0)
grid_props = get_grid_props()
self.assertTrue('frozenRowCount' not in grid_props)
self.assertTrue('frozenColumnCount' not in grid_props)
def test_basic_filters(self):
def get_sheet():
sheets = self.sheet.spreadsheet.fetch_sheet_metadata()['sheets']
return utils.finditem(
lambda x: x['properties']['sheetId'] == self.sheet.id, sheets
)
def get_basic_filter_range():
return get_sheet()['basicFilter']['range']
self.sheet.resize(20, 20)
self.sheet.add_basic_filter()
filter_range = get_basic_filter_range()
self.assertEquals(filter_range['startRowIndex'], 0)
self.assertEquals(filter_range['startColumnIndex'], 0)
self.assertEquals(filter_range['endRowIndex'], 20)
self.assertEquals(filter_range['endColumnIndex'], 20)
self.sheet.add_basic_filter('B1:C2')
filter_range = get_basic_filter_range()
self.assertEquals(filter_range['startRowIndex'], 0)
self.assertEquals(filter_range['startColumnIndex'], 1)
self.assertEquals(filter_range['endRowIndex'], 2)
self.assertEquals(filter_range['endColumnIndex'], 3)
self.sheet.add_basic_filter(1, 2, 2, 3)
filter_range = get_basic_filter_range()
self.assertEquals(filter_range['startRowIndex'], 0)
self.assertEquals(filter_range['startColumnIndex'], 1)
self.assertEquals(filter_range['endRowIndex'], 2)
self.assertEquals(filter_range['endColumnIndex'], 3)
self.sheet.remove_basic_filter()
self.assertTrue('basicFilter' not in get_sheet())
def test_find(self):
sg = self._sequence_generator()
value = next(sg)
self.sheet.update_cell(2, 10, value)
self.sheet.update_cell(2, 11, value)
cell = self.sheet.find(value)
self.assertEqual(cell.value, value)
value2 = next(sg)
value = "%so_O%s" % (value, value2)
self.sheet.update_cell(2, 11, value)
o_O_re = re.compile('[a-z]_[A-Z]%s' % value2)
cell = self.sheet.find(o_O_re)
self.assertEqual(cell.value, value)
def test_findall(self):
list_len = 10
range_label = 'A1:A%s' % list_len
cell_list = self.sheet.range(range_label)
sg = self._sequence_generator()
value = next(sg)
for c in cell_list:
c.value = value
self.sheet.update_cells(cell_list)
result_list = self.sheet.findall(value)
self.assertEqual(list_len, len(result_list))
for c in result_list:
self.assertEqual(c.value, value)
cell_list = self.sheet.range(range_label)
value = next(sg)
for c in cell_list:
char = chr(random.randrange(ord('a'), ord('z')))
c.value = "%s%s_%s%s" % (c.value, char, char.upper(), value)
self.sheet.update_cells(cell_list)
o_O_re = re.compile('[a-z]_[A-Z]%s' % value)
result_list = self.sheet.findall(o_O_re)
self.assertEqual(list_len, len(result_list))
def test_get_all_values(self):
self.sheet.resize(4, 4)
# put in new values, made from three lists
rows = [
["A1", "B1", "", "D1"],
["", "b2", "", ""],
["", "", "", ""],
["A4", "B4", "", "D4"],
]
cell_list = self.sheet.range('A1:D1')
cell_list.extend(self.sheet.range('A2:D2'))
cell_list.extend(self.sheet.range('A3:D3'))
cell_list.extend(self.sheet.range('A4:D4'))
for cell, value in zip(cell_list, itertools.chain(*rows)):
cell.value = value
self.sheet.update_cells(cell_list)
# read values with get_all_values, get a list of lists
read_data = self.sheet.get_all_values()
# values should match with original lists
self.assertEqual(read_data, rows)
def test_get_all_values_title_is_a1_notation(self):
self.sheet.resize(4, 4)
# renames sheet to contain single and double quotes
self.sheet.update_title("D3")
# put in new values, made from three lists
rows = [
["A1", "B1", "", "D1"],
["", "b2", "", ""],
["", "", "", ""],
["A4", "B4", "", "d4"],
]
cell_list = self.sheet.range('A1:D1')
cell_list.extend(self.sheet.range('A2:D2'))
cell_list.extend(self.sheet.range('A3:D3'))
cell_list.extend(self.sheet.range('A4:D4'))
for cell, value in zip(cell_list, itertools.chain(*rows)):
cell.value = value
self.sheet.update_cells(cell_list)
# read values with get_all_values, get a list of lists
read_data = self.sheet.get_all_values()
# values should match with original lists
self.assertEqual(read_data, rows)
def test_get_all_records(self):
self.sheet.resize(4, 4)
# put in new values, made from three lists
rows = [
["A1", "B1", "", "D1"],
[1, "b2", 1.45, ""],
["", "", "", ""],
["A4", 0.4, "", 4],
]
cell_list = self.sheet.range('A1:D4')
for cell, value in zip(cell_list, itertools.chain(*rows)):
cell.value = value
self.sheet.update_cells(cell_list)
# first, read empty strings to empty strings
read_records = self.sheet.get_all_records()
d0 = dict(zip(rows[0], rows[1]))
d1 = dict(zip(rows[0], rows[2]))
d2 = dict(zip(rows[0], rows[3]))
self.assertEqual(read_records[0], d0)
self.assertEqual(read_records[1], d1)
self.assertEqual(read_records[2], d2)
# then, read empty strings to zeros
read_records = self.sheet.get_all_records(empty2zero=True)
d1 = dict(zip(rows[0], (0, 0, 0, 0)))
self.assertEqual(read_records[1], d1)
# then, read empty strings to None
read_records = self.sheet.get_all_records(default_blank=None)
d1 = dict(zip(rows[0], (None, None, None, None)))
self.assertEqual(read_records[1], d1)
# then, read empty strings to something else
read_records = self.sheet.get_all_records(default_blank='foo')
d1 = dict(zip(rows[0], ('foo', 'foo', 'foo', 'foo')))
self.assertEqual(read_records[1], d1)
def test_get_all_records_different_header(self):
self.sheet.resize(6, 4)
# put in new values, made from three lists
rows = [
["", "", "", ""],
["", "", "", ""],
["A1", "B1", "", "D1"],
[1, "b2", 1.45, ""],
["", "", "", ""],
["A4", 0.4, "", 4],
]
cell_list = self.sheet.range('A1:D6')
for cell, value in zip(cell_list, itertools.chain(*rows)):
cell.value = value
self.sheet.update_cells(cell_list)
# first, read empty strings to empty strings
read_records = self.sheet.get_all_records(head=3)
d0 = dict(zip(rows[2], rows[3]))
d1 = dict(zip(rows[2], rows[4]))
d2 = dict(zip(rows[2], rows[5]))
self.assertEqual(read_records[0], d0)
self.assertEqual(read_records[1], d1)
self.assertEqual(read_records[2], d2)
# then, read empty strings to zeros
read_records = self.sheet.get_all_records(empty2zero=True, head=3)
d1 = dict(zip(rows[2], (0, 0, 0, 0)))
self.assertEqual(read_records[1], d1)
# then, read empty strings to None
read_records = self.sheet.get_all_records(default_blank=None, head=3)
d1 = dict(zip(rows[2], (None, None, None, None)))
self.assertEqual(read_records[1], d1)
# then, read empty strings to something else
read_records = self.sheet.get_all_records(default_blank='foo', head=3)
d1 = dict(zip(rows[2], ('foo', 'foo', 'foo', 'foo')))
self.assertEqual(read_records[1], d1)
def test_append_row(self):
sg = self._sequence_generator()
value_list = [next(sg) for i in range(10)]
self.sheet.append_row(value_list)
read_values = self.sheet.row_values(1)
self.assertEqual(value_list, read_values)
def test_append_row_with_empty_value(self):
sg = self._sequence_generator()
value_list = [next(sg) for i in range(3)]
value_list[1] = '' # Skip one cell to create two "tables" as in #537
self.sheet.append_row(value_list)
# Append it again
self.sheet.append_row(value_list)
# This should produce a shift in rows as in #537
shifted_value_list = ['', ''] + value_list
read_values = self.sheet.row_values(2)
self.assertEqual(shifted_value_list, read_values)
def test_append_row_with_empty_value_and_table_range(self):
sg = self._sequence_generator()
value_list = [next(sg) for i in range(3)]
value_list[1] = '' # Skip one cell to create two "tables" as in #537
self.sheet.append_row(value_list)
# Append it again
self.sheet.append_row(value_list, table_range='A1')
# This should produce no shift in rows
# contrary to test_append_row_with_empty_value
read_values = self.sheet.row_values(2)
self.assertEqual(value_list, read_values)
def test_insert_row(self):
sg = self._sequence_generator()
num_rows = 6
num_cols = 4
rows = [[next(sg) for j in range(num_cols)] for i in range(num_rows)]
cell_list = self.sheet.range('A1:D6')
for cell, value in zip(cell_list, itertools.chain(*rows)):
cell.value = value
self.sheet.update_cells(cell_list)
new_row_values = [next(sg) for i in range(num_cols + 4)]
self.sheet.insert_row(new_row_values, 2)
read_values = self.sheet.row_values(2)
self.assertEqual(new_row_values, read_values)
formula = '=1+1'
self.sheet.update_acell('B2', formula)
values = [next(sg) for i in range(num_cols + 4)]
self.sheet.insert_row(values, 1)
b3 = self.sheet.acell('B3', value_render_option='FORMULA')
self.assertEqual(b3.value, formula)
def test_delete_row(self):
sg = self._sequence_generator()
for i in range(5):
value_list = [next(sg) for i in range(10)]
self.sheet.append_row(value_list)
prev_row = self.sheet.row_values(1)
next_row = self.sheet.row_values(3)
self.sheet.delete_row(2)
self.assertEqual(self.sheet.row_values(1), prev_row)
self.assertEqual(self.sheet.row_values(2), next_row)
def test_clear(self):
rows = [
["", "", "", ""],
["", "", "", ""],
["A1", "B1", "", "D1"],
[1, "b2", 1.45, ""],
["", "", "", ""],
["A4", 0.4, "", 4],
]
cell_list = self.sheet.range('A1:D6')
for cell, value in zip(cell_list, itertools.chain(*rows)):
cell.value = value
self.sheet.update_cells(cell_list)
self.sheet.clear()
self.assertEqual(self.sheet.get_all_values(), [])
def test_update_and_get(self):
values = [
['A1', 'B1', '', 'D1'],
['', 'b2', '', ''],
['', '', '', ''],
['A4', 'B4', '', 'D4'],
]
self.sheet.update('A1', values)
read_data = self.sheet.get('A1:D4')
self.assertEqual(read_data, [
['A1', 'B1', '', 'D1'],
['', 'b2'],
[],
['A4', 'B4', '', 'D4']]
)
def test_batch_get(self):
values = [
['A1', 'B1', '', 'D1'],
['', 'b2', '', ''],
['', '', '', ''],
['A4', 'B4', '', 'D4'],
]
self.sheet.update('A1', values)
value_ranges = self.sheet.batch_get(['A1:B1', 'B4:D4'])
self.assertEqual(value_ranges, [[['A1', 'B1']], [['B4', '', 'D4']]])
self.assertEqual(value_ranges[0].range, 'wksht_test!A1:B1')
self.assertEqual(value_ranges[1].range, 'wksht_test!B4:D4')
self.assertEqual(value_ranges[0].first(), 'A1')
def test_batch_update(self):
self.sheet.batch_update([{
'range': 'A1:D1',
'values': [['A1', 'B1', '', 'D1']],
}, {
'range': 'A4:D4',
'values': [['A4', 'B4', '', 'D4']],
}])
data = self.sheet.get('A1:D4')
self.assertEqual(data, [
['A1', 'B1', '', 'D1'],
[],
[],
['A4', 'B4', '', 'D4']
])
def test_format(self):
cell_format = {
"backgroundColor": {
"green": 1,
"blue": 1
},
"horizontalAlignment": "CENTER",
"textFormat": {
"foregroundColor": {
"red": 1,
"green": 1,
},
"fontSize": 12,
"bold": True
}
}
self.maxDiff = None
self.sheet.format("A2:B2", cell_format)
data = self.spreadsheet._spreadsheets_get({
'includeGridData': False,
'ranges': ['wksht_test!A2'],
'fields': 'sheets.data.rowData.values.userEnteredFormat'
})
uef = (
data
['sheets'][0]
['data'][0]
['rowData'][0]
['values'][0]
['userEnteredFormat']
)
del uef['backgroundColorStyle']
del uef['textFormat']['foregroundColorStyle']
self.assertEqual(
uef,
cell_format
)
class CellTest(GspreadTest):
"""Test for gspread.Cell."""
def setUp(self):
super(CellTest, self).setUp()
self.spreadsheet = self.gc.open(self.get_temporary_spreadsheet_title())
self.sheet = self.spreadsheet.sheet1
def test_properties(self):
sg = self._sequence_generator()
update_value = next(sg)
self.sheet.update_acell('A1', update_value)
cell = self.sheet.acell('A1')
self.assertEqual(cell.value, update_value)
self.assertEqual(cell.row, 1)
self.assertEqual(cell.col, 1)
def test_numeric_value(self):
numeric_value = 1.0 / 1024
# Use a formula here to avoid issues with differing decimal marks:
self.sheet.update_acell('A1', '= 1 / 1024')
cell = self.sheet.acell('A1')
self.assertEqual(cell.numeric_value, numeric_value)
self.assertTrue(isinstance(cell.numeric_value, float))
self.sheet.update_acell('A1', 'Non-numeric value')
cell = self.sheet.acell('A1')
self.assertEqual(cell.numeric_value, None)
|
the-stack_0_24159 | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Common cli utilities for transport plugins."""
from functools import partial
import inspect
import click
from aiida.cmdline.params import arguments, options
from aiida.cmdline.params.options.interactive import InteractiveOption
from aiida.cmdline.utils import echo
from aiida.cmdline.utils.decorators import with_dbenv
from aiida.common.exceptions import NotExistent
from aiida.manage import get_manager
TRANSPORT_PARAMS = []
# pylint: disable=unused-argument
def match_comp_transport(ctx, param, computer, transport_type):
"""Check the computer argument against the transport type."""
if computer.transport_type != transport_type:
echo.echo_critical(
f'Computer {computer.label} has transport of type "{computer.transport_type}", not {transport_type}!'
)
return computer
@with_dbenv()
def configure_computer_main(computer, user, **kwargs):
"""Configure a computer via the CLI."""
from aiida import orm
user = user or orm.User.objects.get_default()
echo.echo_report(f'Configuring computer {computer.label} for user {user.email}.')
if user.email != get_manager().get_profile().default_user_email:
echo.echo_report('Configuring different user, defaults may not be appropriate.')
computer.configure(user=user, **kwargs)
echo.echo_success(f'{computer.label} successfully configured for {user.email}')
def common_params(command_func):
"""Decorate a command function with common click parameters for all transport plugins."""
for param in reversed(TRANSPORT_PARAMS):
command_func = param(command_func)
return command_func
def transport_option_default(name, computer):
"""Determine the default value for an auth_param key."""
transport_cls = computer.get_transport_class()
suggester_name = f'_get_{name}_suggestion_string'
members = dict(inspect.getmembers(transport_cls))
suggester = members.get(suggester_name, None)
default = None
if suggester:
default = suggester(computer)
else:
default = transport_cls.auth_options[name].get('default')
return default
def interactive_default(key, also_non_interactive=False):
"""Create a contextual_default value callback for an auth_param key.
:param key: the name of the option.
:param also_non_interactive: indicates whether this option should provide a default also in non-interactive mode. If
False, the option will raise `MissingParameter` if no explicit value is specified when the command is called in
non-interactive mode.
"""
@with_dbenv()
def get_default(ctx):
"""Determine the default value from the context."""
from aiida import orm
if not also_non_interactive and ctx.params['non_interactive']:
raise click.MissingParameter()
user = ctx.params.get('user', None) or orm.User.objects.get_default()
computer = ctx.params.get('computer', None)
if computer is None:
return None
try:
authinfo = orm.AuthInfo.objects.get(dbcomputer_id=computer.id, aiidauser_id=user.id)
except NotExistent:
authinfo = orm.AuthInfo(computer=computer, user=user)
auth_params = authinfo.get_auth_params()
suggestion = auth_params.get(key)
suggestion = suggestion or transport_option_default(key, computer)
return suggestion
return get_default
def create_option(name, spec):
"""Create a click option from a name and partial specs as used in transport auth_options."""
from copy import deepcopy
spec = deepcopy(spec)
name_dashed = name.replace('_', '-')
option_name = f'--{name_dashed}'
existing_option = spec.pop('option', None)
if spec.pop('switch', False):
option_name = '--{name}/--no-{name}'.format(name=name_dashed)
kwargs = {'cls': InteractiveOption, 'show_default': True}
non_interactive_default = spec.pop('non_interactive_default', False)
kwargs['contextual_default'] = interactive_default(name, also_non_interactive=non_interactive_default)
kwargs.update(spec)
if existing_option:
return existing_option(**kwargs)
return click.option(option_name, **kwargs)
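# Illustrative mapping (editor's sketch) from a transport `auth_options` entry to the
# click option produced above; the key and spec shown are assumptions, not a claim
# about aiida-core's actual SSH options.
#
#   create_option('use_login_shell', {'default': True, 'switch': True,
#                                     'help': 'Run commands through a login shell.'})
#   # -> click.option('--use-login-shell/--no-use-login-shell', cls=InteractiveOption,
#   #                 show_default=True, contextual_default=<callback>, default=True,
#   #                 help='Run commands through a login shell.')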
def list_transport_options(transport_type):
from aiida.plugins import TransportFactory
options_list = [create_option(*item) for item in TransportFactory(transport_type).auth_options.items()]
return options_list
def transport_options(transport_type):
"""Decorate a command with all options for a computer configure subcommand for transport_type."""
def apply_options(func):
"""Decorate the command functionn with the appropriate options for the transport type."""
options_list = list_transport_options(transport_type)
options_list.reverse()
func = arguments.COMPUTER(callback=partial(match_comp_transport, transport_type=transport_type))(func)
func = options.NON_INTERACTIVE()(func)
for option in options_list:
func = option(func)
func = options.USER()(func)
func = options.CONFIG_FILE()(func)
return func
return apply_options
def create_configure_cmd(transport_type):
"""Create verdi computer configure subcommand for a transport type."""
help_text = f"""Configure COMPUTER for {transport_type} transport."""
# pylint: disable=unused-argument
def transport_configure_command(computer, user, non_interactive, **kwargs):
"""Configure COMPUTER for a type of transport."""
configure_computer_main(computer, user, **kwargs)
transport_configure_command.__doc__ = help_text
return click.command(transport_type)(transport_options(transport_type)(transport_configure_command))
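# Hedged sketch (editor's addition): how a generated configure subcommand is usually
# attached to the `verdi computer configure` group; the entry point name 'core.ssh'
# and the group variable name are assumptions.
#
#   configure_ssh = create_configure_cmd('core.ssh')
#   verdi_computer_configure.add_command(configure_ssh)
#   # exposed as: verdi computer configure core.ssh COMPUTER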
|
the-stack_0_24161 | # Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.
import dace
import numpy as np
N = dace.symbol('N')
@dace.program(dace.float64[N], dace.float64[N])
def floor_div(Input, Output):
@dace.map(_[0:N])
def div(i):
inp << Input[i // 2]
out >> Output[i]
out = inp
def test():
N.set(25)
A = np.random.rand(N.get())
B = np.zeros([N.get()], dtype=np.float64)
floor_div(A, B)
if N.get() % 2 == 0:
expected = 2.0 * np.sum(A[0:N.get() // 2])
else:
expected = 2.0 * np.sum(A[0:N.get() // 2]) + A[N.get() // 2]
actual = np.sum(B)
diff = abs(actual - expected)
print('Difference:', diff)
assert diff <= 1e-5
if __name__ == "__main__":
    test()
|
the-stack_0_24164 | from django.template import Library
from musician.models import Song
register = Library()
@register.inclusion_tag('templatetags/learn_button.html')
def learn_button(user, song):
if not user:
return {'song': song, 'state': None}
try:
musician_song = Song.objects.get(user=user, song=song)
state = musician_song.state
except Song.DoesNotExist:
state = None
return {'song': song, 'state': state}
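# Illustrative usage sketch (assumed template code, not part of this module):
# once this tag library is loaded in a Django template (the load name depends on
# this module's filename, which is not shown here), the inclusion tag renders
# ``templatetags/learn_button.html`` with the song and the musician's state:
#
#     {% learn_button request.user song %}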
|
the-stack_0_24165 | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) Martin Renou.
# Distributed under the terms of the Modified BSD License.
import warnings
from contextlib import contextmanager
import numpy as np
from traitlets import Bool, Bytes, CInt, Enum, Float, Instance, List, Unicode, TraitError, Union
from ipywidgets import CallbackDispatcher, Color, DOMWidget, Image, Widget, widget_serialization
from ipywidgets.widgets.trait_types import (
bytes_serialization, _color_names, _color_hex_re, _color_hexa_re, _color_rgbhsl_re
)
from ._frontend import module_name, module_version
from .utils import binary_image, populate_args, image_bytes_to_array, commands_to_buffer
COMMANDS = {
'fillRect': 0, 'strokeRect': 1, 'fillRects': 2, 'strokeRects': 3, 'clearRect': 4, 'fillArc': 5,
'fillCircle': 6, 'strokeArc': 7, 'strokeCircle': 8, 'fillArcs': 9, 'strokeArcs': 10,
'fillCircles': 11, 'strokeCircles': 12, 'strokeLine': 13, 'beginPath': 14, 'closePath': 15,
'stroke': 16, 'fillPath': 17, 'fill': 18, 'moveTo': 19, 'lineTo': 20,
'rect': 21, 'arc': 22, 'ellipse': 23, 'arcTo': 24, 'quadraticCurveTo': 25,
'bezierCurveTo': 26, 'fillText': 27, 'strokeText': 28, 'setLineDash': 29, 'drawImage': 30,
'putImageData': 31, 'clip': 32, 'save': 33, 'restore': 34, 'translate': 35,
'rotate': 36, 'scale': 37, 'transform': 38, 'setTransform': 39, 'resetTransform': 40,
'set': 41, 'clear': 42, 'sleep': 43, 'fillPolygon': 44, 'strokePolygon': 45,
'strokeLines': 46,
}
# Traitlets does not allow validating without creating a trait class, so we need this
def _validate_color(value):
if isinstance(value, str):
if (value.lower() in _color_names or _color_hex_re.match(value)
or _color_hexa_re.match(value) or _color_rgbhsl_re.match(value)):
return value
raise TraitError('{} is not a valid HTML Color'.format(value))
def _validate_number(value, min_val, max_val):
try:
number = float(value)
if number >= min_val and number <= max_val:
return number
except ValueError:
raise TraitError('{} is not a number'.format(value))
raise TraitError('{} is not in the range [{}, {}]'.format(value, min_val, max_val))
class Path2D(Widget):
"""Create a Path2D.
Args:
value (str): The path value, e.g. "M10 10 h 80 v 80 h -80 Z"
"""
_model_module = Unicode(module_name).tag(sync=True)
_model_module_version = Unicode(module_version).tag(sync=True)
_model_name = Unicode('Path2DModel').tag(sync=True)
value = Unicode(allow_none=False, read_only=True).tag(sync=True)
def __init__(self, value):
"""Create a Path2D object given the path string."""
self.set_trait('value', value)
super(Path2D, self).__init__()
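# Illustrative sketch (not part of the original module): a Path2D can be filled
# on a Canvas through ``Canvas.fill``, which accepts either a fill rule or a
# Path2D instance (see ``Canvas.fill`` below). Assuming a Canvas named ``canvas``:
#
#     path = Path2D('M10 10 h 80 v 80 h -80 Z')
#     canvas.fill_style = 'red'
#     canvas.fill(path)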
class Pattern(Widget):
"""Create a Pattern.
Args:
image (Canvas or MultiCanvas or ipywidgets.Image): The source to be used as the pattern's image
repetition (str): A string indicating how to repeat the pattern's image, can be "repeat" (both directions), "repeat-x" (horizontal only), "repeat-y" (vertical only), "no-repeat" (neither direction)
"""
_model_module = Unicode(module_name).tag(sync=True)
_model_module_version = Unicode(module_version).tag(sync=True)
_model_name = Unicode('PatternModel').tag(sync=True)
image = Union((Instance(Image), Instance('ipycanvas.Canvas'), Instance('ipycanvas.MultiCanvas')), allow_none=False, read_only=True).tag(sync=True, **widget_serialization)
repetition = Enum(['repeat', 'repeat-x', 'repeat-y', 'no-repeat'], allow_none=False, read_only=True).tag(sync=True)
def __init__(self, image, repetition='repeat'):
"""Create a Pattern object given the image and the type of repetition."""
self.set_trait('image', image)
self.set_trait('repetition', repetition)
super(Pattern, self).__init__()
def _ipython_display_(self, *args, **kwargs):
return self.image._ipython_display_(*args, **kwargs)
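# Illustrative sketch (not part of the original module): a Pattern can be used
# wherever a fill or stroke style is accepted. Assuming ``source`` is an
# ipywidgets.Image and ``canvas`` is a Canvas:
#
#     pattern = Pattern(source, repetition='repeat-x')
#     canvas.fill_style = pattern
#     canvas.fill_rect(0, 0, canvas.width, canvas.height)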
class _CanvasGradient(Widget):
_model_module = Unicode(module_name).tag(sync=True)
_model_module_version = Unicode(module_version).tag(sync=True)
x0 = Float(allow_none=False, read_only=True).tag(sync=True)
y0 = Float(allow_none=False, read_only=True).tag(sync=True)
x1 = Float(allow_none=False, read_only=True).tag(sync=True)
y1 = Float(allow_none=False, read_only=True).tag(sync=True)
color_stops = List(allow_none=False, read_only=True).tag(sync=True)
def __init__(self, x0, y0, x1, y1, color_stops):
self.set_trait('x0', x0)
self.set_trait('y0', y0)
self.set_trait('x1', x1)
self.set_trait('y1', y1)
for color_stop in color_stops:
_validate_number(color_stop[0], 0, 1)
_validate_color(color_stop[1])
self.set_trait('color_stops', color_stops)
super(_CanvasGradient, self).__init__()
class LinearGradient(_CanvasGradient):
"""Create a LinearGradient."""
_model_name = Unicode('LinearGradientModel').tag(sync=True)
def __init__(self, x0, y0, x1, y1, color_stops):
"""Create a LinearGradient object given the start point, end point and color stops.
Args:
x0 (float): The x-axis coordinate of the start point.
y0 (float): The y-axis coordinate of the start point.
x1 (float): The x-axis coordinate of the end point.
y1 (float): The y-axis coordinate of the end point.
color_stops (list): The list of color stop tuples (offset, color) defining the gradient.
"""
super(LinearGradient, self).__init__(x0, y0, x1, y1, color_stops)
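# Illustrative sketch (not part of the original module): gradients are built from
# (offset, color) stops with offsets in [0, 1] and can be assigned to
# ``fill_style`` or ``stroke_style`` of a Canvas, e.g.:
#
#     gradient = LinearGradient(0, 0, 200, 0, [(0, 'blue'), (1, 'red')])
#     canvas.fill_style = gradient
#     canvas.fill_rect(0, 0, 200, 50)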
class RadialGradient(_CanvasGradient):
"""Create a RadialGradient."""
_model_name = Unicode('RadialGradientModel').tag(sync=True)
r0 = Float(allow_none=False, read_only=True).tag(sync=True)
r1 = Float(allow_none=False, read_only=True).tag(sync=True)
def __init__(self, x0, y0, r0, x1, y1, r1, color_stops):
"""Create a RadialGradient object given the start circle, end circle and color stops.
Args:
x0 (float): The x-axis coordinate of the start circle.
y0 (float): The y-axis coordinate of the start circle.
r0 (float): The radius of the start circle.
x1 (float): The x-axis coordinate of the end circle.
y1 (float): The y-axis coordinate of the end circle.
r1 (float): The radius of the end circle.
color_stops (list): The list of color stop tuples (offset, color) defining the gradient.
"""
_validate_number(r0, 0, float('inf'))
_validate_number(r1, 0, float('inf'))
self.set_trait('r0', r0)
self.set_trait('r1', r1)
super(RadialGradient, self).__init__(x0, y0, x1, y1, color_stops)
class _CanvasBase(DOMWidget):
_model_module = Unicode(module_name).tag(sync=True)
_model_module_version = Unicode(module_version).tag(sync=True)
_view_module = Unicode(module_name).tag(sync=True)
_view_module_version = Unicode(module_version).tag(sync=True)
width = CInt(700).tag(sync=True)
height = CInt(500).tag(sync=True)
#: (bool) Specifies if the image should be synchronized from front-end to Python back-end
sync_image_data = Bool(False).tag(sync=True)
#: (bytes) Current image data as bytes (PNG encoded). It is ``None`` by default and will not be
#: updated if ``sync_image_data`` is ``False``.
image_data = Bytes(default_value=None, allow_none=True, read_only=True).tag(sync=True, **bytes_serialization)
def to_file(self, filename):
"""Save the current Canvas image to a PNG file.
This will raise an exception if there is no image to save (_e.g._ if ``image_data`` is ``None``).
"""
if self.image_data is None:
raise RuntimeError('No image data to save, please be sure that ``sync_image_data`` is set to True')
if not filename.endswith('.png') and not filename.endswith('.PNG'):
raise RuntimeError('Can only save to a PNG file')
with open(filename, 'wb') as fobj:
fobj.write(self.image_data)
def get_image_data(self, x=0, y=0, width=None, height=None):
"""Return a NumPy array representing the underlying pixel data for a specified portion of the canvas.
This will throw an error if there is no ``image_data`` to retrieve, this happens when nothing was drawn yet or
when the ``sync_image_data`` attribute is not set to ``True``.
The returned value is a NumPy array containing the image data for the rectangle of the canvas specified. The
coordinates of the rectangle's top-left corner are (``x``, ``y``), while the coordinates of the bottom corner
are (``x + width``, ``y + height``).
"""
if self.image_data is None:
raise RuntimeError('No image data, please be sure that ``sync_image_data`` is set to True')
x = int(x)
y = int(y)
if width is None:
width = self.width - x
if height is None:
height = self.height - y
width = int(width)
height = int(height)
image_data = image_bytes_to_array(self.image_data)
return image_data[y:y + height, x:x + width]
@property
def size(self):
"""Get the canvas size."""
return (self.width, self.height)
@size.setter
def size(self, value):
"""Set the size of the canvas, this is deprecated, use width and height attributes instead."""
warnings.warn(
'size is deprecated and will be removed in a future release, please use width and height instead.',
DeprecationWarning
)
(self.width, self.height) = value
class Canvas(_CanvasBase):
"""Create a Canvas widget.
Args:
width (int): The width (in pixels) of the canvas
height (int): The height (in pixels) of the canvas
caching (boolean): Whether commands should be cached or not
"""
_model_name = Unicode('CanvasModel').tag(sync=True)
_view_name = Unicode('CanvasView').tag(sync=True)
#: (valid HTML color or Gradient or Pattern) The color for filling rectangles and paths. Default to ``'black'``.
fill_style = Union((Color(), Instance(_CanvasGradient), Instance(Pattern)), default_value='black')
#: (valid HTML color or Gradient or Pattern) The color for rectangles and paths stroke. Default to ``'black'``.
stroke_style = Union((Color(), Instance(_CanvasGradient), Instance(Pattern)), default_value='black')
#: (float) Transparency level. Default to ``1.0``.
global_alpha = Float(1.0)
#: (str) Font for the text rendering. Default to ``'12px serif'``.
font = Unicode('12px serif')
#: (str) Text alignment, possible values are ``'start'``, ``'end'``, ``'left'``, ``'right'``, and ``'center'``.
#: Default to ``'start'``.
text_align = Enum(['start', 'end', 'left', 'right', 'center'], default_value='start')
#: (str) Text baseline, possible values are ``'top'``, ``'hanging'``, ``'middle'``, ``'alphabetic'``, ``'ideographic'``
#: and ``'bottom'``.
#: Default to ``'alphabetic'``.
text_baseline = Enum(['top', 'hanging', 'middle', 'alphabetic', 'ideographic', 'bottom'], default_value='alphabetic')
#: (str) Text direction, possible values are ``'ltr'``, ``'rtl'``, and ``'inherit'``.
#: Default to ``'inherit'``.
direction = Enum(['ltr', 'rtl', 'inherit'], default_value='inherit')
#: (str) Global composite operation, possible values are listed below:
#: https://developer.mozilla.org/en-US/docs/Web/API/Canvas_API/Tutorial/Compositing#globalCompositeOperation
global_composite_operation = Enum(
['source-over', 'source-in', 'source-out', 'source-atop',
'destination-over', 'destination-in', 'destination-out',
'destination-atop', 'lighter', 'copy', 'xor', 'multiply',
'screen', 'overlay', 'darken', 'lighten', 'color-dodge',
'color-burn', 'hard-light', 'soft-light', 'difference',
'exclusion', 'hue', 'saturation', 'color', 'luminosity'],
default_value='source-over'
)
#: (float) Indicates the horizontal distance the shadow should extend from the object.
#: This value isn't affected by the transformation matrix. The default is 0.
shadow_offset_x = Float(0.0)
#: (float) Indicates the vertical distance the shadow should extend from the object.
#: This value isn't affected by the transformation matrix. The default is 0.
shadow_offset_y = Float(0.0)
#: (float) Indicates the size of the blurring effect; this value doesn't correspond to a number of pixels
#: and is not affected by the current transformation matrix. The default value is 0.
shadow_blur = Float(0.0)
#: (valid HTML color) A standard CSS color value indicating the color of the shadow effect; by default,
#: it is fully-transparent black.
shadow_color = Color('rgba(0, 0, 0, 0)')
#: (float) Sets the width of lines drawn in the future, must be a positive number. Default to ``1.0``.
line_width = Float(1.0)
#: (str) Sets the appearance of the ends of lines, possible values are ``'butt'``, ``'round'`` and ``'square'``.
#: Default to ``'butt'``.
line_cap = Enum(['butt', 'round', 'square'], default_value='butt')
#: (str) Sets the appearance of the "corners" where lines meet, possible values are ``'round'``, ``'bevel'`` and ``'miter'``.
#: Default to ``'miter'``
line_join = Enum(['round', 'bevel', 'miter'], default_value='miter')
#: (float) Establishes a limit on the miter when two lines join at a sharp angle, to let you control how thick
#: the junction becomes. Default to ``10.``.
miter_limit = Float(10.)
_line_dash = List()
#: (float) Specifies where to start a dash array on a line. Default is ``0.``.
line_dash_offset = Float(0.)
_client_ready_callbacks = Instance(CallbackDispatcher, ())
_mouse_move_callbacks = Instance(CallbackDispatcher, ())
_mouse_down_callbacks = Instance(CallbackDispatcher, ())
_mouse_up_callbacks = Instance(CallbackDispatcher, ())
_mouse_out_callbacks = Instance(CallbackDispatcher, ())
_touch_start_callbacks = Instance(CallbackDispatcher, ())
_touch_end_callbacks = Instance(CallbackDispatcher, ())
_touch_move_callbacks = Instance(CallbackDispatcher, ())
_touch_cancel_callbacks = Instance(CallbackDispatcher, ())
ATTRS = {
'fill_style': 0, 'stroke_style': 1, 'global_alpha': 2, 'font': 3, 'text_align': 4,
'text_baseline': 5, 'direction': 6, 'global_composite_operation': 7,
'line_width': 8, 'line_cap': 9, 'line_join': 10, 'miter_limit': 11, 'line_dash_offset': 12,
'shadow_offset_x': 13, 'shadow_offset_y': 14, 'shadow_blur': 15, 'shadow_color': 16,
}
def __init__(self, *args, **kwargs):
"""Create a Canvas widget."""
#: Whether commands should be cached or not
self.caching = kwargs.get('caching', False)
self._commands_cache = []
self._buffers_cache = []
if 'size' in kwargs:
size = kwargs['size']
kwargs['width'] = size[0]
kwargs['height'] = size[1]
del kwargs['size']
warnings.warn(
'size is deprecated and will be removed in a future release, please use width and height instead.',
DeprecationWarning
)
super(Canvas, self).__init__(*args, **kwargs)
self.on_msg(self._handle_frontend_event)
def sleep(self, time):
"""Make the Canvas sleep for `time` milliseconds."""
self._send_canvas_command(COMMANDS['sleep'], [time])
# Gradient methods
def create_linear_gradient(self, x0, y0, x1, y1, color_stops):
"""Create a LinearGradient object given the start point, end point, and color stops.
Args:
x0 (float): The x-axis coordinate of the start point.
y0 (float): The y-axis coordinate of the start point.
x1 (float): The x-axis coordinate of the end point.
y1 (float): The y-axis coordinate of the end point.
color_stops (list): The list of color stop tuples (offset, color) defining the gradient.
"""
return LinearGradient(x0, y0, x1, y1, color_stops)
def create_radial_gradient(self, x0, y0, r0, x1, y1, r1, color_stops):
"""Create a RadialGradient object given the start circle, end circle and color stops.
Args:
x0 (float): The x-axis coordinate of the start circle.
y0 (float): The y-axis coordinate of the start circle.
r0 (float): The radius of the start circle.
x1 (float): The x-axis coordinate of the end circle.
y1 (float): The y-axis coordinate of the end circle.
r1 (float): The radius of the end circle.
color_stops (list): The list of color stop tuples (offset, color) defining the gradient.
"""
return RadialGradient(x0, y0, r0, x1, y1, r1, color_stops)
# Pattern method
def create_pattern(self, image, repetition='repeat'):
"""Create a Pattern.
Args:
image (Canvas or MultiCanvas or ipywidgets.Image): The source to be used as the pattern's image
repetition (str): A string indicating how to repeat the pattern's image, can be "repeat" (both directions), "repeat-x" (horizontal only), "repeat-y" (vertical only), "no-repeat" (neither direction)
"""
return Pattern(image, repetition)
# Rectangles methods
def fill_rect(self, x, y, width, height=None):
"""Draw a filled rectangle of size ``(width, height)`` at the ``(x, y)`` position."""
if height is None:
height = width
self._send_canvas_command(COMMANDS['fillRect'], [x, y, width, height])
def stroke_rect(self, x, y, width, height=None):
"""Draw a rectangular outline of size ``(width, height)`` at the ``(x, y)`` position."""
if height is None:
height = width
self._send_canvas_command(COMMANDS['strokeRect'], [x, y, width, height])
def fill_rects(self, x, y, width, height=None):
"""Draw filled rectangles of sizes ``(width, height)`` at the ``(x, y)`` positions.
Where ``x``, ``y``, ``width`` and ``height`` arguments are NumPy arrays, lists or scalar values.
If ``height`` is None, it is set to the same value as width.
"""
args = []
buffers = []
populate_args(x, args, buffers)
populate_args(y, args, buffers)
populate_args(width, args, buffers)
if height is None:
args.append(args[-1])
else:
populate_args(height, args, buffers)
self._send_canvas_command(COMMANDS['fillRects'], args, buffers)
def stroke_rects(self, x, y, width, height=None):
"""Draw a rectangular outlines of sizes ``(width, height)`` at the ``(x, y)`` positions.
Where ``x``, ``y``, ``width`` and ``height`` arguments are NumPy arrays, lists or scalar values.
If ``height`` is None, it is set to the same value as width.
"""
args = []
buffers = []
populate_args(x, args, buffers)
populate_args(y, args, buffers)
populate_args(width, args, buffers)
if height is None:
args.append(args[-1])
else:
populate_args(height, args, buffers)
self._send_canvas_command(COMMANDS['strokeRects'], args, buffers)
def clear_rect(self, x, y, width, height=None):
"""Clear the specified rectangular area of size ``(width, height)`` at the ``(x, y)`` position, making it fully transparent."""
if height is None:
height = width
self._send_canvas_command(COMMANDS['clearRect'], [x, y, width, height])
# Arc methods
def fill_arc(self, x, y, radius, start_angle, end_angle, anticlockwise=False):
"""Draw a filled arc centered at ``(x, y)`` with a radius of ``radius`` from ``start_angle`` to ``end_angle``."""
self._send_canvas_command(COMMANDS['fillArc'], [x, y, radius, start_angle, end_angle, anticlockwise])
def fill_circle(self, x, y, radius):
"""Draw a filled circle centered at ``(x, y)`` with a radius of ``radius``."""
self._send_canvas_command(COMMANDS['fillCircle'], [x, y, radius])
def stroke_arc(self, x, y, radius, start_angle, end_angle, anticlockwise=False):
"""Draw an arc outline centered at ``(x, y)`` with a radius of ``radius``."""
self._send_canvas_command(COMMANDS['strokeArc'], [x, y, radius, start_angle, end_angle, anticlockwise])
def stroke_circle(self, x, y, radius):
"""Draw a circle centered at ``(x, y)`` with a radius of ``radius``."""
self._send_canvas_command(COMMANDS['strokeCircle'], [x, y, radius])
def fill_arcs(self, x, y, radius, start_angle, end_angle, anticlockwise=False):
"""Draw filled arcs centered at ``(x, y)`` with a radius of ``radius``.
Where ``x``, ``y``, ``radius`` and other arguments are NumPy arrays, lists or scalar values.
"""
args = []
buffers = []
populate_args(x, args, buffers)
populate_args(y, args, buffers)
populate_args(radius, args, buffers)
populate_args(start_angle, args, buffers)
populate_args(end_angle, args, buffers)
args.append(anticlockwise)
self._send_canvas_command(COMMANDS['fillArcs'], args, buffers)
def stroke_arcs(self, x, y, radius, start_angle, end_angle, anticlockwise=False):
"""Draw an arc outlines centered at ``(x, y)`` with a radius of ``radius``.
Where ``x``, ``y``, ``radius`` and other arguments are NumPy arrays, lists or scalar values.
"""
args = []
buffers = []
populate_args(x, args, buffers)
populate_args(y, args, buffers)
populate_args(radius, args, buffers)
populate_args(start_angle, args, buffers)
populate_args(end_angle, args, buffers)
args.append(anticlockwise)
self._send_canvas_command(COMMANDS['strokeArcs'], args, buffers)
def fill_circles(self, x, y, radius):
"""Draw filled circles centered at ``(x, y)`` with a radius of ``radius``.
        Where the ``x``, ``y`` and ``radius`` arguments are NumPy arrays, lists or scalar values.
"""
args = []
buffers = []
populate_args(x, args, buffers)
populate_args(y, args, buffers)
populate_args(radius, args, buffers)
self._send_canvas_command(COMMANDS['fillCircles'], args, buffers)
def stroke_circles(self, x, y, radius):
"""Draw a circle outlines centered at ``(x, y)`` with a radius of ``radius``.
        Where the ``x``, ``y`` and ``radius`` arguments are NumPy arrays, lists or scalar values.
"""
args = []
buffers = []
populate_args(x, args, buffers)
populate_args(y, args, buffers)
populate_args(radius, args, buffers)
self._send_canvas_command(COMMANDS['strokeCircles'], args, buffers)
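    # Illustrative sketch (not part of the original module): the vectorized
    # methods above accept NumPy arrays (or lists) and send a single command for
    # all shapes, e.g.:
    #
    #     n = 50
    #     x = np.random.randint(0, 200, n)
    #     y = np.random.randint(0, 200, n)
    #     canvas.fill_circles(x, y, np.full(n, 5))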
# Polygon methods
def fill_polygon(self, points):
"""Fill a polygon from a list of points ``[(x1, y1), (x2, y2), ..., (xn, yn)]``."""
args = []
buffers = []
populate_args(points, args, buffers)
self._send_canvas_command(COMMANDS['fillPolygon'], args, buffers)
def stroke_polygon(self, points):
"""Draw polygon outline from a list of points ``[(x1, y1), (x2, y2), ..., (xn, yn)]``."""
args = []
buffers = []
populate_args(points, args, buffers)
self._send_canvas_command(COMMANDS['strokePolygon'], args, buffers)
# Lines methods
def stroke_line(self, x1, y1, x2, y2):
"""Draw a line from ``(x1, y1)`` to ``(x2, y2)``."""
self._send_canvas_command(COMMANDS['strokeLine'], [x1, y1, x2, y2])
def stroke_lines(self, points):
"""Draw a path of consecutive lines from a list of points ``[(x1, y1), (x2, y2), ..., (xn, yn)]``."""
args = []
buffers = []
populate_args(points, args, buffers)
self._send_canvas_command(COMMANDS['strokeLines'], args, buffers)
# Paths methods
def begin_path(self):
"""Call this method when you want to create a new path."""
self._send_canvas_command(COMMANDS['beginPath'])
def close_path(self):
"""Add a straight line from the current point to the start of the current path.
If the shape has already been closed or has only one point, this function does nothing.
This method doesn't draw anything to the canvas directly. You can render the path using the stroke() or fill() methods.
"""
self._send_canvas_command(COMMANDS['closePath'])
def stroke(self):
"""Stroke (outlines) the current path with the current ``stroke_style``."""
self._send_canvas_command(COMMANDS['stroke'])
def fill(self, rule_or_path='nonzero'):
"""Fill the current path with the current ``fill_style`` and given the rule, or fill the given Path2D.
Possible rules are ``nonzero`` and ``evenodd``.
"""
if isinstance(rule_or_path, Path2D):
self._send_canvas_command(COMMANDS['fillPath'], [widget_serialization['to_json'](rule_or_path, None)])
else:
self._send_canvas_command(COMMANDS['fill'], [rule_or_path])
def move_to(self, x, y):
"""Move the "pen" to the given ``(x, y)`` coordinates."""
self._send_canvas_command(COMMANDS['moveTo'], [x, y])
def line_to(self, x, y):
"""Add a straight line to the current path by connecting the path's last point to the specified ``(x, y)`` coordinates.
Like other methods that modify the current path, this method does not directly render anything. To
draw the path onto the canvas, you can use the fill() or stroke() methods.
"""
self._send_canvas_command(COMMANDS['lineTo'], [x, y])
def rect(self, x, y, width, height):
"""Add a rectangle of size ``(width, height)`` at the ``(x, y)`` position in the current path."""
self._send_canvas_command(COMMANDS['rect'], [x, y, width, height])
def arc(self, x, y, radius, start_angle, end_angle, anticlockwise=False):
"""Add a circular arc centered at ``(x, y)`` with a radius of ``radius`` to the current path.
The path starts at ``start_angle`` and ends at ``end_angle``, and travels in the direction given by
``anticlockwise`` (defaulting to clockwise: ``False``).
"""
self._send_canvas_command(COMMANDS['arc'], [x, y, radius, start_angle, end_angle, anticlockwise])
def ellipse(self, x, y, radius_x, radius_y, rotation, start_angle, end_angle, anticlockwise=False):
"""Add an ellipse centered at ``(x, y)`` with the radii ``radius_x`` and ``radius_y`` to the current path.
The path starts at ``start_angle`` and ends at ``end_angle``, and travels in the direction given by
``anticlockwise`` (defaulting to clockwise: ``False``).
"""
self._send_canvas_command(COMMANDS['ellipse'], [x, y, radius_x, radius_y, rotation, start_angle, end_angle, anticlockwise])
def arc_to(self, x1, y1, x2, y2, radius):
"""Add a circular arc to the current path.
Using the given control points ``(x1, y1)`` and ``(x2, y2)`` and the ``radius``.
"""
self._send_canvas_command(COMMANDS['arcTo'], [x1, y1, x2, y2, radius])
def quadratic_curve_to(self, cp1x, cp1y, x, y):
"""Add a quadratic Bezier curve to the current path.
It requires two points: the first one is a control point and the second one is the end point.
The starting point is the latest point in the current path, which can be changed using move_to()
before creating the quadratic Bezier curve.
"""
self._send_canvas_command(COMMANDS['quadraticCurveTo'], [cp1x, cp1y, x, y])
def bezier_curve_to(self, cp1x, cp1y, cp2x, cp2y, x, y):
"""Add a cubic Bezier curve to the current path.
It requires three points: the first two are control points and the third one is the end point.
The starting point is the latest point in the current path, which can be changed using move_to()
before creating the Bezier curve.
"""
self._send_canvas_command(COMMANDS['bezierCurveTo'], [cp1x, cp1y, cp2x, cp2y, x, y])
# Text methods
def fill_text(self, text, x, y, max_width=None):
"""Fill a given text at the given ``(x, y)`` position. Optionally with a maximum width to draw."""
self._send_canvas_command(COMMANDS['fillText'], [text, x, y, max_width])
def stroke_text(self, text, x, y, max_width=None):
"""Stroke a given text at the given ``(x, y)`` position. Optionally with a maximum width to draw."""
self._send_canvas_command(COMMANDS['strokeText'], [text, x, y, max_width])
# Line methods
def get_line_dash(self):
"""Return the current line dash pattern array containing an even number of non-negative numbers."""
return self._line_dash
def set_line_dash(self, segments):
"""Set the current line dash pattern."""
if len(segments) % 2:
self._line_dash = segments + segments
else:
self._line_dash = segments
self._send_canvas_command(COMMANDS['setLineDash'], [self._line_dash])
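    # Illustrative sketch (not part of the original module): a dash pattern is a
    # list of segment lengths; odd-length lists are doubled above so the pattern
    # always has an even number of entries, e.g.:
    #
    #     canvas.set_line_dash([10, 5])
    #     canvas.stroke_line(0, 10, 150, 10)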
# Image methods
def draw_image(self, image, x=0, y=0, width=None, height=None):
"""Draw an ``image`` on the Canvas at the coordinates (``x``, ``y``) and scale it to (``width``, ``height``)."""
if (not isinstance(image, (Canvas, MultiCanvas, Image))):
raise TypeError('The image argument should be an Image, a Canvas or a MultiCanvas widget')
if width is not None and height is None:
height = width
serialized_image = widget_serialization['to_json'](image, None)
self._send_canvas_command(COMMANDS['drawImage'], [serialized_image, x, y, width, height])
def put_image_data(self, image_data, x=0, y=0):
"""Draw an image on the Canvas.
``image_data`` should be a NumPy array containing the image to draw and ``x`` and ``y`` the pixel position where to
draw. Unlike the CanvasRenderingContext2D.putImageData method, this method **is** affected by the canvas transformation
matrix, and supports transparency.
"""
image_metadata, image_buffer = binary_image(image_data)
self._send_canvas_command(COMMANDS['putImageData'], [image_metadata, x, y], [image_buffer])
def create_image_data(self, width, height):
"""Create a NumPy array of shape (width, height, 4) representing a table of pixel colors."""
return np.zeros((width, height, 4), dtype=int)
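    # Illustrative sketch (not part of the original module): a pixel array
    # created with ``create_image_data`` can be modified in place and drawn back
    # with ``put_image_data`` (assuming RGBA channel order), e.g.:
    #
    #     data = canvas.create_image_data(100, 100)
    #     data[:, :, 0] = 255   # red channel
    #     data[:, :, 3] = 255   # fully opaque
    #     canvas.put_image_data(data, 10, 10)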
# Clipping
def clip(self):
"""Turn the path currently being built into the current clipping path.
You can use clip() instead of close_path() to close a path and turn it into a clipping
path instead of stroking or filling the path.
"""
self._send_canvas_command(COMMANDS['clip'])
# Transformation methods
def save(self):
"""Save the entire state of the canvas."""
self._send_canvas_command(COMMANDS['save'])
def restore(self):
"""Restore the most recently saved canvas state."""
self._send_canvas_command(COMMANDS['restore'])
def translate(self, x, y):
"""Move the canvas and its origin on the grid.
``x`` indicates the horizontal distance to move,
and ``y`` indicates how far to move the grid vertically.
"""
self._send_canvas_command(COMMANDS['translate'], [x, y])
def rotate(self, angle):
"""Rotate the canvas clockwise around the current origin by the ``angle`` number of radians."""
self._send_canvas_command(COMMANDS['rotate'], [angle])
def scale(self, x, y=None):
"""Scale the canvas units by ``x`` horizontally and by ``y`` vertically. Both parameters are real numbers.
If ``y`` is not provided, it is defaulted to the same value as ``x``.
Values that are smaller than 1.0 reduce the unit size and values above 1.0 increase the unit size.
Values of 1.0 leave the units the same size.
"""
if y is None:
y = x
self._send_canvas_command(COMMANDS['scale'], [x, y])
def transform(self, a, b, c, d, e, f):
"""Multiply the current transformation matrix with the matrix described by its arguments.
The transformation matrix is described by:
``[[a, c, e], [b, d, f], [0, 0, 1]]``.
"""
self._send_canvas_command(COMMANDS['transform'], [a, b, c, d, e, f])
def set_transform(self, a, b, c, d, e, f):
"""Reset the current transform to the identity matrix, and then invokes the transform() method with the same arguments.
This basically undoes the current transformation, then sets the specified transform, all in one step.
"""
self._send_canvas_command(COMMANDS['setTransform'], [a, b, c, d, e, f])
def reset_transform(self):
"""Reset the current transform to the identity matrix.
This is the same as calling: set_transform(1, 0, 0, 1, 0, 0).
"""
self._send_canvas_command(COMMANDS['resetTransform'])
# Extras
def clear(self):
"""Clear the entire canvas. This is the same as calling ``clear_rect(0, 0, canvas.width, canvas.height)``."""
self._send_command([COMMANDS['clear']])
def flush(self):
"""Flush all the cached commands and clear the cache."""
if not self.caching or not len(self._commands_cache):
return
self._send_custom(self._commands_cache, self._buffers_cache)
self._commands_cache = []
self._buffers_cache = []
# Events
def on_client_ready(self, callback, remove=False):
"""Register a callback that will be called when a new client is ready to receive draw commands.
        When a new client connects to the kernel it will get an empty Canvas (because the canvas is
almost stateless, the new client does not know what draw commands were previously sent). So
this function is useful for replaying your drawing whenever a new client connects and is
ready to receive draw commands.
"""
self._client_ready_callbacks.register_callback(callback, remove=remove)
def on_mouse_move(self, callback, remove=False):
"""Register a callback that will be called on mouse move."""
self._mouse_move_callbacks.register_callback(callback, remove=remove)
def on_mouse_down(self, callback, remove=False):
"""Register a callback that will be called on mouse click down."""
self._mouse_down_callbacks.register_callback(callback, remove=remove)
def on_mouse_up(self, callback, remove=False):
"""Register a callback that will be called on mouse click up."""
self._mouse_up_callbacks.register_callback(callback, remove=remove)
def on_mouse_out(self, callback, remove=False):
"""Register a callback that will be called on mouse out of the canvas."""
self._mouse_out_callbacks.register_callback(callback, remove=remove)
def on_touch_start(self, callback, remove=False):
"""Register a callback that will be called on touch start (new finger on the screen)."""
self._touch_start_callbacks.register_callback(callback, remove=remove)
def on_touch_end(self, callback, remove=False):
"""Register a callback that will be called on touch end (a finger is not touching the screen anymore)."""
self._touch_end_callbacks.register_callback(callback, remove=remove)
def on_touch_move(self, callback, remove=False):
"""Register a callback that will be called on touch move (finger moving on the screen)."""
self._touch_move_callbacks.register_callback(callback, remove=remove)
def on_touch_cancel(self, callback, remove=False):
"""Register a callback that will be called on touch cancel."""
self._touch_cancel_callbacks.register_callback(callback, remove=remove)
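    # Illustrative sketch (not part of the original module): mouse callbacks
    # receive the pixel position of the event, touch callbacks receive a list of
    # (x, y) tuples, one per finger, e.g.:
    #
    #     def handle_mouse_down(x, y):
    #         canvas.fill_circle(x, y, 5)
    #
    #     canvas.on_mouse_down(handle_mouse_down)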
def __setattr__(self, name, value):
super(Canvas, self).__setattr__(name, value)
if name in self.ATTRS:
# If it's a Widget we need to serialize it
if isinstance(value, Widget):
value = widget_serialization['to_json'](value, None)
self._send_command([COMMANDS['set'], [self.ATTRS[name], value]])
def _send_canvas_command(self, name, args=[], buffers=[]):
        while args and args[-1] is None:
            args.pop()
self._send_command([name, args, len(buffers)], buffers)
def _send_command(self, command, buffers=[]):
if self.caching:
self._commands_cache.append(command)
self._buffers_cache += buffers
else:
self._send_custom(command, buffers)
def _send_custom(self, command, buffers=[]):
metadata, command_buffer = commands_to_buffer(command)
self.send(metadata, buffers=[command_buffer] + buffers)
def _handle_frontend_event(self, _, content, buffers):
if content.get('event', '') == 'client_ready':
self._client_ready_callbacks()
if content.get('event', '') == 'mouse_move':
self._mouse_move_callbacks(content['x'], content['y'])
if content.get('event', '') == 'mouse_down':
self._mouse_down_callbacks(content['x'], content['y'])
if content.get('event', '') == 'mouse_up':
self._mouse_up_callbacks(content['x'], content['y'])
if content.get('event', '') == 'mouse_out':
self._mouse_out_callbacks(content['x'], content['y'])
if content.get('event', '') == 'touch_start':
self._touch_start_callbacks([(touch['x'], touch['y']) for touch in content['touches']])
if content.get('event', '') == 'touch_end':
self._touch_end_callbacks([(touch['x'], touch['y']) for touch in content['touches']])
if content.get('event', '') == 'touch_move':
self._touch_move_callbacks([(touch['x'], touch['y']) for touch in content['touches']])
if content.get('event', '') == 'touch_cancel':
self._touch_cancel_callbacks([(touch['x'], touch['y']) for touch in content['touches']])
class RoughCanvas(Canvas):
"""Create a RoughCanvas widget. It gives a hand-drawn-like style to your drawings.
Args:
width (int): The width (in pixels) of the canvas
height (int): The height (in pixels) of the canvas
caching (boolean): Whether commands should be cached or not
"""
_model_name = Unicode('RoughCanvasModel').tag(sync=True)
_view_name = Unicode('CanvasView').tag(sync=True)
#: (str) Sets the appearance of the filling, possible values are ``'hachure'``, ``'solid'``, ``'zigzag'``,
#: ``'cross-hatch'``, ``'dots'``, ``'sunburst'``, ``'dashed'``, ``'zigzag-line'``.
#: Default to ``'hachure'``.
rough_fill_style = Enum(['hachure', 'solid', 'zigzag', 'cross-hatch', 'dots', 'sunburst', 'dashed', 'zigzag-line'], default_value='hachure')
#: (float) Numerical value indicating how rough the drawing is. A rectangle with the roughness of 0 would be a perfect rectangle.
#: There is no upper limit to this value, but a value over 10 is mostly useless.
    #: Default to ``1``.
roughness = Float(1)
#: (float) Numerical value indicating how curvy the lines are when drawing a sketch. A value of 0 will cause straight lines.
    #: Default to ``1``.
bowing = Float(1)
ROUGH_ATTRS = {
'rough_fill_style': 100, 'roughness': 101, 'bowing': 102,
}
def __setattr__(self, name, value):
super(RoughCanvas, self).__setattr__(name, value)
if name in self.ROUGH_ATTRS:
self._send_command([COMMANDS['set'], [self.ROUGH_ATTRS[name], value]])
class MultiCanvas(_CanvasBase):
"""Create a MultiCanvas widget with n_canvases Canvas widgets.
Args:
n_canvases (int): The number of canvases to create
width (int): The width (in pixels) of the canvases
height (int): The height (in pixels) of the canvases
"""
_model_name = Unicode('MultiCanvasModel').tag(sync=True)
_view_name = Unicode('MultiCanvasView').tag(sync=True)
_canvases = List(Instance(Canvas)).tag(sync=True, **widget_serialization)
def __init__(self, n_canvases=3, *args, **kwargs):
"""Constructor."""
super(MultiCanvas, self).__init__(*args, _canvases=[Canvas() for _ in range(n_canvases)], **kwargs)
# The latest canvas receives events (interaction layer)
self.on_msg(self._canvases[-1]._handle_frontend_event)
def __getitem__(self, key):
"""Access one of the Canvas instances."""
return self._canvases[key]
def __setattr__(self, name, value):
super(MultiCanvas, self).__setattr__(name, value)
if name in ('caching', 'width', 'height'):
for layer in self._canvases:
setattr(layer, name, value)
def __getattr__(self, name):
if name in ('caching', 'width', 'height'):
return getattr(self._canvases[0], name)
return super(MultiCanvas, self).__getattr__(name)
def on_client_ready(self, callback, remove=False):
"""Register a callback that will be called when a new client is ready to receive draw commands.
        When a new client connects to the kernel it will get an empty Canvas (because the canvas is
almost stateless, the new client does not know what draw commands were previously sent). So
this function is useful for replaying your drawing whenever a new client connects and is
ready to receive draw commands.
"""
self._canvases[-1]._client_ready_callbacks.register_callback(callback, remove=remove)
def clear(self):
"""Clear the Canvas."""
for layer in self._canvases:
layer.clear()
def flush(self):
"""Flush all the cached commands and clear the cache."""
for layer in self._canvases:
layer.flush()
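# Illustrative sketch (not part of the original module): a MultiCanvas stacks
# several Canvas layers sharing the same size; individual layers are addressed
# by index and the top layer receives the interaction events, e.g.:
#
#     mc = MultiCanvas(3, width=300, height=200)
#     mc[0].fill_rect(0, 0, 300, 200)      # background layer
#     mc[2].stroke_circle(150, 100, 40)    # foreground layer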
class MultiRoughCanvas(MultiCanvas):
"""Create a MultiRoughCanvas widget with n_canvases RoughCanvas widgets.
Args:
n_canvases (int): The number of rough canvases to create
width (int): The width (in pixels) of the canvases
height (int): The height (in pixels) of the canvases
"""
_canvases = List(Instance(RoughCanvas)).tag(sync=True, **widget_serialization)
def __init__(self, n_canvases=3, *args, **kwargs):
"""Constructor."""
super(MultiCanvas, self).__init__(*args, _canvases=[RoughCanvas() for _ in range(n_canvases)], **kwargs)
@contextmanager
def hold_canvas(canvas):
"""Hold any drawing on the canvas, and perform all commands in a single shot at the end.
This is way more efficient than sending commands one by one.
Args:
canvas (ipycanvas.canvas.Canvas): The canvas widget on which to hold the commands
"""
orig_caching = canvas.caching
canvas.caching = True
yield
canvas.flush()
if not orig_caching:
canvas.caching = False
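# Illustrative usage sketch (not part of the original module): wrapping many draw
# calls in ``hold_canvas`` sends them to the front-end as a single message
# instead of one message per call, e.g.:
#
#     canvas = Canvas(width=200, height=200)
#     with hold_canvas(canvas):
#         for i in range(100):
#             canvas.fill_rect(i * 2, i, 2, 2)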
|
the-stack_0_24166 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.dialogflowcx_v3beta1.services.environments import pagers
from google.cloud.dialogflowcx_v3beta1.types import environment
from google.cloud.dialogflowcx_v3beta1.types import environment as gcdc_environment
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import EnvironmentsTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import EnvironmentsGrpcTransport
from .transports.grpc_asyncio import EnvironmentsGrpcAsyncIOTransport
class EnvironmentsClientMeta(type):
"""Metaclass for the Environments client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[EnvironmentsTransport]]
_transport_registry["grpc"] = EnvironmentsGrpcTransport
_transport_registry["grpc_asyncio"] = EnvironmentsGrpcAsyncIOTransport
def get_transport_class(cls, label: str = None,) -> Type[EnvironmentsTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class EnvironmentsClient(metaclass=EnvironmentsClientMeta):
"""Service for managing
[Environments][google.cloud.dialogflow.cx.v3beta1.Environment].
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "dialogflow.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
EnvironmentsClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
EnvironmentsClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
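    # Illustrative sketch (not part of the generated client): assuming a local
    # service-account key file (hypothetical path), a client can be constructed
    # with
    #
    #     client = EnvironmentsClient.from_service_account_file(
    #         "/path/to/service-account.json"
    #     )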
@property
def transport(self) -> EnvironmentsTransport:
"""Returns the transport used by the client instance.
Returns:
EnvironmentsTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def continuous_test_result_path(
project: str,
location: str,
agent: str,
environment: str,
continuous_test_result: str,
) -> str:
"""Returns a fully-qualified continuous_test_result string."""
return "projects/{project}/locations/{location}/agents/{agent}/environments/{environment}/continuousTestResults/{continuous_test_result}".format(
project=project,
location=location,
agent=agent,
environment=environment,
continuous_test_result=continuous_test_result,
)
@staticmethod
def parse_continuous_test_result_path(path: str) -> Dict[str, str]:
"""Parses a continuous_test_result path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/environments/(?P<environment>.+?)/continuousTestResults/(?P<continuous_test_result>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def environment_path(
project: str, location: str, agent: str, environment: str,
) -> str:
"""Returns a fully-qualified environment string."""
return "projects/{project}/locations/{location}/agents/{agent}/environments/{environment}".format(
project=project, location=location, agent=agent, environment=environment,
)
@staticmethod
def parse_environment_path(path: str) -> Dict[str, str]:
"""Parses a environment path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/environments/(?P<environment>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def test_case_path(project: str, location: str, agent: str, test_case: str,) -> str:
"""Returns a fully-qualified test_case string."""
return "projects/{project}/locations/{location}/agents/{agent}/testCases/{test_case}".format(
project=project, location=location, agent=agent, test_case=test_case,
)
@staticmethod
def parse_test_case_path(path: str) -> Dict[str, str]:
"""Parses a test_case path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/testCases/(?P<test_case>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def test_case_result_path(
project: str, location: str, agent: str, test_case: str, result: str,
) -> str:
"""Returns a fully-qualified test_case_result string."""
return "projects/{project}/locations/{location}/agents/{agent}/testCases/{test_case}/results/{result}".format(
project=project,
location=location,
agent=agent,
test_case=test_case,
result=result,
)
@staticmethod
def parse_test_case_result_path(path: str) -> Dict[str, str]:
"""Parses a test_case_result path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/testCases/(?P<test_case>.+?)/results/(?P<result>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def version_path(
project: str, location: str, agent: str, flow: str, version: str,
) -> str:
"""Returns a fully-qualified version string."""
return "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}/versions/{version}".format(
project=project, location=location, agent=agent, flow=flow, version=version,
)
@staticmethod
def parse_version_path(path: str) -> Dict[str, str]:
"""Parses a version path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/flows/(?P<flow>.+?)/versions/(?P<version>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, EnvironmentsTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the environments client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, EnvironmentsTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, EnvironmentsTransport):
# transport is a EnvironmentsTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def list_environments(
self,
request: Union[environment.ListEnvironmentsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListEnvironmentsPager:
r"""Returns the list of all environments in the specified
[Agent][google.cloud.dialogflow.cx.v3beta1.Agent].
Args:
request (Union[google.cloud.dialogflowcx_v3beta1.types.ListEnvironmentsRequest, dict]):
The request object. The request message for
[Environments.ListEnvironments][google.cloud.dialogflow.cx.v3beta1.Environments.ListEnvironments].
parent (str):
Required. The
[Agent][google.cloud.dialogflow.cx.v3beta1.Agent] to
list all environments for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.services.environments.pagers.ListEnvironmentsPager:
The response message for
[Environments.ListEnvironments][google.cloud.dialogflow.cx.v3beta1.Environments.ListEnvironments].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a environment.ListEnvironmentsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, environment.ListEnvironmentsRequest):
request = environment.ListEnvironmentsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_environments]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListEnvironmentsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def get_environment(
self,
request: Union[environment.GetEnvironmentRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> environment.Environment:
r"""Retrieves the specified
[Environment][google.cloud.dialogflow.cx.v3beta1.Environment].
Args:
request (Union[google.cloud.dialogflowcx_v3beta1.types.GetEnvironmentRequest, dict]):
The request object. The request message for
[Environments.GetEnvironment][google.cloud.dialogflow.cx.v3beta1.Environments.GetEnvironment].
name (str):
Required. The name of the
[Environment][google.cloud.dialogflow.cx.v3beta1.Environment].
Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/environments/<Environment ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.types.Environment:
Represents an environment for an
agent. You can create multiple versions
of your agent and publish them to
separate environments. When you edit an
agent, you are editing the draft agent.
At any point, you can save the draft
agent as an agent version, which is an
immutable snapshot of your agent. When
you save the draft agent, it is
published to the default environment.
When you create agent versions, you can
publish them to custom environments. You
can create a variety of custom
environments for testing, development,
production, etc.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a environment.GetEnvironmentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, environment.GetEnvironmentRequest):
request = environment.GetEnvironmentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_environment]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def create_environment(
self,
request: Union[gcdc_environment.CreateEnvironmentRequest, dict] = None,
*,
parent: str = None,
environment: gcdc_environment.Environment = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Creates an
[Environment][google.cloud.dialogflow.cx.v3beta1.Environment] in
the specified [Agent][google.cloud.dialogflow.cx.v3beta1.Agent].
This method is a `long-running
operation <https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation>`__.
The returned ``Operation`` type has the following
method-specific fields:
- ``metadata``: An empty `Struct
message <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#struct>`__
- ``response``:
[Environment][google.cloud.dialogflow.cx.v3beta1.Environment]
Args:
request (Union[google.cloud.dialogflowcx_v3beta1.types.CreateEnvironmentRequest, dict]):
The request object. The request message for
[Environments.CreateEnvironment][google.cloud.dialogflow.cx.v3beta1.Environments.CreateEnvironment].
parent (str):
Required. The
[Agent][google.cloud.dialogflow.cx.v3beta1.Agent] to
create an
[Environment][google.cloud.dialogflow.cx.v3beta1.Environment]
for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
environment (google.cloud.dialogflowcx_v3beta1.types.Environment):
Required. The environment to create.
This corresponds to the ``environment`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dialogflowcx_v3beta1.types.Environment` Represents an environment for an agent. You can create multiple versions
of your agent and publish them to separate
environments. When you edit an agent, you are editing
the draft agent. At any point, you can save the draft
agent as an agent version, which is an immutable
snapshot of your agent. When you save the draft
agent, it is published to the default environment.
When you create agent versions, you can publish them
to custom environments. You can create a variety of
custom environments for testing, development,
production, etc.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, environment])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcdc_environment.CreateEnvironmentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, gcdc_environment.CreateEnvironmentRequest):
request = gcdc_environment.CreateEnvironmentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if environment is not None:
request.environment = environment
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_environment]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
gcdc_environment.Environment,
metadata_type=struct_pb2.Struct,
)
# Done; return the response.
return response
def update_environment(
self,
request: Union[gcdc_environment.UpdateEnvironmentRequest, dict] = None,
*,
environment: gcdc_environment.Environment = None,
update_mask: field_mask_pb2.FieldMask = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Updates the specified
[Environment][google.cloud.dialogflow.cx.v3beta1.Environment].
This method is a `long-running
operation <https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation>`__.
The returned ``Operation`` type has the following
method-specific fields:
- ``metadata``: An empty `Struct
message <https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#struct>`__
- ``response``:
[Environment][google.cloud.dialogflow.cx.v3beta1.Environment]
Args:
request (Union[google.cloud.dialogflowcx_v3beta1.types.UpdateEnvironmentRequest, dict]):
The request object. The request message for
[Environments.UpdateEnvironment][google.cloud.dialogflow.cx.v3beta1.Environments.UpdateEnvironment].
environment (google.cloud.dialogflowcx_v3beta1.types.Environment):
Required. The environment to update.
This corresponds to the ``environment`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The mask to control which
fields get updated.
This corresponds to the ``update_mask`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dialogflowcx_v3beta1.types.Environment` Represents an environment for an agent. You can create multiple versions
of your agent and publish them to separate
environments. When you edit an agent, you are editing
the draft agent. At any point, you can save the draft
agent as an agent version, which is an immutable
snapshot of your agent. When you save the draft
agent, it is published to the default environment.
When you create agent versions, you can publish them
to custom environments. You can create a variety of
custom environments for testing, development,
production, etc.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([environment, update_mask])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcdc_environment.UpdateEnvironmentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, gcdc_environment.UpdateEnvironmentRequest):
request = gcdc_environment.UpdateEnvironmentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if environment is not None:
request.environment = environment
if update_mask is not None:
request.update_mask = update_mask
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.update_environment]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("environment.name", request.environment.name),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
gcdc_environment.Environment,
metadata_type=struct_pb2.Struct,
)
# Done; return the response.
return response
def delete_environment(
self,
request: Union[environment.DeleteEnvironmentRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes the specified
[Environment][google.cloud.dialogflow.cx.v3beta1.Environment].
Args:
request (Union[google.cloud.dialogflowcx_v3beta1.types.DeleteEnvironmentRequest, dict]):
The request object. The request message for
[Environments.DeleteEnvironment][google.cloud.dialogflow.cx.v3beta1.Environments.DeleteEnvironment].
name (str):
Required. The name of the
[Environment][google.cloud.dialogflow.cx.v3beta1.Environment]
to delete. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/environments/<Environment ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a environment.DeleteEnvironmentRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, environment.DeleteEnvironmentRequest):
request = environment.DeleteEnvironmentRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_environment]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def lookup_environment_history(
self,
request: Union[environment.LookupEnvironmentHistoryRequest, dict] = None,
*,
name: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.LookupEnvironmentHistoryPager:
r"""Looks up the history of the specified
[Environment][google.cloud.dialogflow.cx.v3beta1.Environment].
Args:
request (Union[google.cloud.dialogflowcx_v3beta1.types.LookupEnvironmentHistoryRequest, dict]):
The request object. The request message for
[Environments.LookupEnvironmentHistory][google.cloud.dialogflow.cx.v3beta1.Environments.LookupEnvironmentHistory].
name (str):
Required. Resource name of the environment to look up
the history for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/environments/<Environment ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.services.environments.pagers.LookupEnvironmentHistoryPager:
The response message for
[Environments.LookupEnvironmentHistory][google.cloud.dialogflow.cx.v3beta1.Environments.LookupEnvironmentHistory].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a environment.LookupEnvironmentHistoryRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, environment.LookupEnvironmentHistoryRequest):
request = environment.LookupEnvironmentHistoryRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.lookup_environment_history
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.LookupEnvironmentHistoryPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def run_continuous_test(
self,
request: Union[environment.RunContinuousTestRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Kicks off a continuous test under the specified
[Environment][google.cloud.dialogflow.cx.v3beta1.Environment].
This method is a `long-running
operation <https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation>`__.
The returned ``Operation`` type has the following
method-specific fields:
- ``metadata``:
[RunContinuousTestMetadata][google.cloud.dialogflow.cx.v3beta1.RunContinuousTestMetadata]
- ``response``:
[RunContinuousTestResponse][google.cloud.dialogflow.cx.v3beta1.RunContinuousTestResponse]
Args:
request (Union[google.cloud.dialogflowcx_v3beta1.types.RunContinuousTestRequest, dict]):
The request object. The request message for
[Environments.RunContinuousTest][google.cloud.dialogflow.cx.v3beta1.Environments.RunContinuousTest].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.dialogflowcx_v3beta1.types.RunContinuousTestResponse`
The response message for
[Environments.RunContinuousTest][google.cloud.dialogflow.cx.v3beta1.Environments.RunContinuousTest].
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a environment.RunContinuousTestRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, environment.RunContinuousTestRequest):
request = environment.RunContinuousTestRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.run_continuous_test]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("environment", request.environment),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
environment.RunContinuousTestResponse,
metadata_type=environment.RunContinuousTestMetadata,
)
# Done; return the response.
return response
def list_continuous_test_results(
self,
request: Union[environment.ListContinuousTestResultsRequest, dict] = None,
*,
parent: str = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListContinuousTestResultsPager:
r"""Fetches a list of continuous test results for a given
environment.
Args:
request (Union[google.cloud.dialogflowcx_v3beta1.types.ListContinuousTestResultsRequest, dict]):
The request object. The request message for
[Environments.ListContinuousTestResults][google.cloud.dialogflow.cx.v3beta1.Environments.ListContinuousTestResults].
parent (str):
Required. The environment to list results for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/ environments/<Environment ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.services.environments.pagers.ListContinuousTestResultsPager:
The response message for
[Environments.ListTestCaseResults][].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a environment.ListContinuousTestResultsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, environment.ListContinuousTestResultsRequest):
request = environment.ListContinuousTestResultsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.list_continuous_test_results
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListContinuousTestResultsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def deploy_flow(
self,
request: Union[environment.DeployFlowRequest, dict] = None,
*,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation.Operation:
r"""Deploys a flow to the specified
[Environment][google.cloud.dialogflow.cx.v3beta1.Environment].
This method is a `long-running
operation <https://cloud.google.com/dialogflow/cx/docs/how/long-running-operation>`__.
The returned ``Operation`` type has the following
method-specific fields:
- ``metadata``:
[DeployFlowMetadata][google.cloud.dialogflow.cx.v3beta1.DeployFlowMetadata]
- ``response``:
[DeployFlowResponse][google.cloud.dialogflow.cx.v3beta1.DeployFlowResponse]
Args:
request (Union[google.cloud.dialogflowcx_v3beta1.types.DeployFlowRequest, dict]):
The request object. The request message for
[Environments.DeployFlow][google.cloud.dialogflow.cx.v3beta1.Environments.DeployFlow].
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.dialogflowcx_v3beta1.types.DeployFlowResponse`
The response message for
[Environments.DeployFlow][google.cloud.dialogflow.cx.v3beta1.Environments.DeployFlow].
"""
# Create or coerce a protobuf request object.
# Minor optimization to avoid making a copy if the user passes
# in a environment.DeployFlowRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, environment.DeployFlowRequest):
request = environment.DeployFlowRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.deploy_flow]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("environment", request.environment),)
),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = operation.from_gapic(
response,
self._transport.operations_client,
environment.DeployFlowResponse,
metadata_type=environment.DeployFlowMetadata,
)
# Done; return the response.
return response
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflowcx",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("EnvironmentsClient",)
|
the-stack_0_24169 | import csv
import logging
import pandas as pd
import requests
from contextlib import contextmanager
from nba_api.stats.endpoints import shotchartdetail, commonteamroster
from nba_api.stats.static import players
from nba_api.stats.static import teams
from sqlalchemy.orm import sessionmaker, scoped_session
logger = logging.getLogger('root')
def get_all_players_shotchart():
# all_players = players.get_players()
all_teams = teams.get_teams()
print('all_teams : %s' %(all_teams))
# all_team_roster_list = []
# for team in all_teams:
# team_roster = commonteamroster.CommonTeamRoster(season='2018-19', team_id=team['id'])
# print('team_roster for team : %s is %s' %(team['full_name'], team_roster.common_team_roster.get_dict() ))
# all_team_roster_list.append(team_roster.common_team_roster.get_data_frame())
# all_team_roster_df = pd.concat(all_team_roster_list)
# all_team_roster_df.to_csv(
# path_or_buf='all_team_roster.csv',
# sep='|',
# encoding='utf-8',
# # don't add quote on all columns because nexxus marketing adds quote
# quoting=csv.QUOTE_NONE,
# float_format='%.0f', # make sure integer are not being casted to float like 123 to 123.0
# index=False) # don't export dataframe order index
# for player in all_players:
# team_id = teams.find_team_name_by_id(player['id'])
# print(team_id)
# shotchart = shotchartdetail.ShotChartDetail(team_id=, player_id=player['id'])
# print('shotchart for player %s is %s' %(player['full_name'], shotchart))
def get_all_teams():
all_teams = teams.get_teams()
print('all_teams are %s' % (all_teams))
return all_teams
def nba_api_get(url):
try:
if url:
response = requests.get(url)
print("get request to : %s's response is : %s, content is %s"
%(url, str(response), response.content))
# if response.status_code == requests.codes.ok:
# print(response)
# else:
# pass
else:
raise ValueError('no url has been provided!')
except requests.exceptions.Timeout:
logger.error('GET request to %s Timed out with exception' %
(url))
# Maybe set up for a retry, or continue in a retry loop
except requests.exceptions.TooManyRedirects:
logger.error('GET request to %s has TooManyRedirects' %
(url))
# Tell the user their URL was bad and try a different one
except requests.exceptions.RequestException as e:
logger.error('GET request to %s failed with exception %s' %
(url, e))
except Exception as e:
logger.error("exception occured in get_entity_nm_id : %s" % (e))
get_all_players_shotchart()
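# --- Hedged sketch (editor's addition) ---
# Completes the roster-collection idea in the commented-out code above:
# one dataframe holding every team's roster. Only endpoints already imported
# in this module are used; the season string is an assumption.
def get_all_team_rosters_df(season='2018-19'):
    frames = []
    for team in teams.get_teams():
        roster = commonteamroster.CommonTeamRoster(season=season, team_id=team['id'])
        frames.append(roster.common_team_roster.get_data_frame())
    return pd.concat(frames)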
|
the-stack_0_24170 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2021-01-15 12:35:35
# @Author : Joe Gao ([email protected])
import os
import json
import time
import pandas as pd
import streamlit as st
from celery.app.control import Control
from utils import (
get_params,
get_lines,
get_key_from_json,
kill_process,
dump_json,
)
from celery_training import training_excute, app
celery_control = Control(app=app)
class TrainingGUI(object):
def __init__(self, task_path, is_running=False, is_eval=False):
self.fn_state = f'{task_path}/state.json'
self.is_running = is_running
self._action = 'evaluating' if is_eval else 'training'
self.fn_model = f'{task_path}/model.h5'
self.is_configed = all([
os.path.exists(f'{task_path}/model_common_params.json'),
os.path.exists(f'{task_path}/model_bases_params.json'),
# os.path.exists(f'{task_path}/model_embeded_params.json'),
os.path.exists(f'{task_path}/model_inputs_params.json'),
os.path.exists(f'{task_path}/model_layer_params.json'),
os.path.exists(f'{task_path}/model_outputs_params.json'),
os.path.exists(f'{task_path}/model_optimizer_params.json'),
os.path.exists(f'{task_path}/params_data.json'),
os.path.exists(f'{task_path}/params_train.json'),
os.path.exists(f'{task_path}/params_pred.json'),
])
self.task_path = task_path
self.is_eval = is_eval
self.is_model_structure_showed = False
self.epoch = 0
st.title(f'Task {task_path.split("/")[-1]} {self._action}...')
def _print_logs(self, _n_curr=0, state='begin', _freq=None, train_graph=None, valid_graph=None, is_running=True, logs_graph=None):
logs = get_lines(f'{self.task_path}/{self._action}_logs.json')
for i, log in enumerate(logs[_n_curr:]):
if 'EPOCH' in log:
log = json.loads(log.strip())
state = self.epoch = log.get('EPOCH')
if 'scores' in log:
if not self.is_eval:
scores = {
k: v
for k, v in eval(str(log.get('scores'))).items() if k.startswith('val_')
}
valid_graph.add_rows(pd.json_normalize([scores]))
st.text(f'EPOCH: {state}, Scores: {scores}')
else:
try:
log = json.loads(log.strip())
if self.is_eval:
if is_running:
train_graph.json(log)
else:
if not _freq:
_freq = len(logs) // 10
if log.get('batch') % _freq == 0:
scores = {k: v for k, v in eval(str(log.get('scores'))).items() if k not in ['batch', 'size']}
train_graph.add_rows(pd.json_normalize([scores]))
except:
continue
if 'scores' in log:
batch = log.get('batch')
time = log.get('time')
log = eval(str(log.get('scores')))
if log:
log['batch'] = f'{batch} (EPOCH: {self.epoch}) {time if time else ""}'
if log:
if 'size' in log:
del log['size']
if log:
logs_graph.json(log)
_n_curr = len(logs)
return _n_curr, state, train_graph
def _show_structure(self):
if not self.is_model_structure_showed:
st.info(f'Model layers')
st.json(get_lines(f'{self.task_path}/model_structs.txt'))
self.is_model_structure_showed = True
def _monitoring(self):
_block = st.empty()
_start = _block.button('Start monitor', key=f'{self._action}_button')
_freq = st.number_input('Lines per update:', min_value=5, max_value=1000,
value=10, step=5, key=f'{self._action}_frequecy')
if _start:
_stop = _block.button('Stop monitor')
st.info(f'{self._action.capitalize()} logs')
train_graph = st.json('') if self.is_eval else st.line_chart()
valid_graph = st.empty() if self.is_eval else st.line_chart()
logs_graph = st.empty()
self._show_structure()
state = 'begin'
_n_curr = 0
while not state == 'Finished':
_n_curr, state, _ = self._print_logs(
_n_curr=_n_curr,
state=state,
_freq=_freq,
train_graph=train_graph,
valid_graph=valid_graph,
logs_graph=logs_graph,
)
if self.is_eval:
train_graph.empty()
st.success(f'{self._action.capitalize()} accomplished.')
self._show_structure()
def _start_training(self):
_block = st.empty()
_start = _block.button(f'🚀 Start {self._action}...')
if _start:
dump_json(self.fn_state, {f'{self._action}_state': True})
res = training_excute.delay(self.task_path, action=self._action)
dump_json(f'{self.task_path}/training_task.id', {'task_id': res.id})
_block.empty()
time.sleep(15)
_stop = _block.button(f'Stop {self._action}, or think twice before you click me...')
if _stop:
_block.empty()
self._stop_training()
self._start_training()
else:
self._monitoring()
else:
if os.path.exists(f'{self.task_path}/{self._action}_logs.json'):
st.info(f'Last {self._action} logs')
_n_curr, state, train_graph = self._print_logs(
train_graph=st.json('') if self.is_eval else st.line_chart(),
valid_graph=st.empty() if self.is_eval else st.line_chart(),
logs_graph=st.empty(),
)
if self.is_eval:
train_graph.empty()
self._show_structure()
def _stop_training(self):
task_id = get_key_from_json(f'{self.task_path}/training_task.id', 'task_id')
if task_id:
celery_control.revoke(str(task_id), terminate=True)
kill_process(f'{self.task_path}/training.pid')
os.remove(self.fn_state)
time.sleep(5)
st.warning(f'{self._action} stopped.')
def train(self):
if not self.is_configed:
st.warning('Task params not found, please customize the params for the task first.')
else:
_state = get_key_from_json(self.fn_state, f'{self._action}_state')
if not self.is_running:
self._start_training()
else:
if _state:
_block = st.empty()
_stop = _block.button(f'Stop {self._action}, or think twice before you click me...')
if _stop:
_block.empty()
self._stop_training()
self._start_training()
else:
self._monitoring()
else:
_, _, train_graph = self._print_logs(
train_graph=st.empty() if self.is_eval else st.line_chart(),
valid_graph=st.empty() if self.is_eval else st.line_chart(),
is_running=_state,
logs_graph=st.empty(),
)
if self.is_eval:
train_graph.empty()
self._show_structure()
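# --- Hedged usage sketch (editor's addition) ---
# How this page is presumably wired into the surrounding Streamlit app; the
# task path and flags below are placeholders, not values from this module.
if __name__ == '__main__':
    gui = TrainingGUI('tasks/example_task', is_running=False, is_eval=False)
    gui.train()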
|
the-stack_0_24173 | """changes to pitch class
Revision ID: 2c6a8a3228be
Revises: 1eb964d9a352
Create Date: 2018-06-26 08:55:02.095198
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2c6a8a3228be'
down_revision = '1eb964d9a352'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('pitches', sa.Column('pitch_title', sa.String(), nullable=True))
op.drop_column('pitches', 'title')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('pitches', sa.Column('title', sa.VARCHAR(), autoincrement=False, nullable=True))
op.drop_column('pitches', 'pitch_title')
# ### end Alembic commands ###
|
the-stack_0_24176 | # Copyright 2019 Mattias Åkesson, Prashant Singh, Fredrik Wrede and Andreas Hellander
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Generate data from the Moving Averages 2 (MA2) model
"""
from ma2_model import simulate, prior
import pickle
import numpy as np
sim = simulate
true_param = [0.6, 0.2] # moving average2
data = simulate(true_param)
modelname = 'moving_average2'
n = 1000000
train_thetas = np.array(prior(n=n))
train_ts = np.expand_dims(np.array([simulate(p, n=100) for p in train_thetas]), 2)
validation_thetas = np.array(prior(n=10000))
validation_ts = np.expand_dims(np.array([simulate(p, n=100) for p in validation_thetas]), 2)
test_thetas = np.array(prior(n=10000))
test_ts = np.expand_dims(np.array([simulate(p, n=100) for p in test_thetas]), 2)
abc_trial_thetas = np.array(prior(n=500000))
abc_trial_ts = np.expand_dims(np.array([simulate(p, n=100) for p in abc_trial_thetas]), 2)
with open('datasets/' + modelname + '/true_param.p', "wb") as f:
pickle.dump(true_param, f)
with open('datasets/' + modelname + '/obs_data.p', "wb") as f:
pickle.dump(data, f)
with open('datasets/' + modelname + '/train_thetas.p', "wb") as f:
pickle.dump(train_thetas, f)
with open('datasets/' + modelname + '/train_ts.p', "wb") as f:
pickle.dump(train_ts, f)
with open('datasets/' + modelname + '/validation_thetas.p', "wb") as f:
pickle.dump(validation_thetas, f)
with open('datasets/' + modelname + '/validation_ts.p', "wb") as f:
pickle.dump(validation_ts, f)
with open('datasets/' + modelname + '/test_thetas.p', "wb") as f:
pickle.dump(test_thetas, f)
with open('datasets/' + modelname + '/test_ts.p', "wb") as f:
pickle.dump(test_ts, f)
with open('datasets/' + modelname + '/abc_trial_thetas.p', "wb") as f:
pickle.dump(abc_trial_thetas, f)
with open('datasets/' + modelname + '/abc_trial_ts.p', "wb") as f:
pickle.dump(abc_trial_ts, f)
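# --- Hedged loading sketch (editor's addition) ---
# Reading back one of the pickles written above; the expected shape assumes
# the two-parameter MA2 prior and the n=100 series length used in this script.
if __name__ == "__main__":
    with open('datasets/' + modelname + '/train_ts.p', "rb") as f:
        ts = pickle.load(f)
    print(ts.shape)  # expected (1000000, 100, 1)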
|
the-stack_0_24179 | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: [email protected]
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class OrganizationRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'display_name': 'string',
'name': 'string'
}
attribute_map = {
'display_name': 'display_name',
'name': 'name'
}
def __init__(self, display_name=None, name=None): # noqa: E501
"""OrganizationRequest - a model defined in Swagger""" # noqa: E501
self._display_name = None
self._name = None
self.discriminator = None
if display_name is not None:
self.display_name = display_name
if name is not None:
self.name = name
@property
def display_name(self):
"""Gets the display_name of this OrganizationRequest. # noqa: E501
The display name of the organization # noqa: E501
:return: The display_name of this OrganizationRequest. # noqa: E501
:rtype: string
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""Sets the display_name of this OrganizationRequest.
The display name of the organization # noqa: E501
:param display_name: The display_name of this OrganizationRequest. # noqa: E501
:type: string
"""
self._display_name = display_name
@property
def name(self):
"""Gets the name of this OrganizationRequest. # noqa: E501
The name of the organization used in URLs # noqa: E501
:return: The name of this OrganizationRequest. # noqa: E501
:rtype: string
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this OrganizationRequest.
The name of the organization used in URLs # noqa: E501
:param name: The name of this OrganizationRequest. # noqa: E501
:type: string
"""
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OrganizationRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
the-stack_0_24180 | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DetectionMetricsEvaluator call defination."""
from vega.trainer.callbacks.metrics_evaluator import MetricsEvaluator
from vega.common import ClassFactory, ClassType
@ClassFactory.register(ClassType.CALLBACK)
class DetectionMetricsEvaluator(MetricsEvaluator):
"""Callback that shows the progress of detection evaluating metrics."""
def __init__(self, *args, **kwargs):
"""Initialize class."""
super().__init__(*args, **kwargs)
def before_train(self, logs=None):
"""Be called before the training process."""
super().before_train(logs)
self.step_count_during_train_period = 0
self.loss_sum_during_train_period = 0
def before_epoch(self, epoch, logs=None):
"""Be called before each epoach."""
super().before_epoch(epoch, logs)
self.loss_sum_during_epoch_period = 0
self.step_count_during_epoch_period = 0
def after_train_step(self, batch_index, logs=None):
"""Be called after each train batch."""
input, _ = self.train_batch
batch_size = input.size(0)
self.cur_loss = logs['loss']
self.loss_avg = self._average_loss_during_train_period(batch_size, self.cur_loss)
logs.update({'cur_loss': self.cur_loss, 'loss_avg': self.loss_avg})
def after_valid_step(self, batch_index, logs=None):
"""Be called after each batch of validation."""
if self.trainer.do_validation and self.valid_metrics is not None:
_, target = self.valid_batch
output = logs['valid_batch_output']
self.valid_metrics(output, target)
def _average_loss_during_epoch_period(self, batch_size, cur_loss):
self.loss_sum_during_epoch_period = self.loss_sum_during_epoch_period + cur_loss * batch_size
self.step_count_during_epoch_period = self.step_count_during_epoch_period + batch_size
avg_loss = self.loss_sum_during_epoch_period / self.step_count_during_epoch_period
return avg_loss
def _average_loss_during_train_period(self, batch_size, cur_loss):
self.step_count_during_train_period = self.step_count_during_train_period + batch_size
self.loss_sum_during_train_period = self.loss_sum_during_train_period + cur_loss * batch_size
avg_loss = self.loss_sum_during_train_period / self.step_count_during_train_period
return avg_loss
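# --- Worked example (editor's addition) ---
# Both helpers above maintain a batch-size weighted running mean of the loss:
#     avg = sum(loss_i * batch_size_i) / sum(batch_size_i)
# e.g. batches of size 4 and 2 with losses 1.0 and 2.5 give (4*1.0 + 2*2.5) / 6 = 1.5.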
|
the-stack_0_24181 | from . import *
import pytest
# With 'from . import *', any package imported during testing has its module-level code
# executed, especially code with a run step, so the executable parts of a package should
# live under an if __name__ == '__main__': guard
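# A sketch of the guard the comment above recommends for modules pulled in via
# `from . import *` (hypothetical module code, not part of this test file):
#
#     def main():
#         ...  # the script / demo part that should not run on import
#
#     if __name__ == '__main__':
#         main()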
@pytest.mark.test1
@pytest.mark.parametrize(
"shuru,shuchu_expected",
[
('a','a'),
(1,1),
([1],[1]),
({"test":1},{"test":1}),
((1,),(1,)),
({1},{1}),
]
)
def test_N1_encapsulation_1(shuru,shuchu_expected):
val=N1_encapsulation_1.MyClass()
val.set_val(shuru)
assert shuchu_expected==val.get_val()
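# A minimal sketch of what N1_encapsulation_1.MyClass presumably provides,
# inferred only from the test above (the real module is not shown here):
#
#     class MyClass(object):
#         def set_val(self, val):
#             self.value = val
#         def get_val(self):
#             return self.value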
@pytest.mark.parametrize(
"shuru,shuchu_expected",
[
('a','a'),
(1,1),
([1],[1]),
({"test":1},{"test":1}),
((1,),(1,)),
({1},{1}),
]
)
def test_N2_encapsulation_1(shuru,shuchu_expected):
val=N2_encapsulation_2.MyClass()
val.set_val(shuru)
assert shuchu_expected==val.value
@pytest.mark.parametrize(
"set_val_param1,val_expected_after_increment",
[
('1',2),
(1,2),
]
)
def test_N3_encapsulation_3(set_val_param1,val_expected_after_increment):
    # coerce the value to an integer, then get it or increment it by one
object = N3_encapsulation_3.MyInteger()
object.set_val(set_val_param1)
object.increment_val()
assert val_expected_after_increment==object.val
@pytest.mark.parametrize(
"val_expected_after_increment",
[
1,
]
)
def test_N4_init_constructor_1(val_expected_after_increment):
n4 =N4_init_constructor_1.MyNum()
n4.increment()
assert val_expected_after_increment==n4.val
@pytest.mark.parametrize(
"set_val_param1,val_expected_after_increment",
[
(1,2),
('1',2),
('a',1),
]
)
def test_N5_init_constructor_2(set_val_param1,val_expected_after_increment):
n5 =N5_init_constructor_2.MyNum(set_val_param1)
n5.increment()
assert val_expected_after_increment==n5.value
@pytest.mark.parametrize(
"class_attri,instance_attri",
[
(10,100),
]
)
def test_N6_class_attributes_1(class_attri,instance_attri):
n6=N6_class_attributes_1.YourClass()
n6.set_val()
assert class_attri==n6.classy and instance_attri==n6.insty
@pytest.mark.parametrize(
"class_attri",
[
"class value",
]
)
def test_N7_class_attributes_2(class_attri):
n7=N7_class_attributes_2.YourClass()
assert class_attri==n7.classy
@pytest.mark.parametrize(
"set_value",
[
"set_value",
1,
]
)
def test_N8_class_instance_attributes_1(set_value):
count=N8_class_instance_attributes_1.InstanceCounter.count
n8=N8_class_instance_attributes_1.InstanceCounter(set_value)
    # print(N8_class_instance_attributes_1.InstanceCounter.count)  # print output is not shown inside pytest by default
assert count+1==N8_class_instance_attributes_1.InstanceCounter.count
@pytest.mark.parametrize(
"date,time",
[
('2020-09-19','21:28:10')
]
)
def test_N9_inheritance_1(date,time):
n9=N9_inheritance_1.Time()
assert date==n9.get_date(date) and time==n9.get_time(time)
@pytest.mark.parametrize(
"init_value",
[
1,
]
)
def test_N18_instance_methods_2_init(init_value):
count=N18_instance_methods_2.InstanceCounter.count
a=N18_instance_methods_2.InstanceCounter(init_value)
assert init_value==a.get_val() and count+1==N18_instance_methods_2.InstanceCounter.count
@pytest.mark.parametrize(
"init_value,set_value",
[
[1,100],
]
)
def test_N18_instance_methods_2(init_value,set_value):
count=N18_instance_methods_2.InstanceCounter.count
a=N18_instance_methods_2.InstanceCounter(init_value)
a.set_val(set_value)
assert set_value==a.get_val() and count+1==N18_instance_methods_2.InstanceCounter.count
@pytest.mark.parametrize(
"x,y,expected",
[
[1,1,1],
[1,0,None]
]
)
def test_N23_decorators_5(x,y,expected):
N23_decorators_5.divide(x,y)
assert expected==N23_decorators_5.divide(x,y)
@pytest.mark.parametrize(
"x,y,expected_add,expected_sub",
[
[2,1,6,2],
]
)
def test_N25_decorators_7(x,y,expected_add,expected_sub):
assert expected_add==N25_decorators_7.adder(x,y) and expected_sub==N25_decorators_7.subtractor(x,y)
@pytest.mark.parametrize(
"first_name, last_name,expected",
[
['Dong', 'Liu','Dr. Dong Liu'],
]
)
def test_N26_class_decorators(first_name, last_name,expected):
n26=N26_class_decorators.Name(first_name, last_name)
assert expected==n26.full_name()
@pytest.mark.parametrize(
"init_val, result",
[
['test','test'],
]
)
def test_N28_classmethod_2_init(init_val, result):
count=N28_classmethod_2.MyClass.count
instance=N28_classmethod_2.MyClass(init_val)
assert result==instance.get_val() and count+1==instance.get_count()
@pytest.mark.parametrize(
"set_val, result",
[
['test','test'],
]
)
def test_N28_classmethod_2(set_val, result):
count=N28_classmethod_2.MyClass.count
instance=N28_classmethod_2.MyClass('init_value')
instance.set_val(set_val)
assert result==instance.get_val() and count+1==instance.get_count()
@pytest.mark.parametrize(
"set_val,expected",
[
[1,1],
[1.5,0],
]
)
def test_N29_staticmethod_1(set_val,expected):
n29=N29_staticmethod_1.MyClass('init_value')
assert expected==n29.filterint(set_val)
@pytest.mark.parametrize(
"set_val,expected",
[
        [[1, 2, 3],'[1, 2, 3]'], # note: not written in PEP 8 style, but the repr output is normalized
]
)
def test_N31_magicmethods_1(set_val,expected):
n31=N31_magicmethods_1.PrintList(set_val)
assert expected==n31.__repr__()
@pytest.mark.parametrize(
"set_val,expected",
[
        [[1, 2, 3],[[1, 2, 3],'test']], # note: not written in PEP 8 style, but the output is normalized
]
)
def test_N38_method_overloading_2(set_val,expected):
n38=N38_method_overloading_2.GetSetList(set_val)
n38.set_val('test')
assert 'test'==n38.get_val() and expected==n38.get_vals()
|